aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/bcma/bcma_private.h9
-rw-r--r--drivers/bcma/core.c10
-rw-r--r--drivers/bcma/driver_chipcommon.c5
-rw-r--r--drivers/bcma/driver_chipcommon_pmu.c369
-rw-r--r--drivers/bcma/driver_mips.c24
-rw-r--r--drivers/bcma/driver_pci_host.c18
-rw-r--r--drivers/bcma/host_pci.c5
-rw-r--r--drivers/bcma/main.c19
-rw-r--r--drivers/bcma/scan.c30
-rw-r--r--drivers/bcma/sprom.c26
-rw-r--r--drivers/bluetooth/bluecard_cs.c10
-rw-r--r--drivers/bluetooth/bpa10x.c2
-rw-r--r--drivers/bluetooth/bt3c_cs.c4
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c3
-rw-r--r--drivers/bluetooth/btuart_cs.c4
-rw-r--r--drivers/bluetooth/btusb.c14
-rw-r--r--drivers/bluetooth/dtl1_cs.c4
-rw-r--r--drivers/bluetooth/hci_bcsp.c2
-rw-r--r--drivers/bluetooth/hci_h4.c2
-rw-r--r--drivers/bluetooth/hci_ldisc.c2
-rw-r--r--drivers/bluetooth/hci_ll.c6
-rw-r--r--drivers/connector/cn_proc.c36
-rw-r--r--drivers/connector/cn_queue.c12
-rw-r--r--drivers/connector/connector.c30
-rw-r--r--drivers/ieee802154/Kconfig6
-rw-r--r--drivers/ieee802154/Makefile1
-rw-r--r--drivers/ieee802154/at86rf230.c968
-rw-r--r--drivers/infiniband/core/netlink.c17
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c5
-rw-r--r--drivers/infiniband/hw/mlx4/main.c65
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h1
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c35
-rw-r--r--drivers/isdn/gigaset/capi.c2
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c18
-rw-r--r--drivers/isdn/hisax/hfc_usb.c18
-rw-r--r--drivers/isdn/hisax/isurf.c5
-rw-r--r--drivers/misc/Kconfig1
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/iwmc3200top/Kconfig20
-rw-r--r--drivers/misc/iwmc3200top/Makefile29
-rw-r--r--drivers/misc/iwmc3200top/debugfs.c137
-rw-r--r--drivers/misc/iwmc3200top/debugfs.h58
-rw-r--r--drivers/misc/iwmc3200top/fw-download.c358
-rw-r--r--drivers/misc/iwmc3200top/fw-msg.h113
-rw-r--r--drivers/misc/iwmc3200top/iwmc3200top.h205
-rw-r--r--drivers/misc/iwmc3200top/log.c348
-rw-r--r--drivers/misc/iwmc3200top/log.h171
-rw-r--r--drivers/misc/iwmc3200top/main.c662
-rw-r--r--drivers/net/appletalk/cops.c2
-rw-r--r--drivers/net/bonding/bond_3ad.c13
-rw-r--r--drivers/net/bonding/bond_3ad.h4
-rw-r--r--drivers/net/bonding/bond_alb.c26
-rw-r--r--drivers/net/bonding/bond_main.c64
-rw-r--r--drivers/net/bonding/bonding.h4
-rw-r--r--drivers/net/caif/caif_hsi.c548
-rw-r--r--drivers/net/can/bfin_can.c2
-rw-r--r--drivers/net/can/c_can/Kconfig20
-rw-r--r--drivers/net/can/c_can/Makefile1
-rw-r--r--drivers/net/can/c_can/c_can.c120
-rw-r--r--drivers/net/can/c_can/c_can.h163
-rw-r--r--drivers/net/can/c_can/c_can_pci.c221
-rw-r--r--drivers/net/can/c_can/c_can_platform.c76
-rw-r--r--drivers/net/can/cc770/cc770.c2
-rw-r--r--drivers/net/can/dev.c37
-rw-r--r--drivers/net/can/flexcan.c107
-rw-r--r--drivers/net/can/mcp251x.c3
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c2
-rw-r--r--drivers/net/can/softing/softing_main.c2
-rw-r--r--drivers/net/can/vcan.c27
-rw-r--r--drivers/net/cris/eth_v10.c2
-rw-r--r--drivers/net/dummy.c15
-rw-r--r--drivers/net/ethernet/3com/3c501.c2
-rw-r--r--drivers/net/ethernet/8390/Kconfig14
-rw-r--r--drivers/net/ethernet/8390/Makefile1
-rw-r--r--drivers/net/ethernet/8390/apne.c2
-rw-r--r--drivers/net/ethernet/8390/mcf8390.c480
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c8
-rw-r--r--drivers/net/ethernet/amd/declance.c4
-rw-r--r--drivers/net/ethernet/amd/lance.c5
-rw-r--r--drivers/net/ethernet/apple/macmace.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_hw.c8
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c77
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c105
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_param.c2
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c45
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c58
-rw-r--r--drivers/net/ethernet/atheros/atlx/atlx.c10
-rw-r--r--drivers/net/ethernet/broadcom/b44.c96
-rw-r--r--drivers/net/ethernet/broadcom/b44.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c100
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.h45
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h182
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c252
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h63
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c30
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c584
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h184
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c1230
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h53
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c279
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h168
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h128
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h42
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c68
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h2
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c53
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h13
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c284
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h47
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cee.c97
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cs.h34
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs.h63
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs_cna.h15
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h35
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs_status.h3
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c393
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.h43
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c48
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_msgq.c4
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi.h81
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_cna.h42
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_enet.h107
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_reg.h4
-rw-r--r--drivers/net/ethernet/brocade/bna/bna.h51
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_enet.c15
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_hw_defs.h33
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_tx_rx.c17
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_types.h66
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c12
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.h4
-rw-r--r--drivers/net/ethernet/brocade/bna/cna_fwimg.c4
-rw-r--r--drivers/net/ethernet/cadence/macb.c13
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c35
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c30
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c5
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h31
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c171
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h57
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h9
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c518
-rw-r--r--drivers/net/ethernet/ethoc.c6
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c9
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c11
-rw-r--r--drivers/net/ethernet/freescale/fec.c32
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c29
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c491
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c420
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c2
-rw-r--r--drivers/net/ethernet/hp/hp100.c6
-rw-r--r--drivers/net/ethernet/i825xx/lp486e.c8
-rw-r--r--drivers/net/ethernet/i825xx/sun3_82586.c4
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_qmr.c4
-rw-r--r--drivers/net/ethernet/intel/e100.c40
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c8
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c14
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c5
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c45
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c43
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h25
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c52
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c161
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c12
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c1
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.c5
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h79
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c74
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c26
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c159
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c25
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c799
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c522
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c23
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c187
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c54
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h39
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h28
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c172
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h113
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c1229
-rw-r--r--drivers/net/ethernet/jme.c14
-rw-r--r--drivers/net/ethernet/lantiq_etop.c3
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c4
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c4
-rw-r--r--drivers/net/ethernet/marvell/sky2.c18
-rw-r--r--drivers/net/ethernet/marvell/sky2.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c382
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c630
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c270
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c91
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c59
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c523
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h29
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h47
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c108
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/profile.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c285
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c35
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c10
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c2
-rw-r--r--drivers/net/ethernet/neterion/s2io.c24
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.c8
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.h2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c15
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.h3
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.c5
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c5
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c18
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c12
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c10
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c42
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic.h4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c21
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h15
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c37
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c10
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h13
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c315
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c100
-rw-r--r--drivers/net/ethernet/rdc/r6040.c16
-rw-r--r--drivers/net/ethernet/realtek/r8169.c975
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c371
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h77
-rw-r--r--drivers/net/ethernet/sfc/efx.c10
-rw-r--r--drivers/net/ethernet/sfc/enum.h8
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c2
-rw-r--r--drivers/net/ethernet/sfc/falcon.c35
-rw-r--r--drivers/net/ethernet/sfc/falcon_xmac.c12
-rw-r--r--drivers/net/ethernet/sfc/filter.c2
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c11
-rw-r--r--drivers/net/ethernet/sfc/mcdi_mon.c1
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h3
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h11
-rw-r--r--drivers/net/ethernet/sfc/nic.c11
-rw-r--r--drivers/net/ethernet/sfc/nic.h18
-rw-r--r--drivers/net/ethernet/sfc/rx.c23
-rw-r--r--drivers/net/ethernet/sfc/selftest.c64
-rw-r--r--drivers/net/ethernet/sfc/siena.c37
-rw-r--r--drivers/net/ethernet/sfc/tx.c93
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c4
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c6
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c6
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c19
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h31
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c101
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c57
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c196
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c12
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c2
-rw-r--r--drivers/net/ethernet/sun/sungem.c1
-rw-r--r--drivers/net/ethernet/sun/sunhme.c3
-rw-r--r--drivers/net/ethernet/sun/sunqe.c2
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c177
-rw-r--r--drivers/net/ethernet/ti/cpsw.c25
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c167
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c25
-rw-r--r--drivers/net/ethernet/tile/tilegx.c2
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c6
-rw-r--r--drivers/net/ethernet/via/via-velocity.c2
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c2
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c3
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c2
-rw-r--r--drivers/net/fddi/defxx.c4
-rw-r--r--drivers/net/fddi/skfp/pmf.c8
-rw-r--r--drivers/net/hamradio/mkiss.c8
-rw-r--r--drivers/net/hyperv/hyperv_net.h1
-rw-r--r--drivers/net/hyperv/netvsc.c2
-rw-r--r--drivers/net/hyperv/netvsc_drv.c30
-rw-r--r--drivers/net/hyperv/rndis_filter.c79
-rw-r--r--drivers/net/irda/ali-ircc.c6
-rw-r--r--drivers/net/irda/au1k_ir.c2
-rw-r--r--drivers/net/macvtap.c8
-rw-r--r--drivers/net/phy/Kconfig5
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/amd.c8
-rw-r--r--drivers/net/phy/bcm63xx.c31
-rw-r--r--drivers/net/phy/bcm87xx.c231
-rw-r--r--drivers/net/phy/broadcom.c119
-rw-r--r--drivers/net/phy/cicada.c35
-rw-r--r--drivers/net/phy/davicom.c41
-rw-r--r--drivers/net/phy/dp83640.c23
-rw-r--r--drivers/net/phy/fixed.c4
-rw-r--r--drivers/net/phy/icplus.c31
-rw-r--r--drivers/net/phy/lxt.c47
-rw-r--r--drivers/net/phy/marvell.c22
-rw-r--r--drivers/net/phy/mdio_bus.c14
-rw-r--r--drivers/net/phy/micrel.c62
-rw-r--r--drivers/net/phy/national.c8
-rw-r--r--drivers/net/phy/phy.c316
-rw-r--r--drivers/net/phy/phy_device.c139
-rw-r--r--drivers/net/phy/realtek.c6
-rw-r--r--drivers/net/phy/smsc.c64
-rw-r--r--drivers/net/phy/spi_ks8995.c4
-rw-r--r--drivers/net/phy/ste10Xp.c21
-rw-r--r--drivers/net/phy/vitesse.c52
-rw-r--r--drivers/net/slip/slip.c4
-rw-r--r--drivers/net/team/Kconfig13
-rw-r--r--drivers/net/team/Makefile1
-rw-r--r--drivers/net/team/team.c712
-rw-r--r--drivers/net/team/team_mode_activebackup.c17
-rw-r--r--drivers/net/team/team_mode_broadcast.c87
-rw-r--r--drivers/net/team/team_mode_loadbalance.c546
-rw-r--r--drivers/net/team/team_mode_roundrobin.c13
-rw-r--r--drivers/net/tun.c2
-rw-r--r--drivers/net/usb/Kconfig1
-rw-r--r--drivers/net/usb/Makefile1
-rw-r--r--drivers/net/usb/asix.h218
-rw-r--r--drivers/net/usb/asix_common.c631
-rw-r--r--drivers/net/usb/asix_devices.c (renamed from drivers/net/usb/asix.c)666
-rw-r--r--drivers/net/usb/ax88172a.c414
-rw-r--r--drivers/net/usb/cdc-phonet.c4
-rw-r--r--drivers/net/usb/pegasus.c4
-rw-r--r--drivers/net/usb/qmi_wwan.c352
-rw-r--r--drivers/net/usb/smsc75xx.c2
-rw-r--r--drivers/net/usb/smsc95xx.c34
-rw-r--r--drivers/net/usb/usbnet.c77
-rw-r--r--drivers/net/virtio_net.c2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c2
-rw-r--r--drivers/net/wan/x25_asy.c2
-rw-r--r--drivers/net/wimax/i2400m/Kconfig22
-rw-r--r--drivers/net/wimax/i2400m/Makefile8
-rw-r--r--drivers/net/wimax/i2400m/driver.c5
-rw-r--r--drivers/net/wimax/i2400m/fw.c5
-rw-r--r--drivers/net/wimax/i2400m/i2400m-sdio.h157
-rw-r--r--drivers/net/wimax/i2400m/i2400m.h13
-rw-r--r--drivers/net/wimax/i2400m/sdio-debug-levels.h22
-rw-r--r--drivers/net/wimax/i2400m/sdio-fw.c210
-rw-r--r--drivers/net/wimax/i2400m/sdio-rx.c301
-rw-r--r--drivers/net/wimax/i2400m/sdio-tx.c177
-rw-r--r--drivers/net/wimax/i2400m/sdio.c602
-rw-r--r--drivers/net/wireless/Kconfig1
-rw-r--r--drivers/net/wireless/Makefile2
-rw-r--r--drivers/net/wireless/adm8211.c5
-rw-r--r--drivers/net/wireless/airo.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/Kconfig8
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c17
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c230
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.h41
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_mbox.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c28
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c28
-rw-r--r--drivers/net/wireless/ath/ath6kl/target.h1
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c48
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c148
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h58
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile4
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c489
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.h32
-rw-r--r--drivers/net/wireless/ath/ath9k/antenna.c776
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c176
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c33
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c16
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c87
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c734
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.h40
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_paprd.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c120
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h38
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h53
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h882
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9340_initvals.h755
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h12
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9485_initvals.h1404
-rw-r--r--drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h1284
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h772
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h55
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c23
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c69
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_gpio.c122
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c78
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c269
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h23
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c510
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c753
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c244
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.h11
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c17
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c771
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h26
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c63
-rw-r--r--drivers/net/wireless/ath/carl9170/carl9170.h11
-rw-r--r--drivers/net/wireless/ath/carl9170/cmd.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c3
-rw-r--r--drivers/net/wireless/ath/carl9170/fwdesc.h3
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c6
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c53
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c69
-rw-r--r--drivers/net/wireless/ath/carl9170/version.h8
-rw-r--r--drivers/net/wireless/atmel.c4
-rw-r--r--drivers/net/wireless/b43/phy_n.c17
-rw-r--r--drivers/net/wireless/b43legacy/dma.c2
-rw-r--r--drivers/net/wireless/b43legacy/main.c2
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c6
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/Makefile2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h8
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c29
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c126
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h59
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c9
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c669
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c17
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.c131
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.h18
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/ampdu.c5
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/channel.c1223
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/channel.h4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/dma.c17
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c19
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c125
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c22
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c142
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pmu.c172
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pmu.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pub.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmutil/utils.c2
-rw-r--r--drivers/net/wireless/brcm80211/include/brcm_hw_ids.h1
-rw-r--r--drivers/net/wireless/brcm80211/include/soc.h62
-rw-r--r--drivers/net/wireless/hostap/hostap_proc.c3
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c27
-rw-r--r--drivers/net/wireless/iwlegacy/3945-rs.c2
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c13
-rw-r--r--drivers/net/wireless/iwlegacy/common.c7
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig5
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile32
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/Makefile13
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/agn.h (renamed from drivers/net/wireless/iwlwifi/iwl-agn.h)113
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-calib.c)24
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.h (renamed from drivers/net/wireless/iwlwifi/iwl-agn-calib.h)4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/commands.h (renamed from drivers/net/wireless/iwlwifi/iwl-commands.h)47
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/debugfs.c (renamed from drivers/net/wireless/iwlwifi/iwl-debugfs.c)37
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/dev.h (renamed from drivers/net/wireless/iwlwifi/iwl-dev.h)192
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/devices.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-devices.c)191
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/led.c (renamed from drivers/net/wireless/iwlwifi/iwl-led.c)5
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/led.h (renamed from drivers/net/wireless/iwlwifi/iwl-led.h)0
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/lib.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-lib.c)22
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c (renamed from drivers/net/wireless/iwlwifi/iwl-mac80211.c)201
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/main.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn.c)503
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/power.c (renamed from drivers/net/wireless/iwlwifi/iwl-power.c)11
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/power.h (renamed from drivers/net/wireless/iwlwifi/iwl-power.h)2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-rs.c)50
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.h (renamed from drivers/net/wireless/iwlwifi/iwl-agn-rs.h)3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rx.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-rx.c)34
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rxon.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-rxon.c)52
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/scan.c (renamed from drivers/net/wireless/iwlwifi/iwl-scan.c)195
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/sta.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-sta.c)60
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/testmode.c471
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tt.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-tt.c)13
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tt.h (renamed from drivers/net/wireless/iwlwifi/iwl-agn-tt.h)2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-tx.c)62
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/ucode.c (renamed from drivers/net/wireless/iwlwifi/iwl-ucode.c)71
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h28
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h28
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.c11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h17
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c152
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c903
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h138
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-read.c463
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-read.h70
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c1148
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h269
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c53
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h16
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.c13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h24
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-test.c856
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-test.h161
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-testmode.c1114
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h81
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/1000.c (renamed from drivers/net/wireless/iwlwifi/iwl-1000.c)19
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/2000.c (renamed from drivers/net/wireless/iwlwifi/iwl-2000.c)22
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/5000.c (renamed from drivers/net/wireless/iwlwifi/iwl-5000.c)20
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/6000.c (renamed from drivers/net/wireless/iwlwifi/iwl-6000.c)25
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/cfg.h (renamed from drivers/net/wireless/iwlwifi/iwl-cfg.h)0
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c (renamed from drivers/net/wireless/iwlwifi/iwl-pci.c)5
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h (renamed from drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h)25
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c (renamed from drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c)106
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c (renamed from drivers/net/wireless/iwlwifi/iwl-trans-pcie.c)386
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c (renamed from drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c)200
-rw-r--r--drivers/net/wireless/iwmc3200wifi/Kconfig39
-rw-r--r--drivers/net/wireless/iwmc3200wifi/Makefile10
-rw-r--r--drivers/net/wireless/iwmc3200wifi/bus.h57
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.c882
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.h31
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.c1002
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.h509
-rw-r--r--drivers/net/wireless/iwmc3200wifi/debug.h123
-rw-r--r--drivers/net/wireless/iwmc3200wifi/debugfs.c488
-rw-r--r--drivers/net/wireless/iwmc3200wifi/eeprom.c234
-rw-r--r--drivers/net/wireless/iwmc3200wifi/eeprom.h127
-rw-r--r--drivers/net/wireless/iwmc3200wifi/fw.c416
-rw-r--r--drivers/net/wireless/iwmc3200wifi/fw.h100
-rw-r--r--drivers/net/wireless/iwmc3200wifi/hal.c470
-rw-r--r--drivers/net/wireless/iwmc3200wifi/hal.h237
-rw-r--r--drivers/net/wireless/iwmc3200wifi/iwm.h367
-rw-r--r--drivers/net/wireless/iwmc3200wifi/lmac.h484
-rw-r--r--drivers/net/wireless/iwmc3200wifi/main.c847
-rw-r--r--drivers/net/wireless/iwmc3200wifi/netdev.c191
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c1701
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.h60
-rw-r--r--drivers/net/wireless/iwmc3200wifi/sdio.c509
-rw-r--r--drivers/net/wireless/iwmc3200wifi/sdio.h64
-rw-r--r--drivers/net/wireless/iwmc3200wifi/trace.c3
-rw-r--r--drivers/net/wireless/iwmc3200wifi/trace.h283
-rw-r--r--drivers/net/wireless/iwmc3200wifi/tx.c529
-rw-r--r--drivers/net/wireless/iwmc3200wifi/umac.h789
-rw-r--r--drivers/net/wireless/libertas/cfg.c39
-rw-r--r--drivers/net/wireless/libertas/debugfs.c4
-rw-r--r--drivers/net/wireless/libertas/dev.h1
-rw-r--r--drivers/net/wireless/libertas/host.h1
-rw-r--r--drivers/net/wireless/libertas/if_usb.c2
-rw-r--r--drivers/net/wireless/libertas/mesh.c7
-rw-r--r--drivers/net/wireless/libertas_tf/if_usb.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c24
-rw-r--r--drivers/net/wireless/mwifiex/11n.c14
-rw-r--r--drivers/net/wireless/mwifiex/11n.h3
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.c18
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c141
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c7
-rw-r--r--drivers/net/wireless/mwifiex/fw.h44
-rw-r--r--drivers/net/wireless/mwifiex/ie.c190
-rw-r--r--drivers/net/wireless/mwifiex/init.c66
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h15
-rw-r--r--drivers/net/wireless/mwifiex/join.c20
-rw-r--r--drivers/net/wireless/mwifiex/main.c7
-rw-r--r--drivers/net/wireless/mwifiex/main.h21
-rw-r--r--drivers/net/wireless/mwifiex/scan.c108
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c76
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c81
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c2
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c3
-rw-r--r--drivers/net/wireless/mwifiex/uap_cmd.c303
-rw-r--r--drivers/net/wireless/orinoco/cfg.c9
-rw-r--r--drivers/net/wireless/p54/eeprom.c4
-rw-r--r--drivers/net/wireless/p54/fwio.c2
-rw-r--r--drivers/net/wireless/p54/txrx.c6
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.c2
-rw-r--r--drivers/net/wireless/ray_cs.c2
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig8
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h181
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c388
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c83
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c14
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c9
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c10
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h5
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c20
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c4
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c2
-rw-r--r--drivers/net/wireless/rtlwifi/base.c2
-rw-r--r--drivers/net/wireless/rtlwifi/cam.c7
-rw-r--r--drivers/net/wireless/rtlwifi/core.c14
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.c4
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c31
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c10
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c43
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c43
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/fw.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/hw.c34
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/dm.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/hw.c46
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/phy.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/sw.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.c2
-rw-r--r--drivers/net/wireless/ti/Kconfig1
-rw-r--r--drivers/net/wireless/ti/Makefile1
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.c9
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c67
-rw-r--r--drivers/net/wireless/ti/wl1251/wl1251.h1
-rw-r--r--drivers/net/wireless/ti/wl12xx/Makefile2
-rw-r--r--drivers/net/wireless/ti/wl12xx/acx.h237
-rw-r--r--drivers/net/wireless/ti/wl12xx/cmd.c24
-rw-r--r--drivers/net/wireless/ti/wl12xx/debugfs.c243
-rw-r--r--drivers/net/wireless/ti/wl12xx/debugfs.h28
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c598
-rw-r--r--drivers/net/wireless/ti/wl12xx/wl12xx.h22
-rw-r--r--drivers/net/wireless/ti/wl18xx/Kconfig7
-rw-r--r--drivers/net/wireless/ti/wl18xx/Makefile3
-rw-r--r--drivers/net/wireless/ti/wl18xx/acx.c111
-rw-r--r--drivers/net/wireless/ti/wl18xx/acx.h287
-rw-r--r--drivers/net/wireless/ti/wl18xx/conf.h92
-rw-r--r--drivers/net/wireless/ti/wl18xx/debugfs.c403
-rw-r--r--drivers/net/wireless/ti/wl18xx/debugfs.h28
-rw-r--r--drivers/net/wireless/ti/wl18xx/io.c75
-rw-r--r--drivers/net/wireless/ti/wl18xx/io.h28
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c1557
-rw-r--r--drivers/net/wireless/ti/wl18xx/reg.h191
-rw-r--r--drivers/net/wireless/ti/wl18xx/tx.c127
-rw-r--r--drivers/net/wireless/ti/wl18xx/tx.h46
-rw-r--r--drivers/net/wireless/ti/wl18xx/wl18xx.h95
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.c16
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.h259
-rw-r--r--drivers/net/wireless/ti/wlcore/boot.c184
-rw-r--r--drivers/net/wireless/ti/wlcore/boot.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c161
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h40
-rw-r--r--drivers/net/wireless/ti/wlcore/conf.h99
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c643
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.h87
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c39
-rw-r--r--drivers/net/wireless/ti/wlcore/hw_ops.h90
-rw-r--r--drivers/net/wireless/ti/wlcore/ini.h22
-rw-r--r--drivers/net/wireless/ti/wlcore/init.c62
-rw-r--r--drivers/net/wireless/ti/wlcore/io.c61
-rw-r--r--drivers/net/wireless/ti/wlcore/io.h145
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c879
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.c37
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.c50
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.h15
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c61
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.h19
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c91
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c20
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c27
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c274
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.h53
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h117
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h (renamed from drivers/net/wireless/ti/wlcore/wl12xx.h)67
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.h2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.h2
-rw-r--r--drivers/net/xen-netback/netback.c4
-rw-r--r--drivers/nfc/nfcwilink.c7
-rw-r--r--drivers/nfc/pn533.c846
-rw-r--r--drivers/nfc/pn544_hci.c47
-rw-r--r--drivers/of/of_mdio.c16
-rw-r--r--drivers/s390/net/qeth_l2_main.c2
-rw-r--r--drivers/s390/net/qeth_l3_main.c5
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h4
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c44
-rw-r--r--drivers/scsi/bnx2i/57xx_iscsi_hsi.h16
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h58
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c35
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c40
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c11
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c3
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c5
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c12
-rw-r--r--drivers/scsi/scsi_netlink.c7
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c9
-rw-r--r--drivers/ssb/b43_pci_bridge.c1
-rw-r--r--drivers/ssb/scan.c2
-rw-r--r--drivers/staging/gdm72xx/netlink_k.c16
-rw-r--r--drivers/usb/atm/xusbatm.c4
-rw-r--r--drivers/usb/gadget/u_ether.c2
728 files changed, 38071 insertions, 37057 deletions
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index b81755bb4798..f6589eb7c45f 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -10,6 +10,15 @@
10 10
11#define BCMA_CORE_SIZE 0x1000 11#define BCMA_CORE_SIZE 0x1000
12 12
13#define bcma_err(bus, fmt, ...) \
14 pr_err("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
15#define bcma_warn(bus, fmt, ...) \
16 pr_warn("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
17#define bcma_info(bus, fmt, ...) \
18 pr_info("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
19#define bcma_debug(bus, fmt, ...) \
20 pr_debug("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
21
13struct bcma_bus; 22struct bcma_bus;
14 23
15/* main.c */ 24/* main.c */
diff --git a/drivers/bcma/core.c b/drivers/bcma/core.c
index bc6e89212ad3..63c8b470536f 100644
--- a/drivers/bcma/core.c
+++ b/drivers/bcma/core.c
@@ -75,7 +75,7 @@ void bcma_core_set_clockmode(struct bcma_device *core,
75 udelay(10); 75 udelay(10);
76 } 76 }
77 if (i) 77 if (i)
78 pr_err("HT force timeout\n"); 78 bcma_err(core->bus, "HT force timeout\n");
79 break; 79 break;
80 case BCMA_CLKMODE_DYNAMIC: 80 case BCMA_CLKMODE_DYNAMIC:
81 bcma_set32(core, BCMA_CLKCTLST, ~BCMA_CLKCTLST_FORCEHT); 81 bcma_set32(core, BCMA_CLKCTLST, ~BCMA_CLKCTLST_FORCEHT);
@@ -102,9 +102,9 @@ void bcma_core_pll_ctl(struct bcma_device *core, u32 req, u32 status, bool on)
102 udelay(10); 102 udelay(10);
103 } 103 }
104 if (i) 104 if (i)
105 pr_err("PLL enable timeout\n"); 105 bcma_err(core->bus, "PLL enable timeout\n");
106 } else { 106 } else {
107 pr_warn("Disabling PLL not supported yet!\n"); 107 bcma_warn(core->bus, "Disabling PLL not supported yet!\n");
108 } 108 }
109} 109}
110EXPORT_SYMBOL_GPL(bcma_core_pll_ctl); 110EXPORT_SYMBOL_GPL(bcma_core_pll_ctl);
@@ -120,8 +120,8 @@ u32 bcma_core_dma_translation(struct bcma_device *core)
120 else 120 else
121 return BCMA_DMA_TRANSLATION_DMA32_CMT; 121 return BCMA_DMA_TRANSLATION_DMA32_CMT;
122 default: 122 default:
123 pr_err("DMA translation unknown for host %d\n", 123 bcma_err(core->bus, "DMA translation unknown for host %d\n",
124 core->bus->hosttype); 124 core->bus->hosttype);
125 } 125 }
126 return BCMA_DMA_TRANSLATION_NONE; 126 return BCMA_DMA_TRANSLATION_NONE;
127} 127}
diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
index e9f1b3fd252c..a4c3ebcc4c86 100644
--- a/drivers/bcma/driver_chipcommon.c
+++ b/drivers/bcma/driver_chipcommon.c
@@ -44,7 +44,7 @@ void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
44 if (cc->capabilities & BCMA_CC_CAP_PMU) 44 if (cc->capabilities & BCMA_CC_CAP_PMU)
45 bcma_pmu_init(cc); 45 bcma_pmu_init(cc);
46 if (cc->capabilities & BCMA_CC_CAP_PCTL) 46 if (cc->capabilities & BCMA_CC_CAP_PCTL)
47 pr_err("Power control not implemented!\n"); 47 bcma_err(cc->core->bus, "Power control not implemented!\n");
48 48
49 if (cc->core->id.rev >= 16) { 49 if (cc->core->id.rev >= 16) {
50 if (cc->core->bus->sprom.leddc_on_time && 50 if (cc->core->bus->sprom.leddc_on_time &&
@@ -137,8 +137,7 @@ void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
137 | BCMA_CC_CORECTL_UARTCLKEN); 137 | BCMA_CC_CORECTL_UARTCLKEN);
138 } 138 }
139 } else { 139 } else {
140 pr_err("serial not supported on this device ccrev: 0x%x\n", 140 bcma_err(cc->core->bus, "serial not supported on this device ccrev: 0x%x\n", ccrev);
141 ccrev);
142 return; 141 return;
143 } 142 }
144 143
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
index 61ce4054b3c3..44326178db29 100644
--- a/drivers/bcma/driver_chipcommon_pmu.c
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -3,7 +3,8 @@
3 * ChipCommon Power Management Unit driver 3 * ChipCommon Power Management Unit driver
4 * 4 *
5 * Copyright 2009, Michael Buesch <m@bues.ch> 5 * Copyright 2009, Michael Buesch <m@bues.ch>
6 * Copyright 2007, Broadcom Corporation 6 * Copyright 2007, 2011, Broadcom Corporation
7 * Copyright 2011, 2012, Hauke Mehrtens <hauke@hauke-m.de>
7 * 8 *
8 * Licensed under the GNU/GPL. See COPYING for details. 9 * Licensed under the GNU/GPL. See COPYING for details.
9 */ 10 */
@@ -54,39 +55,19 @@ void bcma_chipco_regctl_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask,
54} 55}
55EXPORT_SYMBOL_GPL(bcma_chipco_regctl_maskset); 56EXPORT_SYMBOL_GPL(bcma_chipco_regctl_maskset);
56 57
57static void bcma_pmu_pll_init(struct bcma_drv_cc *cc)
58{
59 struct bcma_bus *bus = cc->core->bus;
60
61 switch (bus->chipinfo.id) {
62 case 0x4313:
63 case 0x4331:
64 case 43224:
65 case 43225:
66 break;
67 default:
68 pr_err("PLL init unknown for device 0x%04X\n",
69 bus->chipinfo.id);
70 }
71}
72
73static void bcma_pmu_resources_init(struct bcma_drv_cc *cc) 58static void bcma_pmu_resources_init(struct bcma_drv_cc *cc)
74{ 59{
75 struct bcma_bus *bus = cc->core->bus; 60 struct bcma_bus *bus = cc->core->bus;
76 u32 min_msk = 0, max_msk = 0; 61 u32 min_msk = 0, max_msk = 0;
77 62
78 switch (bus->chipinfo.id) { 63 switch (bus->chipinfo.id) {
79 case 0x4313: 64 case BCMA_CHIP_ID_BCM4313:
80 min_msk = 0x200D; 65 min_msk = 0x200D;
81 max_msk = 0xFFFF; 66 max_msk = 0xFFFF;
82 break; 67 break;
83 case 0x4331:
84 case 43224:
85 case 43225:
86 break;
87 default: 68 default:
88 pr_err("PMU resource config unknown for device 0x%04X\n", 69 bcma_debug(bus, "PMU resource config unknown or not needed for device 0x%04X\n",
89 bus->chipinfo.id); 70 bus->chipinfo.id);
90 } 71 }
91 72
92 /* Set the resource masks. */ 73 /* Set the resource masks. */
@@ -94,22 +75,9 @@ static void bcma_pmu_resources_init(struct bcma_drv_cc *cc)
94 bcma_cc_write32(cc, BCMA_CC_PMU_MINRES_MSK, min_msk); 75 bcma_cc_write32(cc, BCMA_CC_PMU_MINRES_MSK, min_msk);
95 if (max_msk) 76 if (max_msk)
96 bcma_cc_write32(cc, BCMA_CC_PMU_MAXRES_MSK, max_msk); 77 bcma_cc_write32(cc, BCMA_CC_PMU_MAXRES_MSK, max_msk);
97}
98
99void bcma_pmu_swreg_init(struct bcma_drv_cc *cc)
100{
101 struct bcma_bus *bus = cc->core->bus;
102 78
103 switch (bus->chipinfo.id) { 79 /* Add some delay; allow resources to come up and settle. */
104 case 0x4313: 80 mdelay(2);
105 case 0x4331:
106 case 43224:
107 case 43225:
108 break;
109 default:
110 pr_err("PMU switch/regulators init unknown for device "
111 "0x%04X\n", bus->chipinfo.id);
112 }
113} 81}
114 82
115/* Disable to allow reading SPROM. Don't know the adventages of enabling it. */ 83/* Disable to allow reading SPROM. Don't know the adventages of enabling it. */
@@ -123,8 +91,11 @@ void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable)
123 val |= BCMA_CHIPCTL_4331_EXTPA_EN; 91 val |= BCMA_CHIPCTL_4331_EXTPA_EN;
124 if (bus->chipinfo.pkg == 9 || bus->chipinfo.pkg == 11) 92 if (bus->chipinfo.pkg == 9 || bus->chipinfo.pkg == 11)
125 val |= BCMA_CHIPCTL_4331_EXTPA_ON_GPIO2_5; 93 val |= BCMA_CHIPCTL_4331_EXTPA_ON_GPIO2_5;
94 else if (bus->chipinfo.rev > 0)
95 val |= BCMA_CHIPCTL_4331_EXTPA_EN2;
126 } else { 96 } else {
127 val &= ~BCMA_CHIPCTL_4331_EXTPA_EN; 97 val &= ~BCMA_CHIPCTL_4331_EXTPA_EN;
98 val &= ~BCMA_CHIPCTL_4331_EXTPA_EN2;
128 val &= ~BCMA_CHIPCTL_4331_EXTPA_ON_GPIO2_5; 99 val &= ~BCMA_CHIPCTL_4331_EXTPA_ON_GPIO2_5;
129 } 100 }
130 bcma_cc_write32(cc, BCMA_CC_CHIPCTL, val); 101 bcma_cc_write32(cc, BCMA_CC_CHIPCTL, val);
@@ -135,28 +106,38 @@ void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
135 struct bcma_bus *bus = cc->core->bus; 106 struct bcma_bus *bus = cc->core->bus;
136 107
137 switch (bus->chipinfo.id) { 108 switch (bus->chipinfo.id) {
138 case 0x4313: 109 case BCMA_CHIP_ID_BCM4313:
139 bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x7); 110 /* enable 12 mA drive strenth for 4313 and set chipControl
111 register bit 1 */
112 bcma_chipco_chipctl_maskset(cc, 0,
113 BCMA_CCTRL_4313_12MA_LED_DRIVE,
114 BCMA_CCTRL_4313_12MA_LED_DRIVE);
140 break; 115 break;
141 case 0x4331: 116 case BCMA_CHIP_ID_BCM4331:
142 case 43431: 117 case BCMA_CHIP_ID_BCM43431:
143 /* Ext PA lines must be enabled for tx on BCM4331 */ 118 /* Ext PA lines must be enabled for tx on BCM4331 */
144 bcma_chipco_bcm4331_ext_pa_lines_ctl(cc, true); 119 bcma_chipco_bcm4331_ext_pa_lines_ctl(cc, true);
145 break; 120 break;
146 case 43224: 121 case BCMA_CHIP_ID_BCM43224:
122 case BCMA_CHIP_ID_BCM43421:
123 /* enable 12 mA drive strenth for 43224 and set chipControl
124 register bit 15 */
147 if (bus->chipinfo.rev == 0) { 125 if (bus->chipinfo.rev == 0) {
148 pr_err("Workarounds for 43224 rev 0 not fully " 126 bcma_cc_maskset32(cc, BCMA_CC_CHIPCTL,
149 "implemented\n"); 127 BCMA_CCTRL_43224_GPIO_TOGGLE,
150 bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x00F000F0); 128 BCMA_CCTRL_43224_GPIO_TOGGLE);
129 bcma_chipco_chipctl_maskset(cc, 0,
130 BCMA_CCTRL_43224A0_12MA_LED_DRIVE,
131 BCMA_CCTRL_43224A0_12MA_LED_DRIVE);
151 } else { 132 } else {
152 bcma_chipco_chipctl_maskset(cc, 0, ~0, 0xF0); 133 bcma_chipco_chipctl_maskset(cc, 0,
134 BCMA_CCTRL_43224B0_12MA_LED_DRIVE,
135 BCMA_CCTRL_43224B0_12MA_LED_DRIVE);
153 } 136 }
154 break; 137 break;
155 case 43225:
156 break;
157 default: 138 default:
158 pr_err("Workarounds unknown for device 0x%04X\n", 139 bcma_debug(bus, "Workarounds unknown or not needed for device 0x%04X\n",
159 bus->chipinfo.id); 140 bus->chipinfo.id);
160 } 141 }
161} 142}
162 143
@@ -167,8 +148,8 @@ void bcma_pmu_init(struct bcma_drv_cc *cc)
167 pmucap = bcma_cc_read32(cc, BCMA_CC_PMU_CAP); 148 pmucap = bcma_cc_read32(cc, BCMA_CC_PMU_CAP);
168 cc->pmu.rev = (pmucap & BCMA_CC_PMU_CAP_REVISION); 149 cc->pmu.rev = (pmucap & BCMA_CC_PMU_CAP_REVISION);
169 150
170 pr_debug("Found rev %u PMU (capabilities 0x%08X)\n", cc->pmu.rev, 151 bcma_debug(cc->core->bus, "Found rev %u PMU (capabilities 0x%08X)\n",
171 pmucap); 152 cc->pmu.rev, pmucap);
172 153
173 if (cc->pmu.rev == 1) 154 if (cc->pmu.rev == 1)
174 bcma_cc_mask32(cc, BCMA_CC_PMU_CTL, 155 bcma_cc_mask32(cc, BCMA_CC_PMU_CTL,
@@ -177,12 +158,7 @@ void bcma_pmu_init(struct bcma_drv_cc *cc)
177 bcma_cc_set32(cc, BCMA_CC_PMU_CTL, 158 bcma_cc_set32(cc, BCMA_CC_PMU_CTL,
178 BCMA_CC_PMU_CTL_NOILPONW); 159 BCMA_CC_PMU_CTL_NOILPONW);
179 160
180 if (cc->core->id.id == 0x4329 && cc->core->id.rev == 2)
181 pr_err("Fix for 4329b0 bad LPOM state not implemented!\n");
182
183 bcma_pmu_pll_init(cc);
184 bcma_pmu_resources_init(cc); 161 bcma_pmu_resources_init(cc);
185 bcma_pmu_swreg_init(cc);
186 bcma_pmu_workarounds(cc); 162 bcma_pmu_workarounds(cc);
187} 163}
188 164
@@ -191,23 +167,22 @@ u32 bcma_pmu_alp_clock(struct bcma_drv_cc *cc)
191 struct bcma_bus *bus = cc->core->bus; 167 struct bcma_bus *bus = cc->core->bus;
192 168
193 switch (bus->chipinfo.id) { 169 switch (bus->chipinfo.id) {
194 case 0x4716: 170 case BCMA_CHIP_ID_BCM4716:
195 case 0x4748: 171 case BCMA_CHIP_ID_BCM4748:
196 case 47162: 172 case BCMA_CHIP_ID_BCM47162:
197 case 0x4313: 173 case BCMA_CHIP_ID_BCM4313:
198 case 0x5357: 174 case BCMA_CHIP_ID_BCM5357:
199 case 0x4749: 175 case BCMA_CHIP_ID_BCM4749:
200 case 53572: 176 case BCMA_CHIP_ID_BCM53572:
201 /* always 20Mhz */ 177 /* always 20Mhz */
202 return 20000 * 1000; 178 return 20000 * 1000;
203 case 0x5356: 179 case BCMA_CHIP_ID_BCM5356:
204 case 0x5300: 180 case BCMA_CHIP_ID_BCM4706:
205 /* always 25Mhz */ 181 /* always 25Mhz */
206 return 25000 * 1000; 182 return 25000 * 1000;
207 default: 183 default:
208 pr_warn("No ALP clock specified for %04X device, " 184 bcma_warn(bus, "No ALP clock specified for %04X device, pmu rev. %d, using default %d Hz\n",
209 "pmu rev. %d, using default %d Hz\n", 185 bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_ALP_CLOCK);
210 bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_ALP_CLOCK);
211 } 186 }
212 return BCMA_CC_PMU_ALP_CLOCK; 187 return BCMA_CC_PMU_ALP_CLOCK;
213} 188}
@@ -224,7 +199,8 @@ static u32 bcma_pmu_clock(struct bcma_drv_cc *cc, u32 pll0, u32 m)
224 199
225 BUG_ON(!m || m > 4); 200 BUG_ON(!m || m > 4);
226 201
227 if (bus->chipinfo.id == 0x5357 || bus->chipinfo.id == 0x4749) { 202 if (bus->chipinfo.id == BCMA_CHIP_ID_BCM5357 ||
203 bus->chipinfo.id == BCMA_CHIP_ID_BCM4749) {
228 /* Detect failure in clock setting */ 204 /* Detect failure in clock setting */
229 tmp = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT); 205 tmp = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT);
230 if (tmp & 0x40000) 206 if (tmp & 0x40000)
@@ -250,33 +226,62 @@ static u32 bcma_pmu_clock(struct bcma_drv_cc *cc, u32 pll0, u32 m)
250 return (fc / div) * 1000000; 226 return (fc / div) * 1000000;
251} 227}
252 228
229static u32 bcma_pmu_clock_bcm4706(struct bcma_drv_cc *cc, u32 pll0, u32 m)
230{
231 u32 tmp, ndiv, p1div, p2div;
232 u32 clock;
233
234 BUG_ON(!m || m > 4);
235
236 /* Get N, P1 and P2 dividers to determine CPU clock */
237 tmp = bcma_chipco_pll_read(cc, pll0 + BCMA_CC_PMU6_4706_PROCPLL_OFF);
238 ndiv = (tmp & BCMA_CC_PMU6_4706_PROC_NDIV_INT_MASK)
239 >> BCMA_CC_PMU6_4706_PROC_NDIV_INT_SHIFT;
240 p1div = (tmp & BCMA_CC_PMU6_4706_PROC_P1DIV_MASK)
241 >> BCMA_CC_PMU6_4706_PROC_P1DIV_SHIFT;
242 p2div = (tmp & BCMA_CC_PMU6_4706_PROC_P2DIV_MASK)
243 >> BCMA_CC_PMU6_4706_PROC_P2DIV_SHIFT;
244
245 tmp = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT);
246 if (tmp & BCMA_CC_CHIPST_4706_PKG_OPTION)
247 /* Low cost bonding: Fixed reference clock 25MHz and m = 4 */
248 clock = (25000000 / 4) * ndiv * p2div / p1div;
249 else
250 /* Fixed reference clock 25MHz and m = 2 */
251 clock = (25000000 / 2) * ndiv * p2div / p1div;
252
253 if (m == BCMA_CC_PMU5_MAINPLL_SSB)
254 clock = clock / 4;
255
256 return clock;
257}
258
253/* query bus clock frequency for PMU-enabled chipcommon */ 259/* query bus clock frequency for PMU-enabled chipcommon */
254u32 bcma_pmu_get_clockcontrol(struct bcma_drv_cc *cc) 260u32 bcma_pmu_get_clockcontrol(struct bcma_drv_cc *cc)
255{ 261{
256 struct bcma_bus *bus = cc->core->bus; 262 struct bcma_bus *bus = cc->core->bus;
257 263
258 switch (bus->chipinfo.id) { 264 switch (bus->chipinfo.id) {
259 case 0x4716: 265 case BCMA_CHIP_ID_BCM4716:
260 case 0x4748: 266 case BCMA_CHIP_ID_BCM4748:
261 case 47162: 267 case BCMA_CHIP_ID_BCM47162:
262 return bcma_pmu_clock(cc, BCMA_CC_PMU4716_MAINPLL_PLL0, 268 return bcma_pmu_clock(cc, BCMA_CC_PMU4716_MAINPLL_PLL0,
263 BCMA_CC_PMU5_MAINPLL_SSB); 269 BCMA_CC_PMU5_MAINPLL_SSB);
264 case 0x5356: 270 case BCMA_CHIP_ID_BCM5356:
265 return bcma_pmu_clock(cc, BCMA_CC_PMU5356_MAINPLL_PLL0, 271 return bcma_pmu_clock(cc, BCMA_CC_PMU5356_MAINPLL_PLL0,
266 BCMA_CC_PMU5_MAINPLL_SSB); 272 BCMA_CC_PMU5_MAINPLL_SSB);
267 case 0x5357: 273 case BCMA_CHIP_ID_BCM5357:
268 case 0x4749: 274 case BCMA_CHIP_ID_BCM4749:
269 return bcma_pmu_clock(cc, BCMA_CC_PMU5357_MAINPLL_PLL0, 275 return bcma_pmu_clock(cc, BCMA_CC_PMU5357_MAINPLL_PLL0,
270 BCMA_CC_PMU5_MAINPLL_SSB); 276 BCMA_CC_PMU5_MAINPLL_SSB);
271 case 0x5300: 277 case BCMA_CHIP_ID_BCM4706:
272 return bcma_pmu_clock(cc, BCMA_CC_PMU4706_MAINPLL_PLL0, 278 return bcma_pmu_clock_bcm4706(cc, BCMA_CC_PMU4706_MAINPLL_PLL0,
273 BCMA_CC_PMU5_MAINPLL_SSB); 279 BCMA_CC_PMU5_MAINPLL_SSB);
274 case 53572: 280 case BCMA_CHIP_ID_BCM53572:
275 return 75000000; 281 return 75000000;
276 default: 282 default:
277 pr_warn("No backplane clock specified for %04X device, " 283 bcma_warn(bus, "No backplane clock specified for %04X device, pmu rev. %d, using default %d Hz\n",
278 "pmu rev. %d, using default %d Hz\n", 284 bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_HT_CLOCK);
279 bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_HT_CLOCK);
280 } 285 }
281 return BCMA_CC_PMU_HT_CLOCK; 286 return BCMA_CC_PMU_HT_CLOCK;
282} 287}
@@ -286,17 +291,21 @@ u32 bcma_pmu_get_clockcpu(struct bcma_drv_cc *cc)
286{ 291{
287 struct bcma_bus *bus = cc->core->bus; 292 struct bcma_bus *bus = cc->core->bus;
288 293
289 if (bus->chipinfo.id == 53572) 294 if (bus->chipinfo.id == BCMA_CHIP_ID_BCM53572)
290 return 300000000; 295 return 300000000;
291 296
292 if (cc->pmu.rev >= 5) { 297 if (cc->pmu.rev >= 5) {
293 u32 pll; 298 u32 pll;
294 switch (bus->chipinfo.id) { 299 switch (bus->chipinfo.id) {
295 case 0x5356: 300 case BCMA_CHIP_ID_BCM4706:
301 return bcma_pmu_clock_bcm4706(cc,
302 BCMA_CC_PMU4706_MAINPLL_PLL0,
303 BCMA_CC_PMU5_MAINPLL_CPU);
304 case BCMA_CHIP_ID_BCM5356:
296 pll = BCMA_CC_PMU5356_MAINPLL_PLL0; 305 pll = BCMA_CC_PMU5356_MAINPLL_PLL0;
297 break; 306 break;
298 case 0x5357: 307 case BCMA_CHIP_ID_BCM5357:
299 case 0x4749: 308 case BCMA_CHIP_ID_BCM4749:
300 pll = BCMA_CC_PMU5357_MAINPLL_PLL0; 309 pll = BCMA_CC_PMU5357_MAINPLL_PLL0;
301 break; 310 break;
302 default: 311 default:
@@ -304,10 +313,188 @@ u32 bcma_pmu_get_clockcpu(struct bcma_drv_cc *cc)
304 break; 313 break;
305 } 314 }
306 315
307 /* TODO: if (bus->chipinfo.id == 0x5300)
308 return si_4706_pmu_clock(sih, osh, cc, PMU4706_MAINPLL_PLL0, PMU5_MAINPLL_CPU); */
309 return bcma_pmu_clock(cc, pll, BCMA_CC_PMU5_MAINPLL_CPU); 316 return bcma_pmu_clock(cc, pll, BCMA_CC_PMU5_MAINPLL_CPU);
310 } 317 }
311 318
312 return bcma_pmu_get_clockcontrol(cc); 319 return bcma_pmu_get_clockcontrol(cc);
313} 320}
321
322static void bcma_pmu_spuravoid_pll_write(struct bcma_drv_cc *cc, u32 offset,
323 u32 value)
324{
325 bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
326 bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, value);
327}
328
329void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid)
330{
331 u32 tmp = 0;
332 u8 phypll_offset = 0;
333 u8 bcm5357_bcm43236_p1div[] = {0x1, 0x5, 0x5};
334 u8 bcm5357_bcm43236_ndiv[] = {0x30, 0xf6, 0xfc};
335 struct bcma_bus *bus = cc->core->bus;
336
337 switch (bus->chipinfo.id) {
338 case BCMA_CHIP_ID_BCM5357:
339 case BCMA_CHIP_ID_BCM4749:
340 case BCMA_CHIP_ID_BCM53572:
341 /* 5357[ab]0, 43236[ab]0, and 6362b0 */
342
343 /* BCM5357 needs to touch PLL1_PLLCTL[02],
344 so offset PLL0_PLLCTL[02] by 6 */
345 phypll_offset = (bus->chipinfo.id == BCMA_CHIP_ID_BCM5357 ||
346 bus->chipinfo.id == BCMA_CHIP_ID_BCM4749 ||
347 bus->chipinfo.id == BCMA_CHIP_ID_BCM53572) ? 6 : 0;
348
349 /* RMW only the P1 divider */
350 bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR,
351 BCMA_CC_PMU_PLL_CTL0 + phypll_offset);
352 tmp = bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA);
353 tmp &= (~(BCMA_CC_PMU1_PLL0_PC0_P1DIV_MASK));
354 tmp |= (bcm5357_bcm43236_p1div[spuravoid] << BCMA_CC_PMU1_PLL0_PC0_P1DIV_SHIFT);
355 bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, tmp);
356
357 /* RMW only the int feedback divider */
358 bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR,
359 BCMA_CC_PMU_PLL_CTL2 + phypll_offset);
360 tmp = bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA);
361 tmp &= ~(BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_MASK);
362 tmp |= (bcm5357_bcm43236_ndiv[spuravoid]) << BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_SHIFT;
363 bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, tmp);
364
365 tmp = 1 << 10;
366 break;
367
368 case BCMA_CHIP_ID_BCM4331:
369 case BCMA_CHIP_ID_BCM43431:
370 if (spuravoid == 2) {
371 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
372 0x11500014);
373 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
374 0x0FC00a08);
375 } else if (spuravoid == 1) {
376 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
377 0x11500014);
378 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
379 0x0F600a08);
380 } else {
381 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
382 0x11100014);
383 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
384 0x03000a08);
385 }
386 tmp = 1 << 10;
387 break;
388
389 case BCMA_CHIP_ID_BCM43224:
390 case BCMA_CHIP_ID_BCM43225:
391 case BCMA_CHIP_ID_BCM43421:
392 if (spuravoid == 1) {
393 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
394 0x11500010);
395 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
396 0x000C0C06);
397 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
398 0x0F600a08);
399 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
400 0x00000000);
401 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
402 0x2001E920);
403 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
404 0x88888815);
405 } else {
406 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
407 0x11100010);
408 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
409 0x000c0c06);
410 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
411 0x03000a08);
412 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
413 0x00000000);
414 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
415 0x200005c0);
416 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
417 0x88888815);
418 }
419 tmp = 1 << 10;
420 break;
421
422 case BCMA_CHIP_ID_BCM4716:
423 case BCMA_CHIP_ID_BCM4748:
424 case BCMA_CHIP_ID_BCM47162:
425 if (spuravoid == 1) {
426 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
427 0x11500060);
428 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
429 0x080C0C06);
430 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
431 0x0F600000);
432 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
433 0x00000000);
434 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
435 0x2001E924);
436 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
437 0x88888815);
438 } else {
439 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
440 0x11100060);
441 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
442 0x080c0c06);
443 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
444 0x03000000);
445 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
446 0x00000000);
447 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
448 0x200005c0);
449 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
450 0x88888815);
451 }
452
453 tmp = 3 << 9;
454 break;
455
456 case BCMA_CHIP_ID_BCM43227:
457 case BCMA_CHIP_ID_BCM43228:
458 case BCMA_CHIP_ID_BCM43428:
459 /* LCNXN */
460 /* PLL Settings for spur avoidance on/off mode,
461 no on2 support for 43228A0 */
462 if (spuravoid == 1) {
463 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
464 0x01100014);
465 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
466 0x040C0C06);
467 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
468 0x03140A08);
469 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
470 0x00333333);
471 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
472 0x202C2820);
473 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
474 0x88888815);
475 } else {
476 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
477 0x11100014);
478 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
479 0x040c0c06);
480 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
481 0x03000a08);
482 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
483 0x00000000);
484 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
485 0x200005c0);
486 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
487 0x88888815);
488 }
489 tmp = 1 << 10;
490 break;
491 default:
492 bcma_err(bus, "Unknown spuravoidance settings for chip 0x%04X, not changing PLL\n",
493 bus->chipinfo.id);
494 break;
495 }
496
497 tmp |= bcma_cc_read32(cc, BCMA_CC_PMU_CTL);
498 bcma_cc_write32(cc, BCMA_CC_PMU_CTL, tmp);
499}
500EXPORT_SYMBOL_GPL(bcma_pmu_spuravoid_pllupdate);
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index c3e9dff4224e..ef34ed25bf00 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -22,15 +22,15 @@
22/* The 47162a0 hangs when reading MIPS DMP registers registers */ 22/* The 47162a0 hangs when reading MIPS DMP registers registers */
23static inline bool bcma_core_mips_bcm47162a0_quirk(struct bcma_device *dev) 23static inline bool bcma_core_mips_bcm47162a0_quirk(struct bcma_device *dev)
24{ 24{
25 return dev->bus->chipinfo.id == 47162 && dev->bus->chipinfo.rev == 0 && 25 return dev->bus->chipinfo.id == BCMA_CHIP_ID_BCM47162 &&
26 dev->id.id == BCMA_CORE_MIPS_74K; 26 dev->bus->chipinfo.rev == 0 && dev->id.id == BCMA_CORE_MIPS_74K;
27} 27}
28 28
29/* The 5357b0 hangs when reading USB20H DMP registers */ 29/* The 5357b0 hangs when reading USB20H DMP registers */
30static inline bool bcma_core_mips_bcm5357b0_quirk(struct bcma_device *dev) 30static inline bool bcma_core_mips_bcm5357b0_quirk(struct bcma_device *dev)
31{ 31{
32 return (dev->bus->chipinfo.id == 0x5357 || 32 return (dev->bus->chipinfo.id == BCMA_CHIP_ID_BCM5357 ||
33 dev->bus->chipinfo.id == 0x4749) && 33 dev->bus->chipinfo.id == BCMA_CHIP_ID_BCM4749) &&
34 dev->bus->chipinfo.pkg == 11 && 34 dev->bus->chipinfo.pkg == 11 &&
35 dev->id.id == BCMA_CORE_USB20_HOST; 35 dev->id.id == BCMA_CORE_USB20_HOST;
36} 36}
@@ -143,8 +143,8 @@ static void bcma_core_mips_set_irq(struct bcma_device *dev, unsigned int irq)
143 1 << irqflag); 143 1 << irqflag);
144 } 144 }
145 145
146 pr_info("set_irq: core 0x%04x, irq %d => %d\n", 146 bcma_info(bus, "set_irq: core 0x%04x, irq %d => %d\n",
147 dev->id.id, oldirq + 2, irq + 2); 147 dev->id.id, oldirq + 2, irq + 2);
148} 148}
149 149
150static void bcma_core_mips_print_irq(struct bcma_device *dev, unsigned int irq) 150static void bcma_core_mips_print_irq(struct bcma_device *dev, unsigned int irq)
@@ -173,7 +173,7 @@ u32 bcma_cpu_clock(struct bcma_drv_mips *mcore)
173 if (bus->drv_cc.capabilities & BCMA_CC_CAP_PMU) 173 if (bus->drv_cc.capabilities & BCMA_CC_CAP_PMU)
174 return bcma_pmu_get_clockcpu(&bus->drv_cc); 174 return bcma_pmu_get_clockcpu(&bus->drv_cc);
175 175
176 pr_err("No PMU available, need this to get the cpu clock\n"); 176 bcma_err(bus, "No PMU available, need this to get the cpu clock\n");
177 return 0; 177 return 0;
178} 178}
179EXPORT_SYMBOL(bcma_cpu_clock); 179EXPORT_SYMBOL(bcma_cpu_clock);
@@ -185,10 +185,10 @@ static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore)
185 switch (bus->drv_cc.capabilities & BCMA_CC_CAP_FLASHT) { 185 switch (bus->drv_cc.capabilities & BCMA_CC_CAP_FLASHT) {
186 case BCMA_CC_FLASHT_STSER: 186 case BCMA_CC_FLASHT_STSER:
187 case BCMA_CC_FLASHT_ATSER: 187 case BCMA_CC_FLASHT_ATSER:
188 pr_err("Serial flash not supported.\n"); 188 bcma_err(bus, "Serial flash not supported.\n");
189 break; 189 break;
190 case BCMA_CC_FLASHT_PARA: 190 case BCMA_CC_FLASHT_PARA:
191 pr_info("found parallel flash.\n"); 191 bcma_info(bus, "found parallel flash.\n");
192 bus->drv_cc.pflash.window = 0x1c000000; 192 bus->drv_cc.pflash.window = 0x1c000000;
193 bus->drv_cc.pflash.window_size = 0x02000000; 193 bus->drv_cc.pflash.window_size = 0x02000000;
194 194
@@ -199,7 +199,7 @@ static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore)
199 bus->drv_cc.pflash.buswidth = 2; 199 bus->drv_cc.pflash.buswidth = 2;
200 break; 200 break;
201 default: 201 default:
202 pr_err("flash not supported.\n"); 202 bcma_err(bus, "flash not supported.\n");
203 } 203 }
204} 204}
205 205
@@ -209,7 +209,7 @@ void bcma_core_mips_init(struct bcma_drv_mips *mcore)
209 struct bcma_device *core; 209 struct bcma_device *core;
210 bus = mcore->core->bus; 210 bus = mcore->core->bus;
211 211
212 pr_info("Initializing MIPS core...\n"); 212 bcma_info(bus, "Initializing MIPS core...\n");
213 213
214 if (!mcore->setup_done) 214 if (!mcore->setup_done)
215 mcore->assigned_irqs = 1; 215 mcore->assigned_irqs = 1;
@@ -244,7 +244,7 @@ void bcma_core_mips_init(struct bcma_drv_mips *mcore)
244 break; 244 break;
245 } 245 }
246 } 246 }
247 pr_info("IRQ reconfiguration done\n"); 247 bcma_info(bus, "IRQ reconfiguration done\n");
248 bcma_core_mips_dump_irq(bus); 248 bcma_core_mips_dump_irq(bus);
249 249
250 if (mcore->setup_done) 250 if (mcore->setup_done)
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index b9a86edfec39..cbae2c231336 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -36,7 +36,7 @@ bool __devinit bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
36 return false; 36 return false;
37 37
38 if (bus->sprom.boardflags_lo & BCMA_CORE_PCI_BFL_NOPCI) { 38 if (bus->sprom.boardflags_lo & BCMA_CORE_PCI_BFL_NOPCI) {
39 pr_info("This PCI core is disabled and not working\n"); 39 bcma_info(bus, "This PCI core is disabled and not working\n");
40 return false; 40 return false;
41 } 41 }
42 42
@@ -215,7 +215,8 @@ static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev,
215 } else { 215 } else {
216 writel(val, mmio); 216 writel(val, mmio);
217 217
218 if (chipid == 0x4716 || chipid == 0x4748) 218 if (chipid == BCMA_CHIP_ID_BCM4716 ||
219 chipid == BCMA_CHIP_ID_BCM4748)
219 readl(mmio); 220 readl(mmio);
220 } 221 }
221 222
@@ -340,6 +341,7 @@ static u8 __devinit bcma_find_pci_capability(struct bcma_drv_pci *pc,
340 */ 341 */
341static void __devinit bcma_core_pci_enable_crs(struct bcma_drv_pci *pc) 342static void __devinit bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
342{ 343{
344 struct bcma_bus *bus = pc->core->bus;
343 u8 cap_ptr, root_ctrl, root_cap, dev; 345 u8 cap_ptr, root_ctrl, root_cap, dev;
344 u16 val16; 346 u16 val16;
345 int i; 347 int i;
@@ -378,7 +380,8 @@ static void __devinit bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
378 udelay(10); 380 udelay(10);
379 } 381 }
380 if (val16 == 0x1) 382 if (val16 == 0x1)
381 pr_err("PCI: Broken device in slot %d\n", dev); 383 bcma_err(bus, "PCI: Broken device in slot %d\n",
384 dev);
382 } 385 }
383 } 386 }
384} 387}
@@ -391,11 +394,11 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
391 u32 pci_membase_1G; 394 u32 pci_membase_1G;
392 unsigned long io_map_base; 395 unsigned long io_map_base;
393 396
394 pr_info("PCIEcore in host mode found\n"); 397 bcma_info(bus, "PCIEcore in host mode found\n");
395 398
396 pc_host = kzalloc(sizeof(*pc_host), GFP_KERNEL); 399 pc_host = kzalloc(sizeof(*pc_host), GFP_KERNEL);
397 if (!pc_host) { 400 if (!pc_host) {
398 pr_err("can not allocate memory"); 401 bcma_err(bus, "can not allocate memory");
399 return; 402 return;
400 } 403 }
401 404
@@ -434,13 +437,14 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
434 * as mips can't generate 64-bit address on the 437 * as mips can't generate 64-bit address on the
435 * backplane. 438 * backplane.
436 */ 439 */
437 if (bus->chipinfo.id == 0x4716 || bus->chipinfo.id == 0x4748) { 440 if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4716 ||
441 bus->chipinfo.id == BCMA_CHIP_ID_BCM4748) {
438 pc_host->mem_resource.start = BCMA_SOC_PCI_MEM; 442 pc_host->mem_resource.start = BCMA_SOC_PCI_MEM;
439 pc_host->mem_resource.end = BCMA_SOC_PCI_MEM + 443 pc_host->mem_resource.end = BCMA_SOC_PCI_MEM +
440 BCMA_SOC_PCI_MEM_SZ - 1; 444 BCMA_SOC_PCI_MEM_SZ - 1;
441 pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0, 445 pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
442 BCMA_CORE_PCI_SBTOPCI_MEM | BCMA_SOC_PCI_MEM); 446 BCMA_CORE_PCI_SBTOPCI_MEM | BCMA_SOC_PCI_MEM);
443 } else if (bus->chipinfo.id == 0x5300) { 447 } else if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
444 tmp = BCMA_CORE_PCI_SBTOPCI_MEM; 448 tmp = BCMA_CORE_PCI_SBTOPCI_MEM;
445 tmp |= BCMA_CORE_PCI_SBTOPCI_PREF; 449 tmp |= BCMA_CORE_PCI_SBTOPCI_PREF;
446 tmp |= BCMA_CORE_PCI_SBTOPCI_BURST; 450 tmp |= BCMA_CORE_PCI_SBTOPCI_BURST;
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index 6c05cf470f96..11b32d2642df 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -18,7 +18,7 @@ static void bcma_host_pci_switch_core(struct bcma_device *core)
18 pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN2, 18 pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN2,
19 core->wrap); 19 core->wrap);
20 core->bus->mapped_core = core; 20 core->bus->mapped_core = core;
21 pr_debug("Switched to core: 0x%X\n", core->id.id); 21 bcma_debug(core->bus, "Switched to core: 0x%X\n", core->id.id);
22} 22}
23 23
24/* Provides access to the requested core. Returns base offset that has to be 24/* Provides access to the requested core. Returns base offset that has to be
@@ -188,7 +188,7 @@ static int __devinit bcma_host_pci_probe(struct pci_dev *dev,
188 188
189 /* SSB needed additional powering up, do we have any AMBA PCI cards? */ 189 /* SSB needed additional powering up, do we have any AMBA PCI cards? */
190 if (!pci_is_pcie(dev)) 190 if (!pci_is_pcie(dev))
191 pr_err("PCI card detected, report problems.\n"); 191 bcma_err(bus, "PCI card detected, report problems.\n");
192 192
193 /* Map MMIO */ 193 /* Map MMIO */
194 err = -ENOMEM; 194 err = -ENOMEM;
@@ -268,6 +268,7 @@ static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,
268 268
269static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = { 269static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
270 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) }, 270 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
271 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43224) },
271 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) }, 272 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
272 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) }, 273 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
273 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) }, 274 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 7e138ec21357..7ff4bac6f9e1 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -118,8 +118,9 @@ static int bcma_register_cores(struct bcma_bus *bus)
118 118
119 err = device_register(&core->dev); 119 err = device_register(&core->dev);
120 if (err) { 120 if (err) {
121 pr_err("Could not register dev for core 0x%03X\n", 121 bcma_err(bus,
122 core->id.id); 122 "Could not register dev for core 0x%03X\n",
123 core->id.id);
123 continue; 124 continue;
124 } 125 }
125 core->dev_registered = true; 126 core->dev_registered = true;
@@ -151,7 +152,7 @@ int __devinit bcma_bus_register(struct bcma_bus *bus)
151 /* Scan for devices (cores) */ 152 /* Scan for devices (cores) */
152 err = bcma_bus_scan(bus); 153 err = bcma_bus_scan(bus);
153 if (err) { 154 if (err) {
154 pr_err("Failed to scan: %d\n", err); 155 bcma_err(bus, "Failed to scan: %d\n", err);
155 return -1; 156 return -1;
156 } 157 }
157 158
@@ -179,14 +180,14 @@ int __devinit bcma_bus_register(struct bcma_bus *bus)
179 /* Try to get SPROM */ 180 /* Try to get SPROM */
180 err = bcma_sprom_get(bus); 181 err = bcma_sprom_get(bus);
181 if (err == -ENOENT) { 182 if (err == -ENOENT) {
182 pr_err("No SPROM available\n"); 183 bcma_err(bus, "No SPROM available\n");
183 } else if (err) 184 } else if (err)
184 pr_err("Failed to get SPROM: %d\n", err); 185 bcma_err(bus, "Failed to get SPROM: %d\n", err);
185 186
186 /* Register found cores */ 187 /* Register found cores */
187 bcma_register_cores(bus); 188 bcma_register_cores(bus);
188 189
189 pr_info("Bus registered\n"); 190 bcma_info(bus, "Bus registered\n");
190 191
191 return 0; 192 return 0;
192} 193}
@@ -214,7 +215,7 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
214 /* Scan for chip common core */ 215 /* Scan for chip common core */
215 err = bcma_bus_scan_early(bus, &match, core_cc); 216 err = bcma_bus_scan_early(bus, &match, core_cc);
216 if (err) { 217 if (err) {
217 pr_err("Failed to scan for common core: %d\n", err); 218 bcma_err(bus, "Failed to scan for common core: %d\n", err);
218 return -1; 219 return -1;
219 } 220 }
220 221
@@ -226,7 +227,7 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
226 /* Scan for mips core */ 227 /* Scan for mips core */
227 err = bcma_bus_scan_early(bus, &match, core_mips); 228 err = bcma_bus_scan_early(bus, &match, core_mips);
228 if (err) { 229 if (err) {
229 pr_err("Failed to scan for mips core: %d\n", err); 230 bcma_err(bus, "Failed to scan for mips core: %d\n", err);
230 return -1; 231 return -1;
231 } 232 }
232 233
@@ -244,7 +245,7 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
244 bcma_core_mips_init(&bus->drv_mips); 245 bcma_core_mips_init(&bus->drv_mips);
245 } 246 }
246 247
247 pr_info("Early bus registered\n"); 248 bcma_info(bus, "Early bus registered\n");
248 249
249 return 0; 250 return 0;
250} 251}
diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c
index 5ed0718fc660..a0272bbfc4f6 100644
--- a/drivers/bcma/scan.c
+++ b/drivers/bcma/scan.c
@@ -28,6 +28,12 @@ static const struct bcma_device_id_name bcma_arm_device_names[] = {
28 28
29static const struct bcma_device_id_name bcma_bcm_device_names[] = { 29static const struct bcma_device_id_name bcma_bcm_device_names[] = {
30 { BCMA_CORE_OOB_ROUTER, "OOB Router" }, 30 { BCMA_CORE_OOB_ROUTER, "OOB Router" },
31 { BCMA_CORE_4706_CHIPCOMMON, "BCM4706 ChipCommon" },
32 { BCMA_CORE_4706_SOC_RAM, "BCM4706 SOC RAM" },
33 { BCMA_CORE_4706_MAC_GBIT, "BCM4706 GBit MAC" },
34 { BCMA_CORE_AMEMC, "AMEMC (DDR)" },
35 { BCMA_CORE_ALTA, "ALTA (I2S)" },
36 { BCMA_CORE_4706_MAC_GBIT_COMMON, "BCM4706 GBit MAC Common" },
31 { BCMA_CORE_INVALID, "Invalid" }, 37 { BCMA_CORE_INVALID, "Invalid" },
32 { BCMA_CORE_CHIPCOMMON, "ChipCommon" }, 38 { BCMA_CORE_CHIPCOMMON, "ChipCommon" },
33 { BCMA_CORE_ILINE20, "ILine 20" }, 39 { BCMA_CORE_ILINE20, "ILine 20" },
@@ -334,7 +340,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
334 if (tmp <= 0) { 340 if (tmp <= 0) {
335 return -EILSEQ; 341 return -EILSEQ;
336 } else { 342 } else {
337 pr_info("Bridge found\n"); 343 bcma_info(bus, "Bridge found\n");
338 return -ENXIO; 344 return -ENXIO;
339 } 345 }
340 } 346 }
@@ -421,8 +427,8 @@ void bcma_init_bus(struct bcma_bus *bus)
421 chipinfo->id = (tmp & BCMA_CC_ID_ID) >> BCMA_CC_ID_ID_SHIFT; 427 chipinfo->id = (tmp & BCMA_CC_ID_ID) >> BCMA_CC_ID_ID_SHIFT;
422 chipinfo->rev = (tmp & BCMA_CC_ID_REV) >> BCMA_CC_ID_REV_SHIFT; 428 chipinfo->rev = (tmp & BCMA_CC_ID_REV) >> BCMA_CC_ID_REV_SHIFT;
423 chipinfo->pkg = (tmp & BCMA_CC_ID_PKG) >> BCMA_CC_ID_PKG_SHIFT; 429 chipinfo->pkg = (tmp & BCMA_CC_ID_PKG) >> BCMA_CC_ID_PKG_SHIFT;
424 pr_info("Found chip with id 0x%04X, rev 0x%02X and package 0x%02X\n", 430 bcma_info(bus, "Found chip with id 0x%04X, rev 0x%02X and package 0x%02X\n",
425 chipinfo->id, chipinfo->rev, chipinfo->pkg); 431 chipinfo->id, chipinfo->rev, chipinfo->pkg);
426 432
427 bus->init_done = true; 433 bus->init_done = true;
428} 434}
@@ -476,11 +482,10 @@ int bcma_bus_scan(struct bcma_bus *bus)
476 other_core = bcma_find_core_reverse(bus, core->id.id); 482 other_core = bcma_find_core_reverse(bus, core->id.id);
477 core->core_unit = (other_core == NULL) ? 0 : other_core->core_unit + 1; 483 core->core_unit = (other_core == NULL) ? 0 : other_core->core_unit + 1;
478 484
479 pr_info("Core %d found: %s " 485 bcma_info(bus, "Core %d found: %s (manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n",
480 "(manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n", 486 core->core_index, bcma_device_name(&core->id),
481 core->core_index, bcma_device_name(&core->id), 487 core->id.manuf, core->id.id, core->id.rev,
482 core->id.manuf, core->id.id, core->id.rev, 488 core->id.class);
483 core->id.class);
484 489
485 list_add(&core->list, &bus->cores); 490 list_add(&core->list, &bus->cores);
486 } 491 }
@@ -532,11 +537,10 @@ int __init bcma_bus_scan_early(struct bcma_bus *bus,
532 537
533 core->core_index = core_num++; 538 core->core_index = core_num++;
534 bus->nr_cores++; 539 bus->nr_cores++;
535 pr_info("Core %d found: %s " 540 bcma_info(bus, "Core %d found: %s (manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n",
536 "(manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n", 541 core->core_index, bcma_device_name(&core->id),
537 core->core_index, bcma_device_name(&core->id), 542 core->id.manuf, core->id.id, core->id.rev,
538 core->id.manuf, core->id.id, core->id.rev, 543 core->id.class);
539 core->id.class);
540 544
541 list_add(&core->list, &bus->cores); 545 list_add(&core->list, &bus->cores);
542 err = 0; 546 err = 0;
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index f16f42d36071..26823d97fd9f 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -60,11 +60,11 @@ static int bcma_fill_sprom_with_fallback(struct bcma_bus *bus,
60 if (err) 60 if (err)
61 goto fail; 61 goto fail;
62 62
63 pr_debug("Using SPROM revision %d provided by" 63 bcma_debug(bus, "Using SPROM revision %d provided by platform.\n",
64 " platform.\n", bus->sprom.revision); 64 bus->sprom.revision);
65 return 0; 65 return 0;
66fail: 66fail:
67 pr_warn("Using fallback SPROM failed (err %d)\n", err); 67 bcma_warn(bus, "Using fallback SPROM failed (err %d)\n", err);
68 return err; 68 return err;
69} 69}
70 70
@@ -468,11 +468,11 @@ static bool bcma_sprom_ext_available(struct bcma_bus *bus)
468 /* older chipcommon revisions use chip status register */ 468 /* older chipcommon revisions use chip status register */
469 chip_status = bcma_read32(bus->drv_cc.core, BCMA_CC_CHIPSTAT); 469 chip_status = bcma_read32(bus->drv_cc.core, BCMA_CC_CHIPSTAT);
470 switch (bus->chipinfo.id) { 470 switch (bus->chipinfo.id) {
471 case 0x4313: 471 case BCMA_CHIP_ID_BCM4313:
472 present_mask = BCMA_CC_CHIPST_4313_SPROM_PRESENT; 472 present_mask = BCMA_CC_CHIPST_4313_SPROM_PRESENT;
473 break; 473 break;
474 474
475 case 0x4331: 475 case BCMA_CHIP_ID_BCM4331:
476 present_mask = BCMA_CC_CHIPST_4331_SPROM_PRESENT; 476 present_mask = BCMA_CC_CHIPST_4331_SPROM_PRESENT;
477 break; 477 break;
478 478
@@ -494,16 +494,16 @@ static bool bcma_sprom_onchip_available(struct bcma_bus *bus)
494 494
495 chip_status = bcma_read32(bus->drv_cc.core, BCMA_CC_CHIPSTAT); 495 chip_status = bcma_read32(bus->drv_cc.core, BCMA_CC_CHIPSTAT);
496 switch (bus->chipinfo.id) { 496 switch (bus->chipinfo.id) {
497 case 0x4313: 497 case BCMA_CHIP_ID_BCM4313:
498 present = chip_status & BCMA_CC_CHIPST_4313_OTP_PRESENT; 498 present = chip_status & BCMA_CC_CHIPST_4313_OTP_PRESENT;
499 break; 499 break;
500 500
501 case 0x4331: 501 case BCMA_CHIP_ID_BCM4331:
502 present = chip_status & BCMA_CC_CHIPST_4331_OTP_PRESENT; 502 present = chip_status & BCMA_CC_CHIPST_4331_OTP_PRESENT;
503 break; 503 break;
504 504
505 case 43224: 505 case BCMA_CHIP_ID_BCM43224:
506 case 43225: 506 case BCMA_CHIP_ID_BCM43225:
507 /* for these chips OTP is always available */ 507 /* for these chips OTP is always available */
508 present = true; 508 present = true;
509 break; 509 break;
@@ -579,13 +579,15 @@ int bcma_sprom_get(struct bcma_bus *bus)
579 if (!sprom) 579 if (!sprom)
580 return -ENOMEM; 580 return -ENOMEM;
581 581
582 if (bus->chipinfo.id == 0x4331 || bus->chipinfo.id == 43431) 582 if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4331 ||
583 bus->chipinfo.id == BCMA_CHIP_ID_BCM43431)
583 bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, false); 584 bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, false);
584 585
585 pr_debug("SPROM offset 0x%x\n", offset); 586 bcma_debug(bus, "SPROM offset 0x%x\n", offset);
586 bcma_sprom_read(bus, offset, sprom); 587 bcma_sprom_read(bus, offset, sprom);
587 588
588 if (bus->chipinfo.id == 0x4331 || bus->chipinfo.id == 43431) 589 if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4331 ||
590 bus->chipinfo.id == BCMA_CHIP_ID_BCM43431)
589 bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true); 591 bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true);
590 592
591 err = bcma_sprom_valid(sprom); 593 err = bcma_sprom_valid(sprom);
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 1fcd92380356..585c88e01893 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -231,12 +231,12 @@ static void bluecard_write_wakeup(bluecard_info_t *info)
231 } 231 }
232 232
233 do { 233 do {
234 register unsigned int iobase = info->p_dev->resource[0]->start; 234 unsigned int iobase = info->p_dev->resource[0]->start;
235 register unsigned int offset; 235 unsigned int offset;
236 register unsigned char command; 236 unsigned char command;
237 register unsigned long ready_bit; 237 unsigned long ready_bit;
238 register struct sk_buff *skb; 238 register struct sk_buff *skb;
239 register int len; 239 int len;
240 240
241 clear_bit(XMIT_WAKEUP, &(info->tx_state)); 241 clear_bit(XMIT_WAKEUP, &(info->tx_state));
242 242
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 609861a53c28..29caaed2d715 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -470,7 +470,7 @@ static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *
470 hdev->flush = bpa10x_flush; 470 hdev->flush = bpa10x_flush;
471 hdev->send = bpa10x_send_frame; 471 hdev->send = bpa10x_send_frame;
472 472
473 set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); 473 set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
474 474
475 err = hci_register_dev(hdev); 475 err = hci_register_dev(hdev);
476 if (err < 0) { 476 if (err < 0) {
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 308c8599ab55..b2b0fbbb43b5 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -186,9 +186,9 @@ static void bt3c_write_wakeup(bt3c_info_t *info)
186 return; 186 return;
187 187
188 do { 188 do {
189 register unsigned int iobase = info->p_dev->resource[0]->start; 189 unsigned int iobase = info->p_dev->resource[0]->start;
190 register struct sk_buff *skb; 190 register struct sk_buff *skb;
191 register int len; 191 int len;
192 192
193 if (!pcmcia_dev_present(info->p_dev)) 193 if (!pcmcia_dev_present(info->p_dev))
194 break; 194 break;
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 0cd61d9f07cd..cf7588edba0d 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -110,6 +110,9 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = {
110 /* Marvell SD8787 Bluetooth device */ 110 /* Marvell SD8787 Bluetooth device */
111 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A), 111 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A),
112 .driver_data = (unsigned long) &btmrvl_sdio_sd8787 }, 112 .driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
113 /* Marvell SD8787 Bluetooth AMP device */
114 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911B),
115 .driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
113 /* Marvell SD8797 Bluetooth device */ 116 /* Marvell SD8797 Bluetooth device */
114 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A), 117 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A),
115 .driver_data = (unsigned long) &btmrvl_sdio_sd8797 }, 118 .driver_data = (unsigned long) &btmrvl_sdio_sd8797 },
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index c4fc2f3fc32c..65b8d996840c 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -140,9 +140,9 @@ static void btuart_write_wakeup(btuart_info_t *info)
140 } 140 }
141 141
142 do { 142 do {
143 register unsigned int iobase = info->p_dev->resource[0]->start; 143 unsigned int iobase = info->p_dev->resource[0]->start;
144 register struct sk_buff *skb; 144 register struct sk_buff *skb;
145 register int len; 145 int len;
146 146
147 clear_bit(XMIT_WAKEUP, &(info->tx_state)); 147 clear_bit(XMIT_WAKEUP, &(info->tx_state));
148 148
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 83ebb241bfcc..e27221411036 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -21,15 +21,7 @@
21 * 21 *
22 */ 22 */
23 23
24#include <linux/kernel.h>
25#include <linux/module.h> 24#include <linux/module.h>
26#include <linux/init.h>
27#include <linux/slab.h>
28#include <linux/types.h>
29#include <linux/sched.h>
30#include <linux/errno.h>
31#include <linux/skbuff.h>
32
33#include <linux/usb.h> 25#include <linux/usb.h>
34 26
35#include <net/bluetooth/bluetooth.h> 27#include <net/bluetooth/bluetooth.h>
@@ -1028,7 +1020,7 @@ static int btusb_probe(struct usb_interface *intf,
1028 data->isoc = usb_ifnum_to_if(data->udev, 1); 1020 data->isoc = usb_ifnum_to_if(data->udev, 1);
1029 1021
1030 if (!reset) 1022 if (!reset)
1031 set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); 1023 set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
1032 1024
1033 if (force_scofix || id->driver_info & BTUSB_WRONG_SCO_MTU) { 1025 if (force_scofix || id->driver_info & BTUSB_WRONG_SCO_MTU) {
1034 if (!disable_scofix) 1026 if (!disable_scofix)
@@ -1040,7 +1032,7 @@ static int btusb_probe(struct usb_interface *intf,
1040 1032
1041 if (id->driver_info & BTUSB_DIGIANSWER) { 1033 if (id->driver_info & BTUSB_DIGIANSWER) {
1042 data->cmdreq_type = USB_TYPE_VENDOR; 1034 data->cmdreq_type = USB_TYPE_VENDOR;
1043 set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); 1035 set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
1044 } 1036 }
1045 1037
1046 if (id->driver_info & BTUSB_CSR) { 1038 if (id->driver_info & BTUSB_CSR) {
@@ -1048,7 +1040,7 @@ static int btusb_probe(struct usb_interface *intf,
1048 1040
1049 /* Old firmware would otherwise execute USB reset */ 1041 /* Old firmware would otherwise execute USB reset */
1050 if (le16_to_cpu(udev->descriptor.bcdDevice) < 0x117) 1042 if (le16_to_cpu(udev->descriptor.bcdDevice) < 0x117)
1051 set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); 1043 set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
1052 } 1044 }
1053 1045
1054 if (id->driver_info & BTUSB_SNIFFER) { 1046 if (id->driver_info & BTUSB_SNIFFER) {
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 6e8d96189684..b1b37ccd3cd4 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -144,9 +144,9 @@ static void dtl1_write_wakeup(dtl1_info_t *info)
144 } 144 }
145 145
146 do { 146 do {
147 register unsigned int iobase = info->p_dev->resource[0]->start; 147 unsigned int iobase = info->p_dev->resource[0]->start;
148 register struct sk_buff *skb; 148 register struct sk_buff *skb;
149 register int len; 149 int len;
150 150
151 clear_bit(XMIT_WAKEUP, &(info->tx_state)); 151 clear_bit(XMIT_WAKEUP, &(info->tx_state));
152 152
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 661a8dc4d2f8..57e502e06080 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -552,7 +552,7 @@ static u16 bscp_get_crc(struct bcsp_struct *bcsp)
552static int bcsp_recv(struct hci_uart *hu, void *data, int count) 552static int bcsp_recv(struct hci_uart *hu, void *data, int count)
553{ 553{
554 struct bcsp_struct *bcsp = hu->priv; 554 struct bcsp_struct *bcsp = hu->priv;
555 register unsigned char *ptr; 555 unsigned char *ptr;
556 556
557 BT_DBG("hu %p count %d rx_state %d rx_count %ld", 557 BT_DBG("hu %p count %d rx_state %d rx_count %ld",
558 hu, count, bcsp->rx_state, bcsp->rx_count); 558 hu, count, bcsp->rx_state, bcsp->rx_count);
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 748329468d26..c60623f206d4 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -126,7 +126,7 @@ static int h4_enqueue(struct hci_uart *hu, struct sk_buff *skb)
126 126
127static inline int h4_check_data_len(struct h4_struct *h4, int len) 127static inline int h4_check_data_len(struct h4_struct *h4, int len)
128{ 128{
129 register int room = skb_tailroom(h4->rx_skb); 129 int room = skb_tailroom(h4->rx_skb);
130 130
131 BT_DBG("len %d room %d", len, room); 131 BT_DBG("len %d room %d", len, room);
132 132
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index e564579a6115..2f9b796e106e 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -394,7 +394,7 @@ static int hci_uart_register_dev(struct hci_uart *hu)
394 set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); 394 set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
395 395
396 if (!test_bit(HCI_UART_RESET_ON_INIT, &hu->hdev_flags)) 396 if (!test_bit(HCI_UART_RESET_ON_INIT, &hu->hdev_flags))
397 set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); 397 set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
398 398
399 if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags)) 399 if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags))
400 hdev->dev_type = HCI_AMP; 400 hdev->dev_type = HCI_AMP;
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index b874c0efde24..ff6d589c34a5 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -348,7 +348,7 @@ static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb)
348 348
349static inline int ll_check_data_len(struct ll_struct *ll, int len) 349static inline int ll_check_data_len(struct ll_struct *ll, int len)
350{ 350{
351 register int room = skb_tailroom(ll->rx_skb); 351 int room = skb_tailroom(ll->rx_skb);
352 352
353 BT_DBG("len %d room %d", len, room); 353 BT_DBG("len %d room %d", len, room);
354 354
@@ -374,11 +374,11 @@ static inline int ll_check_data_len(struct ll_struct *ll, int len)
374static int ll_recv(struct hci_uart *hu, void *data, int count) 374static int ll_recv(struct hci_uart *hu, void *data, int count)
375{ 375{
376 struct ll_struct *ll = hu->priv; 376 struct ll_struct *ll = hu->priv;
377 register char *ptr; 377 char *ptr;
378 struct hci_event_hdr *eh; 378 struct hci_event_hdr *eh;
379 struct hci_acl_hdr *ah; 379 struct hci_acl_hdr *ah;
380 struct hci_sco_hdr *sh; 380 struct hci_sco_hdr *sh;
381 register int len, type, dlen; 381 int len, type, dlen;
382 382
383 BT_DBG("hu %p count %d rx_state %ld rx_count %ld", hu, count, ll->rx_state, ll->rx_count); 383 BT_DBG("hu %p count %d rx_state %ld rx_count %ld", hu, count, ll->rx_state, ll->rx_count);
384 384
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 77e1e6cd66ce..3e92b7d3fcd2 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -46,7 +46,7 @@ static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 };
46static inline void get_seq(__u32 *ts, int *cpu) 46static inline void get_seq(__u32 *ts, int *cpu)
47{ 47{
48 preempt_disable(); 48 preempt_disable();
49 *ts = __this_cpu_inc_return(proc_event_counts) -1; 49 *ts = __this_cpu_inc_return(proc_event_counts) - 1;
50 *cpu = smp_processor_id(); 50 *cpu = smp_processor_id();
51 preempt_enable(); 51 preempt_enable();
52} 52}
@@ -62,8 +62,8 @@ void proc_fork_connector(struct task_struct *task)
62 if (atomic_read(&proc_event_num_listeners) < 1) 62 if (atomic_read(&proc_event_num_listeners) < 1)
63 return; 63 return;
64 64
65 msg = (struct cn_msg*)buffer; 65 msg = (struct cn_msg *)buffer;
66 ev = (struct proc_event*)msg->data; 66 ev = (struct proc_event *)msg->data;
67 get_seq(&msg->seq, &ev->cpu); 67 get_seq(&msg->seq, &ev->cpu);
68 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 68 ktime_get_ts(&ts); /* get high res monotonic timestamp */
69 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 69 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -93,8 +93,8 @@ void proc_exec_connector(struct task_struct *task)
93 if (atomic_read(&proc_event_num_listeners) < 1) 93 if (atomic_read(&proc_event_num_listeners) < 1)
94 return; 94 return;
95 95
96 msg = (struct cn_msg*)buffer; 96 msg = (struct cn_msg *)buffer;
97 ev = (struct proc_event*)msg->data; 97 ev = (struct proc_event *)msg->data;
98 get_seq(&msg->seq, &ev->cpu); 98 get_seq(&msg->seq, &ev->cpu);
99 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 99 ktime_get_ts(&ts); /* get high res monotonic timestamp */
100 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 100 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -119,8 +119,8 @@ void proc_id_connector(struct task_struct *task, int which_id)
119 if (atomic_read(&proc_event_num_listeners) < 1) 119 if (atomic_read(&proc_event_num_listeners) < 1)
120 return; 120 return;
121 121
122 msg = (struct cn_msg*)buffer; 122 msg = (struct cn_msg *)buffer;
123 ev = (struct proc_event*)msg->data; 123 ev = (struct proc_event *)msg->data;
124 ev->what = which_id; 124 ev->what = which_id;
125 ev->event_data.id.process_pid = task->pid; 125 ev->event_data.id.process_pid = task->pid;
126 ev->event_data.id.process_tgid = task->tgid; 126 ev->event_data.id.process_tgid = task->tgid;
@@ -134,7 +134,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
134 ev->event_data.id.e.egid = cred->egid; 134 ev->event_data.id.e.egid = cred->egid;
135 } else { 135 } else {
136 rcu_read_unlock(); 136 rcu_read_unlock();
137 return; 137 return;
138 } 138 }
139 rcu_read_unlock(); 139 rcu_read_unlock();
140 get_seq(&msg->seq, &ev->cpu); 140 get_seq(&msg->seq, &ev->cpu);
@@ -241,8 +241,8 @@ void proc_exit_connector(struct task_struct *task)
241 if (atomic_read(&proc_event_num_listeners) < 1) 241 if (atomic_read(&proc_event_num_listeners) < 1)
242 return; 242 return;
243 243
244 msg = (struct cn_msg*)buffer; 244 msg = (struct cn_msg *)buffer;
245 ev = (struct proc_event*)msg->data; 245 ev = (struct proc_event *)msg->data;
246 get_seq(&msg->seq, &ev->cpu); 246 get_seq(&msg->seq, &ev->cpu);
247 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 247 ktime_get_ts(&ts); /* get high res monotonic timestamp */
248 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 248 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -276,8 +276,8 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
276 if (atomic_read(&proc_event_num_listeners) < 1) 276 if (atomic_read(&proc_event_num_listeners) < 1)
277 return; 277 return;
278 278
279 msg = (struct cn_msg*)buffer; 279 msg = (struct cn_msg *)buffer;
280 ev = (struct proc_event*)msg->data; 280 ev = (struct proc_event *)msg->data;
281 msg->seq = rcvd_seq; 281 msg->seq = rcvd_seq;
282 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 282 ktime_get_ts(&ts); /* get high res monotonic timestamp */
283 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 283 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -303,7 +303,7 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
303 if (msg->len != sizeof(*mc_op)) 303 if (msg->len != sizeof(*mc_op))
304 return; 304 return;
305 305
306 mc_op = (enum proc_cn_mcast_op*)msg->data; 306 mc_op = (enum proc_cn_mcast_op *)msg->data;
307 switch (*mc_op) { 307 switch (*mc_op) {
308 case PROC_CN_MCAST_LISTEN: 308 case PROC_CN_MCAST_LISTEN:
309 atomic_inc(&proc_event_num_listeners); 309 atomic_inc(&proc_event_num_listeners);
@@ -325,11 +325,11 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
325 */ 325 */
326static int __init cn_proc_init(void) 326static int __init cn_proc_init(void)
327{ 327{
328 int err; 328 int err = cn_add_callback(&cn_proc_event_id,
329 329 "cn_proc",
330 if ((err = cn_add_callback(&cn_proc_event_id, "cn_proc", 330 &cn_proc_mcast_ctl);
331 &cn_proc_mcast_ctl))) { 331 if (err) {
332 printk(KERN_WARNING "cn_proc failed to register\n"); 332 pr_warn("cn_proc failed to register\n");
333 return err; 333 return err;
334 } 334 }
335 return 0; 335 return 0;
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
index c42c9d517790..1f8bf054d11c 100644
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * cn_queue.c 2 * cn_queue.c
3 * 3 *
4 * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net> 4 * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
5 * All rights reserved. 5 * All rights reserved.
@@ -34,13 +34,14 @@
34static struct cn_callback_entry * 34static struct cn_callback_entry *
35cn_queue_alloc_callback_entry(struct cn_queue_dev *dev, const char *name, 35cn_queue_alloc_callback_entry(struct cn_queue_dev *dev, const char *name,
36 struct cb_id *id, 36 struct cb_id *id,
37 void (*callback)(struct cn_msg *, struct netlink_skb_parms *)) 37 void (*callback)(struct cn_msg *,
38 struct netlink_skb_parms *))
38{ 39{
39 struct cn_callback_entry *cbq; 40 struct cn_callback_entry *cbq;
40 41
41 cbq = kzalloc(sizeof(*cbq), GFP_KERNEL); 42 cbq = kzalloc(sizeof(*cbq), GFP_KERNEL);
42 if (!cbq) { 43 if (!cbq) {
43 printk(KERN_ERR "Failed to create new callback queue.\n"); 44 pr_err("Failed to create new callback queue.\n");
44 return NULL; 45 return NULL;
45 } 46 }
46 47
@@ -71,7 +72,8 @@ int cn_cb_equal(struct cb_id *i1, struct cb_id *i2)
71 72
72int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name, 73int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
73 struct cb_id *id, 74 struct cb_id *id,
74 void (*callback)(struct cn_msg *, struct netlink_skb_parms *)) 75 void (*callback)(struct cn_msg *,
76 struct netlink_skb_parms *))
75{ 77{
76 struct cn_callback_entry *cbq, *__cbq; 78 struct cn_callback_entry *cbq, *__cbq;
77 int found = 0; 79 int found = 0;
@@ -149,7 +151,7 @@ void cn_queue_free_dev(struct cn_queue_dev *dev)
149 spin_unlock_bh(&dev->queue_lock); 151 spin_unlock_bh(&dev->queue_lock);
150 152
151 while (atomic_read(&dev->refcnt)) { 153 while (atomic_read(&dev->refcnt)) {
152 printk(KERN_INFO "Waiting for %s to become free: refcnt=%d.\n", 154 pr_info("Waiting for %s to become free: refcnt=%d.\n",
153 dev->name, atomic_read(&dev->refcnt)); 155 dev->name, atomic_read(&dev->refcnt));
154 msleep(1000); 156 msleep(1000);
155 } 157 }
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index dde6a0fad408..82fa4f0f91d6 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * connector.c 2 * connector.c
3 * 3 *
4 * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net> 4 * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
5 * All rights reserved. 5 * All rights reserved.
@@ -101,19 +101,19 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
101 if (!skb) 101 if (!skb)
102 return -ENOMEM; 102 return -ENOMEM;
103 103
104 nlh = NLMSG_PUT(skb, 0, msg->seq, NLMSG_DONE, size - sizeof(*nlh)); 104 nlh = nlmsg_put(skb, 0, msg->seq, NLMSG_DONE, size - sizeof(*nlh), 0);
105 if (!nlh) {
106 kfree_skb(skb);
107 return -EMSGSIZE;
108 }
105 109
106 data = NLMSG_DATA(nlh); 110 data = nlmsg_data(nlh);
107 111
108 memcpy(data, msg, sizeof(*data) + msg->len); 112 memcpy(data, msg, sizeof(*data) + msg->len);
109 113
110 NETLINK_CB(skb).dst_group = group; 114 NETLINK_CB(skb).dst_group = group;
111 115
112 return netlink_broadcast(dev->nls, skb, 0, group, gfp_mask); 116 return netlink_broadcast(dev->nls, skb, 0, group, gfp_mask);
113
114nlmsg_failure:
115 kfree_skb(skb);
116 return -EINVAL;
117} 117}
118EXPORT_SYMBOL_GPL(cn_netlink_send); 118EXPORT_SYMBOL_GPL(cn_netlink_send);
119 119
@@ -185,7 +185,8 @@ static void cn_rx_skb(struct sk_buff *__skb)
185 * May sleep. 185 * May sleep.
186 */ 186 */
187int cn_add_callback(struct cb_id *id, const char *name, 187int cn_add_callback(struct cb_id *id, const char *name,
188 void (*callback)(struct cn_msg *, struct netlink_skb_parms *)) 188 void (*callback)(struct cn_msg *,
189 struct netlink_skb_parms *))
189{ 190{
190 int err; 191 int err;
191 struct cn_dev *dev = &cdev; 192 struct cn_dev *dev = &cdev;
@@ -251,15 +252,20 @@ static const struct file_operations cn_file_ops = {
251 .release = single_release 252 .release = single_release
252}; 253};
253 254
255static struct cn_dev cdev = {
256 .input = cn_rx_skb,
257};
258
254static int __devinit cn_init(void) 259static int __devinit cn_init(void)
255{ 260{
256 struct cn_dev *dev = &cdev; 261 struct cn_dev *dev = &cdev;
257 262 struct netlink_kernel_cfg cfg = {
258 dev->input = cn_rx_skb; 263 .groups = CN_NETLINK_USERS + 0xf,
264 .input = dev->input,
265 };
259 266
260 dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, 267 dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR,
261 CN_NETLINK_USERS + 0xf, 268 THIS_MODULE, &cfg);
262 dev->input, NULL, THIS_MODULE);
263 if (!dev->nls) 269 if (!dev->nls)
264 return -EIO; 270 return -EIO;
265 271
diff --git a/drivers/ieee802154/Kconfig b/drivers/ieee802154/Kconfig
index 15c064073701..1fc4eefc20ed 100644
--- a/drivers/ieee802154/Kconfig
+++ b/drivers/ieee802154/Kconfig
@@ -19,6 +19,7 @@ config IEEE802154_FAKEHARD
19 19
20 This driver can also be built as a module. To do so say M here. 20 This driver can also be built as a module. To do so say M here.
21 The module will be called 'fakehard'. 21 The module will be called 'fakehard'.
22
22config IEEE802154_FAKELB 23config IEEE802154_FAKELB
23 depends on IEEE802154_DRIVERS && MAC802154 24 depends on IEEE802154_DRIVERS && MAC802154
24 tristate "IEEE 802.15.4 loopback driver" 25 tristate "IEEE 802.15.4 loopback driver"
@@ -28,3 +29,8 @@ config IEEE802154_FAKELB
28 29
29 This driver can also be built as a module. To do so say M here. 30 This driver can also be built as a module. To do so say M here.
30 The module will be called 'fakelb'. 31 The module will be called 'fakelb'.
32
33config IEEE802154_AT86RF230
34 depends on IEEE802154_DRIVERS && MAC802154
35 tristate "AT86RF230/231 transceiver driver"
36 depends on SPI
diff --git a/drivers/ieee802154/Makefile b/drivers/ieee802154/Makefile
index ea784ea6f0f8..4f4371d3aa7d 100644
--- a/drivers/ieee802154/Makefile
+++ b/drivers/ieee802154/Makefile
@@ -1,2 +1,3 @@
1obj-$(CONFIG_IEEE802154_FAKEHARD) += fakehard.o 1obj-$(CONFIG_IEEE802154_FAKEHARD) += fakehard.o
2obj-$(CONFIG_IEEE802154_FAKELB) += fakelb.o 2obj-$(CONFIG_IEEE802154_FAKELB) += fakelb.o
3obj-$(CONFIG_IEEE802154_AT86RF230) += at86rf230.o
diff --git a/drivers/ieee802154/at86rf230.c b/drivers/ieee802154/at86rf230.c
new file mode 100644
index 000000000000..5d309408395d
--- /dev/null
+++ b/drivers/ieee802154/at86rf230.c
@@ -0,0 +1,968 @@
1/*
2 * AT86RF230/RF231 driver
3 *
4 * Copyright (C) 2009-2012 Siemens AG
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Written by:
20 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
21 * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
22 */
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/interrupt.h>
26#include <linux/gpio.h>
27#include <linux/delay.h>
28#include <linux/mutex.h>
29#include <linux/workqueue.h>
30#include <linux/spinlock.h>
31#include <linux/spi/spi.h>
32#include <linux/spi/at86rf230.h>
33#include <linux/skbuff.h>
34
35#include <net/mac802154.h>
36#include <net/wpan-phy.h>
37
/*
 * Per-device driver state, allocated as the private area of the
 * ieee802154_dev returned by ieee802154_alloc_device().
 */
struct at86rf230_local {
	struct spi_device *spi;		/* underlying SPI slave device */
	int rstn, slp_tr, dig2;		/* GPIO numbers from platform data */

	u8 part;			/* PART_NUM register (chip id) */
	u8 vers;			/* VERSION_NUM register */

	u8 buf[2];			/* SPI command/status scratch; guarded by bmux */
	struct mutex bmux;		/* serializes register and frame-buffer access */

	struct work_struct irqwork;	/* bottom half for the transceiver IRQ */
	struct completion tx_complete;	/* completed on TRX_END after a transmit */

	struct ieee802154_dev *dev;	/* back pointer to the MAC device */

	spinlock_t lock;		/* protects irq_disabled and is_tx */
	bool irq_disabled;		/* IRQ line masked, bottom half pending */
	bool is_tx;			/* a transmission is in flight */
};
57
58#define RG_TRX_STATUS (0x01)
59#define SR_TRX_STATUS 0x01, 0x1f, 0
60#define SR_RESERVED_01_3 0x01, 0x20, 5
61#define SR_CCA_STATUS 0x01, 0x40, 6
62#define SR_CCA_DONE 0x01, 0x80, 7
63#define RG_TRX_STATE (0x02)
64#define SR_TRX_CMD 0x02, 0x1f, 0
65#define SR_TRAC_STATUS 0x02, 0xe0, 5
66#define RG_TRX_CTRL_0 (0x03)
67#define SR_CLKM_CTRL 0x03, 0x07, 0
68#define SR_CLKM_SHA_SEL 0x03, 0x08, 3
69#define SR_PAD_IO_CLKM 0x03, 0x30, 4
70#define SR_PAD_IO 0x03, 0xc0, 6
71#define RG_TRX_CTRL_1 (0x04)
72#define SR_IRQ_POLARITY 0x04, 0x01, 0
73#define SR_IRQ_MASK_MODE 0x04, 0x02, 1
74#define SR_SPI_CMD_MODE 0x04, 0x0c, 2
75#define SR_RX_BL_CTRL 0x04, 0x10, 4
76#define SR_TX_AUTO_CRC_ON 0x04, 0x20, 5
77#define SR_IRQ_2_EXT_EN 0x04, 0x40, 6
78#define SR_PA_EXT_EN 0x04, 0x80, 7
79#define RG_PHY_TX_PWR (0x05)
80#define SR_TX_PWR 0x05, 0x0f, 0
81#define SR_PA_LT 0x05, 0x30, 4
82#define SR_PA_BUF_LT 0x05, 0xc0, 6
83#define RG_PHY_RSSI (0x06)
84#define SR_RSSI 0x06, 0x1f, 0
85#define SR_RND_VALUE 0x06, 0x60, 5
86#define SR_RX_CRC_VALID 0x06, 0x80, 7
87#define RG_PHY_ED_LEVEL (0x07)
88#define SR_ED_LEVEL 0x07, 0xff, 0
89#define RG_PHY_CC_CCA (0x08)
90#define SR_CHANNEL 0x08, 0x1f, 0
91#define SR_CCA_MODE 0x08, 0x60, 5
92#define SR_CCA_REQUEST 0x08, 0x80, 7
93#define RG_CCA_THRES (0x09)
94#define SR_CCA_ED_THRES 0x09, 0x0f, 0
95#define SR_RESERVED_09_1 0x09, 0xf0, 4
96#define RG_RX_CTRL (0x0a)
97#define SR_PDT_THRES 0x0a, 0x0f, 0
98#define SR_RESERVED_0a_1 0x0a, 0xf0, 4
99#define RG_SFD_VALUE (0x0b)
100#define SR_SFD_VALUE 0x0b, 0xff, 0
101#define RG_TRX_CTRL_2 (0x0c)
102#define SR_OQPSK_DATA_RATE 0x0c, 0x03, 0
103#define SR_RESERVED_0c_2 0x0c, 0x7c, 2
104#define SR_RX_SAFE_MODE 0x0c, 0x80, 7
105#define RG_ANT_DIV (0x0d)
106#define SR_ANT_CTRL 0x0d, 0x03, 0
107#define SR_ANT_EXT_SW_EN 0x0d, 0x04, 2
108#define SR_ANT_DIV_EN 0x0d, 0x08, 3
109#define SR_RESERVED_0d_2 0x0d, 0x70, 4
110#define SR_ANT_SEL 0x0d, 0x80, 7
111#define RG_IRQ_MASK (0x0e)
112#define SR_IRQ_MASK 0x0e, 0xff, 0
113#define RG_IRQ_STATUS (0x0f)
114#define SR_IRQ_0_PLL_LOCK 0x0f, 0x01, 0
115#define SR_IRQ_1_PLL_UNLOCK 0x0f, 0x02, 1
116#define SR_IRQ_2_RX_START 0x0f, 0x04, 2
117#define SR_IRQ_3_TRX_END 0x0f, 0x08, 3
118#define SR_IRQ_4_CCA_ED_DONE 0x0f, 0x10, 4
119#define SR_IRQ_5_AMI 0x0f, 0x20, 5
120#define SR_IRQ_6_TRX_UR 0x0f, 0x40, 6
121#define SR_IRQ_7_BAT_LOW 0x0f, 0x80, 7
122#define RG_VREG_CTRL (0x10)
123#define SR_RESERVED_10_6 0x10, 0x03, 0
124#define SR_DVDD_OK 0x10, 0x04, 2
125#define SR_DVREG_EXT 0x10, 0x08, 3
126#define SR_RESERVED_10_3 0x10, 0x30, 4
127#define SR_AVDD_OK 0x10, 0x40, 6
128#define SR_AVREG_EXT 0x10, 0x80, 7
129#define RG_BATMON (0x11)
130#define SR_BATMON_VTH 0x11, 0x0f, 0
131#define SR_BATMON_HR 0x11, 0x10, 4
132#define SR_BATMON_OK 0x11, 0x20, 5
133#define SR_RESERVED_11_1 0x11, 0xc0, 6
134#define RG_XOSC_CTRL (0x12)
135#define SR_XTAL_TRIM 0x12, 0x0f, 0
136#define SR_XTAL_MODE 0x12, 0xf0, 4
137#define RG_RX_SYN (0x15)
138#define SR_RX_PDT_LEVEL 0x15, 0x0f, 0
139#define SR_RESERVED_15_2 0x15, 0x70, 4
140#define SR_RX_PDT_DIS 0x15, 0x80, 7
141#define RG_XAH_CTRL_1 (0x17)
142#define SR_RESERVED_17_8 0x17, 0x01, 0
143#define SR_AACK_PROM_MODE 0x17, 0x02, 1
144#define SR_AACK_ACK_TIME 0x17, 0x04, 2
145#define SR_RESERVED_17_5 0x17, 0x08, 3
146#define SR_AACK_UPLD_RES_FT 0x17, 0x10, 4
147#define SR_AACK_FLTR_RES_FT 0x17, 0x20, 5
148#define SR_RESERVED_17_2 0x17, 0x40, 6
149#define SR_RESERVED_17_1 0x17, 0x80, 7
150#define RG_FTN_CTRL (0x18)
151#define SR_RESERVED_18_2 0x18, 0x7f, 0
152#define SR_FTN_START 0x18, 0x80, 7
153#define RG_PLL_CF (0x1a)
154#define SR_RESERVED_1a_2 0x1a, 0x7f, 0
155#define SR_PLL_CF_START 0x1a, 0x80, 7
156#define RG_PLL_DCU (0x1b)
157#define SR_RESERVED_1b_3 0x1b, 0x3f, 0
158#define SR_RESERVED_1b_2 0x1b, 0x40, 6
159#define SR_PLL_DCU_START 0x1b, 0x80, 7
160#define RG_PART_NUM (0x1c)
161#define SR_PART_NUM 0x1c, 0xff, 0
162#define RG_VERSION_NUM (0x1d)
163#define SR_VERSION_NUM 0x1d, 0xff, 0
164#define RG_MAN_ID_0 (0x1e)
165#define SR_MAN_ID_0 0x1e, 0xff, 0
166#define RG_MAN_ID_1 (0x1f)
167#define SR_MAN_ID_1 0x1f, 0xff, 0
168#define RG_SHORT_ADDR_0 (0x20)
169#define SR_SHORT_ADDR_0 0x20, 0xff, 0
170#define RG_SHORT_ADDR_1 (0x21)
171#define SR_SHORT_ADDR_1 0x21, 0xff, 0
172#define RG_PAN_ID_0 (0x22)
173#define SR_PAN_ID_0 0x22, 0xff, 0
174#define RG_PAN_ID_1 (0x23)
175#define SR_PAN_ID_1 0x23, 0xff, 0
176#define RG_IEEE_ADDR_0 (0x24)
177#define SR_IEEE_ADDR_0 0x24, 0xff, 0
178#define RG_IEEE_ADDR_1 (0x25)
179#define SR_IEEE_ADDR_1 0x25, 0xff, 0
180#define RG_IEEE_ADDR_2 (0x26)
181#define SR_IEEE_ADDR_2 0x26, 0xff, 0
182#define RG_IEEE_ADDR_3 (0x27)
183#define SR_IEEE_ADDR_3 0x27, 0xff, 0
184#define RG_IEEE_ADDR_4 (0x28)
185#define SR_IEEE_ADDR_4 0x28, 0xff, 0
186#define RG_IEEE_ADDR_5 (0x29)
187#define SR_IEEE_ADDR_5 0x29, 0xff, 0
188#define RG_IEEE_ADDR_6 (0x2a)
189#define SR_IEEE_ADDR_6 0x2a, 0xff, 0
190#define RG_IEEE_ADDR_7 (0x2b)
191#define SR_IEEE_ADDR_7 0x2b, 0xff, 0
192#define RG_XAH_CTRL_0 (0x2c)
193#define SR_SLOTTED_OPERATION 0x2c, 0x01, 0
194#define SR_MAX_CSMA_RETRIES 0x2c, 0x0e, 1
195#define SR_MAX_FRAME_RETRIES 0x2c, 0xf0, 4
196#define RG_CSMA_SEED_0 (0x2d)
197#define SR_CSMA_SEED_0 0x2d, 0xff, 0
198#define RG_CSMA_SEED_1 (0x2e)
199#define SR_CSMA_SEED_1 0x2e, 0x07, 0
200#define SR_AACK_I_AM_COORD 0x2e, 0x08, 3
201#define SR_AACK_DIS_ACK 0x2e, 0x10, 4
202#define SR_AACK_SET_PD 0x2e, 0x20, 5
203#define SR_AACK_FVN_MODE 0x2e, 0xc0, 6
204#define RG_CSMA_BE (0x2f)
205#define SR_MIN_BE 0x2f, 0x0f, 0
206#define SR_MAX_BE 0x2f, 0xf0, 4
207
208#define CMD_REG 0x80
209#define CMD_REG_MASK 0x3f
210#define CMD_WRITE 0x40
211#define CMD_FB 0x20
212
213#define IRQ_BAT_LOW (1 << 7)
214#define IRQ_TRX_UR (1 << 6)
215#define IRQ_AMI (1 << 5)
216#define IRQ_CCA_ED (1 << 4)
217#define IRQ_TRX_END (1 << 3)
218#define IRQ_RX_START (1 << 2)
219#define IRQ_PLL_UNL (1 << 1)
220#define IRQ_PLL_LOCK (1 << 0)
221
222#define STATE_P_ON 0x00 /* BUSY */
223#define STATE_BUSY_RX 0x01
224#define STATE_BUSY_TX 0x02
225#define STATE_FORCE_TRX_OFF 0x03
226#define STATE_FORCE_TX_ON 0x04 /* IDLE */
227/* 0x05 */ /* INVALID_PARAMETER */
228#define STATE_RX_ON 0x06
229/* 0x07 */ /* SUCCESS */
230#define STATE_TRX_OFF 0x08
231#define STATE_TX_ON 0x09
232/* 0x0a - 0x0e */ /* 0x0a - UNSUPPORTED_ATTRIBUTE */
233#define STATE_SLEEP 0x0F
234#define STATE_BUSY_RX_AACK 0x11
235#define STATE_BUSY_TX_ARET 0x12
236#define STATE_BUSY_RX_AACK_ON 0x16
237#define STATE_BUSY_TX_ARET_ON 0x19
238#define STATE_RX_ON_NOCLK 0x1C
239#define STATE_RX_AACK_ON_NOCLK 0x1D
240#define STATE_BUSY_RX_AACK_NOCLK 0x1E
241#define STATE_TRANSITION_IN_PROGRESS 0x1F
242
243static int
244__at86rf230_write(struct at86rf230_local *lp, u8 addr, u8 data)
245{
246 u8 *buf = lp->buf;
247 int status;
248 struct spi_message msg;
249 struct spi_transfer xfer = {
250 .len = 2,
251 .tx_buf = buf,
252 };
253
254 buf[0] = (addr & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
255 buf[1] = data;
256 dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
257 dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
258 spi_message_init(&msg);
259 spi_message_add_tail(&xfer, &msg);
260
261 status = spi_sync(lp->spi, &msg);
262 dev_vdbg(&lp->spi->dev, "status = %d\n", status);
263 if (msg.status)
264 status = msg.status;
265
266 dev_vdbg(&lp->spi->dev, "status = %d\n", status);
267 dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
268 dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
269
270 return status;
271}
272
273static int
274__at86rf230_read_subreg(struct at86rf230_local *lp,
275 u8 addr, u8 mask, int shift, u8 *data)
276{
277 u8 *buf = lp->buf;
278 int status;
279 struct spi_message msg;
280 struct spi_transfer xfer = {
281 .len = 2,
282 .tx_buf = buf,
283 .rx_buf = buf,
284 };
285
286 buf[0] = (addr & CMD_REG_MASK) | CMD_REG;
287 buf[1] = 0xff;
288 dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
289 spi_message_init(&msg);
290 spi_message_add_tail(&xfer, &msg);
291
292 status = spi_sync(lp->spi, &msg);
293 dev_vdbg(&lp->spi->dev, "status = %d\n", status);
294 if (msg.status)
295 status = msg.status;
296
297 dev_vdbg(&lp->spi->dev, "status = %d\n", status);
298 dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
299 dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
300
301 if (status == 0)
302 *data = buf[1];
303
304 return status;
305}
306
307static int
308at86rf230_read_subreg(struct at86rf230_local *lp,
309 u8 addr, u8 mask, int shift, u8 *data)
310{
311 int status;
312
313 mutex_lock(&lp->bmux);
314 status = __at86rf230_read_subreg(lp, addr, mask, shift, data);
315 mutex_unlock(&lp->bmux);
316
317 return status;
318}
319
320static int
321at86rf230_write_subreg(struct at86rf230_local *lp,
322 u8 addr, u8 mask, int shift, u8 data)
323{
324 int status;
325 u8 val;
326
327 mutex_lock(&lp->bmux);
328 status = __at86rf230_read_subreg(lp, addr, 0xff, 0, &val);
329 if (status)
330 goto out;
331
332 val &= ~mask;
333 val |= (data << shift) & mask;
334
335 status = __at86rf230_write(lp, addr, val);
336out:
337 mutex_unlock(&lp->bmux);
338
339 return status;
340}
341
/*
 * Write @len bytes of frame data into the chip's TX frame buffer.
 * The SPI message is a two-byte header (frame-buffer write command plus
 * the PHR/frame length) followed by the payload itself; the hardware
 * appends the 2-byte CRC on air.
 *
 * Returns 0 on success or a negative SPI error.
 */
static int
at86rf230_write_fbuf(struct at86rf230_local *lp, u8 *data, u8 len)
{
	u8 *buf = lp->buf;
	int status;
	struct spi_message msg;
	struct spi_transfer xfer_head = {
		.len		= 2,
		.tx_buf		= buf,

	};
	struct spi_transfer xfer_buf = {
		.len		= len,
		.tx_buf		= data,
	};

	mutex_lock(&lp->bmux);
	buf[0] = CMD_WRITE | CMD_FB;
	buf[1] = len + 2; /* 2 bytes for CRC that isn't written */

	dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
	dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);

	spi_message_init(&msg);
	spi_message_add_tail(&xfer_head, &msg);
	spi_message_add_tail(&xfer_buf, &msg);

	status = spi_sync(lp->spi, &msg);
	dev_vdbg(&lp->spi->dev, "status = %d\n", status);
	if (msg.status)
		status = msg.status;

	dev_vdbg(&lp->spi->dev, "status = %d\n", status);
	dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
	dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);

	mutex_unlock(&lp->bmux);
	return status;
}
381
382static int
383at86rf230_read_fbuf(struct at86rf230_local *lp, u8 *data, u8 *len, u8 *lqi)
384{
385 u8 *buf = lp->buf;
386 int status;
387 struct spi_message msg;
388 struct spi_transfer xfer_head = {
389 .len = 2,
390 .tx_buf = buf,
391 .rx_buf = buf,
392 };
393 struct spi_transfer xfer_head1 = {
394 .len = 2,
395 .tx_buf = buf,
396 .rx_buf = buf,
397 };
398 struct spi_transfer xfer_buf = {
399 .len = 0,
400 .rx_buf = data,
401 };
402
403 mutex_lock(&lp->bmux);
404
405 buf[0] = CMD_FB;
406 buf[1] = 0x00;
407
408 spi_message_init(&msg);
409 spi_message_add_tail(&xfer_head, &msg);
410
411 status = spi_sync(lp->spi, &msg);
412 dev_vdbg(&lp->spi->dev, "status = %d\n", status);
413
414 xfer_buf.len = *(buf + 1) + 1;
415 *len = buf[1];
416
417 buf[0] = CMD_FB;
418 buf[1] = 0x00;
419
420 spi_message_init(&msg);
421 spi_message_add_tail(&xfer_head1, &msg);
422 spi_message_add_tail(&xfer_buf, &msg);
423
424 status = spi_sync(lp->spi, &msg);
425
426 if (msg.status)
427 status = msg.status;
428
429 dev_vdbg(&lp->spi->dev, "status = %d\n", status);
430 dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
431 dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
432
433 if (status) {
434 if (lqi && (*len > lp->buf[1]))
435 *lqi = data[lp->buf[1]];
436 }
437 mutex_unlock(&lp->bmux);
438
439 return status;
440}
441
/*
 * Energy-detection callback for mac802154.
 *
 * NOTE(review): this is a stub — it always reports the hard-coded
 * value 0xbe instead of reading RG_PHY_ED_LEVEL from the chip.
 */
static int
at86rf230_ed(struct ieee802154_dev *dev, u8 *level)
{
	might_sleep();
	BUG_ON(!level);
	*level = 0xbe;	/* placeholder, not a real measurement */
	return 0;
}
450
/*
 * Drive the transceiver state machine to @state.
 *
 * STATE_FORCE_TX_ON and STATE_FORCE_TRX_OFF are commands rather than
 * observable states: the chip reports STATE_TX_ON / STATE_TRX_OFF once
 * they complete, so map them to the status we expect to read back.
 *
 * Returns 0 on success, -EBUSY if the chip settled in an unexpected
 * state, or a negative SPI error.
 */
static int
at86rf230_state(struct ieee802154_dev *dev, int state)
{
	struct at86rf230_local *lp = dev->priv;
	int rc;
	u8 val;
	u8 desired_status;

	might_sleep();

	if (state == STATE_FORCE_TX_ON)
		desired_status = STATE_TX_ON;
	else if (state == STATE_FORCE_TRX_OFF)
		desired_status = STATE_TRX_OFF;
	else
		desired_status = state;

	/* Wait for any in-flight transition to finish.
	 * NOTE(review): busy-polls over SPI with no delay or timeout;
	 * a wedged chip would spin here forever — confirm acceptable. */
	do {
		rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &val);
		if (rc)
			goto err;
	} while (val == STATE_TRANSITION_IN_PROGRESS);

	if (val == desired_status)
		return 0;	/* already in the requested state */

	/* state is equal to phy states */
	rc = at86rf230_write_subreg(lp, SR_TRX_CMD, state);
	if (rc)
		goto err;

	/* Poll until the commanded transition completes. */
	do {
		rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &val);
		if (rc)
			goto err;
	} while (val == STATE_TRANSITION_IN_PROGRESS);


	if (val == desired_status)
		return 0;

	pr_err("unexpected state change: %d, asked for %d\n", val, state);
	return -EBUSY;

err:
	pr_err("error: %d\n", rc);
	return rc;
}
499
500static int
501at86rf230_start(struct ieee802154_dev *dev)
502{
503 struct at86rf230_local *lp = dev->priv;
504 u8 rc;
505
506 rc = at86rf230_write_subreg(lp, SR_RX_SAFE_MODE, 1);
507 if (rc)
508 return rc;
509
510 return at86rf230_state(dev, STATE_RX_ON);
511}
512
/*
 * mac802154 .stop callback: force the transceiver off.  The return
 * value of at86rf230_state() is intentionally dropped — .stop is void
 * and there is nothing useful to do on failure here.
 */
static void
at86rf230_stop(struct ieee802154_dev *dev)
{
	at86rf230_state(dev, STATE_FORCE_TRX_OFF);
}
518
519static int
520at86rf230_channel(struct ieee802154_dev *dev, int page, int channel)
521{
522 struct at86rf230_local *lp = dev->priv;
523 int rc;
524
525 might_sleep();
526
527 if (page != 0 || channel < 11 || channel > 26) {
528 WARN_ON(1);
529 return -EINVAL;
530 }
531
532 rc = at86rf230_write_subreg(lp, SR_CHANNEL, channel);
533 msleep(1); /* Wait for PLL */
534 dev->phy->current_channel = channel;
535
536 return 0;
537}
538
/*
 * mac802154 .xmit callback: push one frame to the chip and block until
 * the TRX_END interrupt (delivered via the workqueue) completes
 * lp->tx_complete.
 *
 * Returns 0 on success, -EBUSY while the IRQ bottom half is pending,
 * or a negative error from the SPI/state helpers.
 */
static int
at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
{
	struct at86rf230_local *lp = dev->priv;
	int rc;
	unsigned long flags;

	/* Refuse to start a TX while an interrupt is being serviced:
	 * the bottom half owns the SPI/frame-buffer at that point. */
	spin_lock(&lp->lock);
	if (lp->irq_disabled) {
		spin_unlock(&lp->lock);
		return -EBUSY;
	}
	spin_unlock(&lp->lock);

	might_sleep();

	rc = at86rf230_state(dev, STATE_FORCE_TX_ON);
	if (rc)
		goto err;

	/* Mark TX in flight and re-arm the completion BEFORE the frame
	 * is written, so the IRQ cannot race ahead of us. */
	spin_lock_irqsave(&lp->lock, flags);
	lp->is_tx = 1;
	INIT_COMPLETION(lp->tx_complete);
	spin_unlock_irqrestore(&lp->lock, flags);

	rc = at86rf230_write_fbuf(lp, skb->data, skb->len);
	if (rc)
		goto err_rx;

	/* Kick off the transmission. */
	rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_BUSY_TX);
	if (rc)
		goto err_rx;

	/* Wait for TRX_END; interruptible, so a signal aborts the wait. */
	rc = wait_for_completion_interruptible(&lp->tx_complete);
	if (rc < 0)
		goto err_rx;

	rc = at86rf230_start(dev);

	return rc;

err_rx:
	/* Best effort: put the chip back into receive mode. */
	at86rf230_start(dev);
err:
	pr_err("error: %d\n", rc);

	spin_lock_irqsave(&lp->lock, flags);
	lp->is_tx = 0;
	spin_unlock_irqrestore(&lp->lock, flags);

	return rc;
}
591
/*
 * Read one received frame from the frame buffer and hand it to the
 * mac802154 stack.  Runs from the IRQ workqueue (process context).
 */
static int at86rf230_rx(struct at86rf230_local *lp)
{
	u8 len = 128, lqi = 0;	/* 128 bytes covers the maximum frame */
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	if (at86rf230_read_fbuf(lp, skb_put(skb, len), &len, &lqi))
		goto err;

	/* A valid frame carries at least the 2-byte FCS. */
	if (len < 2)
		goto err;

	skb_trim(skb, len - 2); /* We do not put CRC into the frame */

	ieee802154_rx_irqsafe(lp->dev, skb, lqi);

	dev_dbg(&lp->spi->dev, "READ_FBUF: %d %x\n", len, lqi);

	return 0;
err:
	/* NOTE(review): also reached on SPI read failure, not only for
	 * short frames — the message is then misleading. */
	pr_debug("received frame is too small\n");

	kfree_skb(skb);
	return -EINVAL;
}
621
/* mac802154 driver callbacks; all of them may sleep. */
static struct ieee802154_ops at86rf230_ops = {
	.owner = THIS_MODULE,
	.xmit = at86rf230_xmit,
	.ed = at86rf230_ed,
	.set_channel = at86rf230_channel,
	.start = at86rf230_start,
	.stop = at86rf230_stop,
};
630
/*
 * IRQ bottom half: read and dispatch the chip's interrupt status.
 * Reading RG_IRQ_STATUS also acknowledges the interrupts.  A TRX_END
 * event either completes a pending transmit or signals a received
 * frame; all other sources are currently ignored.  Finally the flag
 * set by the hard handler is cleared and the IRQ line re-enabled.
 */
static void at86rf230_irqwork(struct work_struct *work)
{
	struct at86rf230_local *lp =
		container_of(work, struct at86rf230_local, irqwork);
	u8 status = 0, val;
	int rc;
	unsigned long flags;

	/* NOTE(review): rc is not checked; on SPI failure val may be
	 * stale — confirm whether that is acceptable here. */
	rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &val);
	status |= val;

	status &= ~IRQ_PLL_LOCK; /* ignore */
	status &= ~IRQ_RX_START; /* ignore */
	status &= ~IRQ_AMI; /* ignore */
	status &= ~IRQ_TRX_UR; /* FIXME: possibly handle ???*/

	if (status & IRQ_TRX_END) {
		spin_lock_irqsave(&lp->lock, flags);
		status &= ~IRQ_TRX_END;
		if (lp->is_tx) {
			/* TX just finished: wake the sleeping xmit(). */
			lp->is_tx = 0;
			spin_unlock_irqrestore(&lp->lock, flags);
			complete(&lp->tx_complete);
		} else {
			/* Otherwise TRX_END means a frame was received. */
			spin_unlock_irqrestore(&lp->lock, flags);
			at86rf230_rx(lp);
		}
	}

	/* Allow xmit() again and unmask the interrupt line. */
	spin_lock_irqsave(&lp->lock, flags);
	lp->irq_disabled = 0;
	spin_unlock_irqrestore(&lp->lock, flags);

	enable_irq(lp->spi->irq);
}
666
/*
 * Hard IRQ handler: mask the interrupt line and defer all work to the
 * workqueue, since talking to the chip requires spi_sync(), which may
 * sleep.  The line is re-enabled by at86rf230_irqwork().
 */
static irqreturn_t at86rf230_isr(int irq, void *data)
{
	struct at86rf230_local *lp = data;

	disable_irq_nosync(irq);

	/* Tell at86rf230_xmit() not to start a TX while the bottom
	 * half is pending. */
	spin_lock(&lp->lock);
	lp->irq_disabled = 1;
	spin_unlock(&lp->lock);

	schedule_work(&lp->irqwork);

	return IRQ_HANDLED;
}
681
682
/*
 * One-time hardware bring-up after reset: leave P_ON, unmask all
 * interrupt sources, disable the CLKM clock output, enter TX_ON and
 * verify that both internal voltage regulators report OK.
 *
 * Returns 0 on success, a negative SPI error, or -EINVAL when a
 * regulator check fails.
 */
static int at86rf230_hw_init(struct at86rf230_local *lp)
{
	u8 status;
	int rc;

	rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
	if (rc)
		return rc;

	dev_info(&lp->spi->dev, "Status: %02x\n", status);
	if (status == STATE_P_ON) {
		/* Fresh out of power-on: move to the idle TRX_OFF state. */
		rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TRX_OFF);
		if (rc)
			return rc;
		msleep(1);
		rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
		if (rc)
			return rc;
		dev_info(&lp->spi->dev, "Status: %02x\n", status);
	}

	rc = at86rf230_write_subreg(lp, SR_IRQ_MASK, 0xff); /* IRQ_TRX_UR |
							     * IRQ_CCA_ED |
							     * IRQ_TRX_END |
							     * IRQ_PLL_UNL |
							     * IRQ_PLL_LOCK
							     */
	if (rc)
		return rc;

	/* CLKM changes are applied immediately */
	rc = at86rf230_write_subreg(lp, SR_CLKM_SHA_SEL, 0x00);
	if (rc)
		return rc;

	/* Turn CLKM Off */
	rc = at86rf230_write_subreg(lp, SR_CLKM_CTRL, 0x00);
	if (rc)
		return rc;
	/* Wait the next SLEEP cycle */
	msleep(100);

	rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TX_ON);
	if (rc)
		return rc;
	msleep(1);

	rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
	if (rc)
		return rc;
	dev_info(&lp->spi->dev, "Status: %02x\n", status);

	/* Sanity-check the digital voltage regulator. */
	rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &status);
	if (rc)
		return rc;
	if (!status) {
		dev_err(&lp->spi->dev, "DVDD error\n");
		return -EINVAL;
	}

	/* Sanity-check the analog voltage regulator. */
	rc = at86rf230_read_subreg(lp, SR_AVDD_OK, &status);
	if (rc)
		return rc;
	if (!status) {
		dev_err(&lp->spi->dev, "AVDD error\n");
		return -EINVAL;
	}

	return 0;
}
753
/* Power-management stubs: the transceiver state is left untouched
 * across suspend/resume. */
static int at86rf230_suspend(struct spi_device *spi, pm_message_t message)
{
	return 0;
}

static int at86rf230_resume(struct spi_device *spi)
{
	return 0;
}
763
764static int at86rf230_fill_data(struct spi_device *spi)
765{
766 struct at86rf230_local *lp = spi_get_drvdata(spi);
767 struct at86rf230_platform_data *pdata = spi->dev.platform_data;
768
769 if (!pdata) {
770 dev_err(&spi->dev, "no platform_data\n");
771 return -EINVAL;
772 }
773
774 lp->rstn = pdata->rstn;
775 lp->slp_tr = pdata->slp_tr;
776 lp->dig2 = pdata->dig2;
777
778 return 0;
779}
780
781static int __devinit at86rf230_probe(struct spi_device *spi)
782{
783 struct ieee802154_dev *dev;
784 struct at86rf230_local *lp;
785 u8 man_id_0, man_id_1;
786 int rc;
787 const char *chip;
788 int supported = 0;
789
790 if (!spi->irq) {
791 dev_err(&spi->dev, "no IRQ specified\n");
792 return -EINVAL;
793 }
794
795 dev = ieee802154_alloc_device(sizeof(*lp), &at86rf230_ops);
796 if (!dev)
797 return -ENOMEM;
798
799 lp = dev->priv;
800 lp->dev = dev;
801
802 lp->spi = spi;
803
804 dev->priv = lp;
805 dev->parent = &spi->dev;
806 dev->extra_tx_headroom = 0;
807 /* We do support only 2.4 Ghz */
808 dev->phy->channels_supported[0] = 0x7FFF800;
809 dev->flags = IEEE802154_HW_OMIT_CKSUM;
810
811 mutex_init(&lp->bmux);
812 INIT_WORK(&lp->irqwork, at86rf230_irqwork);
813 spin_lock_init(&lp->lock);
814 init_completion(&lp->tx_complete);
815
816 spi_set_drvdata(spi, lp);
817
818 rc = at86rf230_fill_data(spi);
819 if (rc)
820 goto err_fill;
821
822 rc = gpio_request(lp->rstn, "rstn");
823 if (rc)
824 goto err_rstn;
825
826 if (gpio_is_valid(lp->slp_tr)) {
827 rc = gpio_request(lp->slp_tr, "slp_tr");
828 if (rc)
829 goto err_slp_tr;
830 }
831
832 rc = gpio_direction_output(lp->rstn, 1);
833 if (rc)
834 goto err_gpio_dir;
835
836 if (gpio_is_valid(lp->slp_tr)) {
837 rc = gpio_direction_output(lp->slp_tr, 0);
838 if (rc)
839 goto err_gpio_dir;
840 }
841
842 /* Reset */
843 msleep(1);
844 gpio_set_value(lp->rstn, 0);
845 msleep(1);
846 gpio_set_value(lp->rstn, 1);
847 msleep(1);
848
849 rc = at86rf230_read_subreg(lp, SR_MAN_ID_0, &man_id_0);
850 if (rc)
851 goto err_gpio_dir;
852 rc = at86rf230_read_subreg(lp, SR_MAN_ID_1, &man_id_1);
853 if (rc)
854 goto err_gpio_dir;
855
856 if (man_id_1 != 0x00 || man_id_0 != 0x1f) {
857 dev_err(&spi->dev, "Non-Atmel dev found (MAN_ID %02x %02x)\n",
858 man_id_1, man_id_0);
859 rc = -EINVAL;
860 goto err_gpio_dir;
861 }
862
863 rc = at86rf230_read_subreg(lp, SR_PART_NUM, &lp->part);
864 if (rc)
865 goto err_gpio_dir;
866
867 rc = at86rf230_read_subreg(lp, SR_VERSION_NUM, &lp->vers);
868 if (rc)
869 goto err_gpio_dir;
870
871 switch (lp->part) {
872 case 2:
873 chip = "at86rf230";
874 /* supported = 1; FIXME: should be easy to support; */
875 break;
876 case 3:
877 chip = "at86rf231";
878 supported = 1;
879 break;
880 default:
881 chip = "UNKNOWN";
882 break;
883 }
884
885 dev_info(&spi->dev, "Detected %s chip version %d\n", chip, lp->vers);
886 if (!supported) {
887 rc = -ENOTSUPP;
888 goto err_gpio_dir;
889 }
890
891 rc = at86rf230_hw_init(lp);
892 if (rc)
893 goto err_gpio_dir;
894
895 rc = request_irq(spi->irq, at86rf230_isr, IRQF_SHARED,
896 dev_name(&spi->dev), lp);
897 if (rc)
898 goto err_gpio_dir;
899
900 rc = ieee802154_register_device(lp->dev);
901 if (rc)
902 goto err_irq;
903
904 return rc;
905
906 ieee802154_unregister_device(lp->dev);
907err_irq:
908 free_irq(spi->irq, lp);
909 flush_work(&lp->irqwork);
910err_gpio_dir:
911 if (gpio_is_valid(lp->slp_tr))
912 gpio_free(lp->slp_tr);
913err_slp_tr:
914 gpio_free(lp->rstn);
915err_rstn:
916err_fill:
917 spi_set_drvdata(spi, NULL);
918 mutex_destroy(&lp->bmux);
919 ieee802154_free_device(lp->dev);
920 return rc;
921}
922
/*
 * Remove: undo probe in reverse order — unregister from mac802154,
 * release the IRQ (flushing any queued bottom half), free the GPIOs
 * and finally the device itself.
 */
static int __devexit at86rf230_remove(struct spi_device *spi)
{
	struct at86rf230_local *lp = spi_get_drvdata(spi);

	ieee802154_unregister_device(lp->dev);

	free_irq(spi->irq, lp);
	flush_work(&lp->irqwork);	/* no IRQ work may run past this point */

	if (gpio_is_valid(lp->slp_tr))
		gpio_free(lp->slp_tr);
	gpio_free(lp->rstn);

	spi_set_drvdata(spi, NULL);
	mutex_destroy(&lp->bmux);
	ieee802154_free_device(lp->dev);

	dev_dbg(&spi->dev, "unregistered at86rf230\n");
	return 0;
}
943
/* SPI driver glue; the name is what SPI board info binds against. */
static struct spi_driver at86rf230_driver = {
	.driver = {
		.name	= "at86rf230",
		.owner	= THIS_MODULE,
	},
	.probe      = at86rf230_probe,
	.remove     = __devexit_p(at86rf230_remove),
	.suspend    = at86rf230_suspend,
	.resume     = at86rf230_resume,
};
954
/*
 * Module registration boilerplate replaced with module_spi_driver(),
 * which expands to exactly the former at86rf230_init()/at86rf230_exit()
 * pair calling spi_register_driver()/spi_unregister_driver().
 */
module_spi_driver(at86rf230_driver);
966
967MODULE_DESCRIPTION("AT86RF230 Transceiver Driver");
968MODULE_LICENSE("GPL v2");
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index e497dfbee435..3ae2bfd31015 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -108,12 +108,14 @@ void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq,
108 unsigned char *prev_tail; 108 unsigned char *prev_tail;
109 109
110 prev_tail = skb_tail_pointer(skb); 110 prev_tail = skb_tail_pointer(skb);
111 *nlh = NLMSG_NEW(skb, 0, seq, RDMA_NL_GET_TYPE(client, op), 111 *nlh = nlmsg_put(skb, 0, seq, RDMA_NL_GET_TYPE(client, op),
112 len, NLM_F_MULTI); 112 len, NLM_F_MULTI);
113 if (!*nlh)
114 goto out_nlmsg_trim;
113 (*nlh)->nlmsg_len = skb_tail_pointer(skb) - prev_tail; 115 (*nlh)->nlmsg_len = skb_tail_pointer(skb) - prev_tail;
114 return NLMSG_DATA(*nlh); 116 return nlmsg_data(*nlh);
115 117
116nlmsg_failure: 118out_nlmsg_trim:
117 nlmsg_trim(skb, prev_tail); 119 nlmsg_trim(skb, prev_tail);
118 return NULL; 120 return NULL;
119} 121}
@@ -171,8 +173,11 @@ static void ibnl_rcv(struct sk_buff *skb)
171 173
172int __init ibnl_init(void) 174int __init ibnl_init(void)
173{ 175{
174 nls = netlink_kernel_create(&init_net, NETLINK_RDMA, 0, ibnl_rcv, 176 struct netlink_kernel_cfg cfg = {
175 NULL, THIS_MODULE); 177 .input = ibnl_rcv,
178 };
179
180 nls = netlink_kernel_create(&init_net, NETLINK_RDMA, THIS_MODULE, &cfg);
176 if (!nls) { 181 if (!nls) {
177 pr_warn("Failed to create netlink socket\n"); 182 pr_warn("Failed to create netlink socket\n");
178 return -ENOMEM; 183 return -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 740dcc065cf2..77b6b182778a 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1374,7 +1374,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1374 goto reject; 1374 goto reject;
1375 } 1375 }
1376 dst = &rt->dst; 1376 dst = &rt->dst;
1377 l2t = t3_l2t_get(tdev, dst, NULL); 1377 l2t = t3_l2t_get(tdev, dst, NULL, &req->peer_ip);
1378 if (!l2t) { 1378 if (!l2t) {
1379 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", 1379 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
1380 __func__); 1380 __func__);
@@ -1942,7 +1942,8 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1942 goto fail3; 1942 goto fail3;
1943 } 1943 }
1944 ep->dst = &rt->dst; 1944 ep->dst = &rt->dst;
1945 ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL); 1945 ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL,
1946 &cm_id->remote_addr.sin_addr.s_addr);
1946 if (!ep->l2t) { 1947 if (!ep->l2t) {
1947 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); 1948 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
1948 err = -ENOMEM; 1949 err = -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 3530c41fcd1f..a07b774e7864 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -718,26 +718,53 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
718 return ret; 718 return ret;
719} 719}
720 720
721struct mlx4_ib_steering {
722 struct list_head list;
723 u64 reg_id;
724 union ib_gid gid;
725};
726
721static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) 727static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
722{ 728{
723 int err; 729 int err;
724 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device); 730 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
725 struct mlx4_ib_qp *mqp = to_mqp(ibqp); 731 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
732 u64 reg_id;
733 struct mlx4_ib_steering *ib_steering = NULL;
734
735 if (mdev->dev->caps.steering_mode ==
736 MLX4_STEERING_MODE_DEVICE_MANAGED) {
737 ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
738 if (!ib_steering)
739 return -ENOMEM;
740 }
726 741
727 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, 742 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
728 !!(mqp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK), 743 !!(mqp->flags &
729 MLX4_PROT_IB_IPV6); 744 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
745 MLX4_PROT_IB_IPV6, &reg_id);
730 if (err) 746 if (err)
731 return err; 747 goto err_malloc;
732 748
733 err = add_gid_entry(ibqp, gid); 749 err = add_gid_entry(ibqp, gid);
734 if (err) 750 if (err)
735 goto err_add; 751 goto err_add;
736 752
753 if (ib_steering) {
754 memcpy(ib_steering->gid.raw, gid->raw, 16);
755 ib_steering->reg_id = reg_id;
756 mutex_lock(&mqp->mutex);
757 list_add(&ib_steering->list, &mqp->steering_rules);
758 mutex_unlock(&mqp->mutex);
759 }
737 return 0; 760 return 0;
738 761
739err_add: 762err_add:
740 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, MLX4_PROT_IB_IPV6); 763 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
764 MLX4_PROT_IB_IPV6, reg_id);
765err_malloc:
766 kfree(ib_steering);
767
741 return err; 768 return err;
742} 769}
743 770
@@ -765,9 +792,30 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
765 u8 mac[6]; 792 u8 mac[6];
766 struct net_device *ndev; 793 struct net_device *ndev;
767 struct mlx4_ib_gid_entry *ge; 794 struct mlx4_ib_gid_entry *ge;
795 u64 reg_id = 0;
796
797 if (mdev->dev->caps.steering_mode ==
798 MLX4_STEERING_MODE_DEVICE_MANAGED) {
799 struct mlx4_ib_steering *ib_steering;
800
801 mutex_lock(&mqp->mutex);
802 list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
803 if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
804 list_del(&ib_steering->list);
805 break;
806 }
807 }
808 mutex_unlock(&mqp->mutex);
809 if (&ib_steering->list == &mqp->steering_rules) {
810 pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
811 return -EINVAL;
812 }
813 reg_id = ib_steering->reg_id;
814 kfree(ib_steering);
815 }
768 816
769 err = mlx4_multicast_detach(mdev->dev, 817 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
770 &mqp->mqp, gid->raw, MLX4_PROT_IB_IPV6); 818 MLX4_PROT_IB_IPV6, reg_id);
771 if (err) 819 if (err)
772 return err; 820 return err;
773 821
@@ -1111,7 +1159,8 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
1111 sprintf(name, "mlx4-ib-%d-%d@%s", 1159 sprintf(name, "mlx4-ib-%d-%d@%s",
1112 i, j, dev->pdev->bus->name); 1160 i, j, dev->pdev->bus->name);
1113 /* Set IRQ for specific name (per ring) */ 1161 /* Set IRQ for specific name (per ring) */
1114 if (mlx4_assign_eq(dev, name, &ibdev->eq_table[eq])) { 1162 if (mlx4_assign_eq(dev, name, NULL,
1163 &ibdev->eq_table[eq])) {
1115 /* Use legacy (same as mlx4_en driver) */ 1164 /* Use legacy (same as mlx4_en driver) */
1116 pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq); 1165 pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq);
1117 ibdev->eq_table[eq] = 1166 ibdev->eq_table[eq] =
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index ff36655d23d3..42df4f7a6a5b 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -163,6 +163,7 @@ struct mlx4_ib_qp {
163 u8 state; 163 u8 state;
164 int mlx_type; 164 int mlx_type;
165 struct list_head gid_list; 165 struct list_head gid_list;
166 struct list_head steering_rules;
166}; 167};
167 168
168struct mlx4_ib_srq { 169struct mlx4_ib_srq {
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 8d4ed24aef93..6af19f6c2b11 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -495,6 +495,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
495 spin_lock_init(&qp->sq.lock); 495 spin_lock_init(&qp->sq.lock);
496 spin_lock_init(&qp->rq.lock); 496 spin_lock_init(&qp->rq.lock);
497 INIT_LIST_HEAD(&qp->gid_list); 497 INIT_LIST_HEAD(&qp->gid_list);
498 INIT_LIST_HEAD(&qp->steering_rules);
498 499
499 qp->state = IB_QPS_RESET; 500 qp->state = IB_QPS_RESET;
500 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) 501 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 014504d8e43c..1ca732201f33 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1397,7 +1397,7 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
1397 int e = skb_queue_empty(&priv->cm.skb_queue); 1397 int e = skb_queue_empty(&priv->cm.skb_queue);
1398 1398
1399 if (skb_dst(skb)) 1399 if (skb_dst(skb))
1400 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu); 1400 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
1401 1401
1402 skb_queue_tail(&priv->cm.skb_queue, skb); 1402 skb_queue_tail(&priv->cm.skb_queue, skb);
1403 if (e) 1403 if (e)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 3974c290b667..bbee4b2d7a13 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -715,7 +715,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
715 715
716 rcu_read_lock(); 716 rcu_read_lock();
717 if (likely(skb_dst(skb))) { 717 if (likely(skb_dst(skb))) {
718 n = dst_get_neighbour_noref(skb_dst(skb)); 718 n = dst_neigh_lookup_skb(skb_dst(skb), skb);
719 if (!n) { 719 if (!n) {
720 ++dev->stats.tx_dropped; 720 ++dev->stats.tx_dropped;
721 dev_kfree_skb_any(skb); 721 dev_kfree_skb_any(skb);
@@ -797,6 +797,8 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
797 } 797 }
798 } 798 }
799unlock: 799unlock:
800 if (n)
801 neigh_release(n);
800 rcu_read_unlock(); 802 rcu_read_unlock();
801 return NETDEV_TX_OK; 803 return NETDEV_TX_OK;
802} 804}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 20ebc6fd1bb9..7cecb16d3d48 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -658,9 +658,15 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
658void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb) 658void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
659{ 659{
660 struct ipoib_dev_priv *priv = netdev_priv(dev); 660 struct ipoib_dev_priv *priv = netdev_priv(dev);
661 struct dst_entry *dst = skb_dst(skb);
661 struct ipoib_mcast *mcast; 662 struct ipoib_mcast *mcast;
663 struct neighbour *n;
662 unsigned long flags; 664 unsigned long flags;
663 665
666 n = NULL;
667 if (dst)
668 n = dst_neigh_lookup_skb(dst, skb);
669
664 spin_lock_irqsave(&priv->lock, flags); 670 spin_lock_irqsave(&priv->lock, flags);
665 671
666 if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags) || 672 if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags) ||
@@ -715,29 +721,28 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
715 721
716out: 722out:
717 if (mcast && mcast->ah) { 723 if (mcast && mcast->ah) {
718 struct dst_entry *dst = skb_dst(skb); 724 if (n) {
719 struct neighbour *n = NULL; 725 if (!*to_ipoib_neigh(n)) {
720 726 struct ipoib_neigh *neigh;
721 rcu_read_lock(); 727
722 if (dst) 728 neigh = ipoib_neigh_alloc(n, skb->dev);
723 n = dst_get_neighbour_noref(dst); 729 if (neigh) {
724 if (n && !*to_ipoib_neigh(n)) { 730 kref_get(&mcast->ah->ref);
725 struct ipoib_neigh *neigh = ipoib_neigh_alloc(n, 731 neigh->ah = mcast->ah;
726 skb->dev); 732 list_add_tail(&neigh->list,
727 733 &mcast->neigh_list);
728 if (neigh) { 734 }
729 kref_get(&mcast->ah->ref);
730 neigh->ah = mcast->ah;
731 list_add_tail(&neigh->list, &mcast->neigh_list);
732 } 735 }
736 neigh_release(n);
733 } 737 }
734 rcu_read_unlock();
735 spin_unlock_irqrestore(&priv->lock, flags); 738 spin_unlock_irqrestore(&priv->lock, flags);
736 ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN); 739 ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
737 return; 740 return;
738 } 741 }
739 742
740unlock: 743unlock:
744 if (n)
745 neigh_release(n);
741 spin_unlock_irqrestore(&priv->lock, flags); 746 spin_unlock_irqrestore(&priv->lock, flags);
742} 747}
743 748
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
index 27e4a3e21d64..68452b768da2 100644
--- a/drivers/isdn/gigaset/capi.c
+++ b/drivers/isdn/gigaset/capi.c
@@ -288,6 +288,7 @@ static inline void dump_rawmsg(enum debuglevel level, const char *tag,
288 * format CAPI IE as string 288 * format CAPI IE as string
289 */ 289 */
290 290
291#ifdef CONFIG_GIGASET_DEBUG
291static const char *format_ie(const char *ie) 292static const char *format_ie(const char *ie)
292{ 293{
293 static char result[3 * MAX_FMT_IE_LEN]; 294 static char result[3 * MAX_FMT_IE_LEN];
@@ -313,6 +314,7 @@ static const char *format_ie(const char *ie)
313 *--pout = 0; 314 *--pout = 0;
314 return result; 315 return result;
315} 316}
317#endif
316 318
317/* 319/*
318 * emit DATA_B3_CONF message 320 * emit DATA_B3_CONF message
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index c65c3440cd70..114f3bcba1b0 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -2084,13 +2084,21 @@ hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id)
2084 /* create the control pipes needed for register access */ 2084 /* create the control pipes needed for register access */
2085 hw->ctrl_in_pipe = usb_rcvctrlpipe(hw->dev, 0); 2085 hw->ctrl_in_pipe = usb_rcvctrlpipe(hw->dev, 0);
2086 hw->ctrl_out_pipe = usb_sndctrlpipe(hw->dev, 0); 2086 hw->ctrl_out_pipe = usb_sndctrlpipe(hw->dev, 0);
2087
2088 driver_info = (struct hfcsusb_vdata *)
2089 hfcsusb_idtab[vend_idx].driver_info;
2090
2087 hw->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL); 2091 hw->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL);
2092 if (!hw->ctrl_urb) {
2093 pr_warn("%s: No memory for control urb\n",
2094 driver_info->vend_name);
2095 kfree(hw);
2096 return -ENOMEM;
2097 }
2088 2098
2089 driver_info = 2099 pr_info("%s: %s: detected \"%s\" (%s, if=%d alt=%d)\n",
2090 (struct hfcsusb_vdata *)hfcsusb_idtab[vend_idx].driver_info; 2100 hw->name, __func__, driver_info->vend_name,
2091 printk(KERN_DEBUG "%s: %s: detected \"%s\" (%s, if=%d alt=%d)\n", 2101 conf_str[small_match], ifnum, alt_used);
2092 hw->name, __func__, driver_info->vend_name,
2093 conf_str[small_match], ifnum, alt_used);
2094 2102
2095 if (setup_instance(hw, dev->dev.parent)) 2103 if (setup_instance(hw, dev->dev.parent))
2096 return -EIO; 2104 return -EIO;
diff --git a/drivers/isdn/hisax/hfc_usb.c b/drivers/isdn/hisax/hfc_usb.c
index 84f9c8103078..849a80752685 100644
--- a/drivers/isdn/hisax/hfc_usb.c
+++ b/drivers/isdn/hisax/hfc_usb.c
@@ -1483,13 +1483,21 @@ hfc_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
1483 usb_rcvctrlpipe(context->dev, 0); 1483 usb_rcvctrlpipe(context->dev, 0);
1484 context->ctrl_out_pipe = 1484 context->ctrl_out_pipe =
1485 usb_sndctrlpipe(context->dev, 0); 1485 usb_sndctrlpipe(context->dev, 0);
1486
1487 driver_info = (hfcsusb_vdata *)
1488 hfcusb_idtab[vend_idx].driver_info;
1489
1486 context->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL); 1490 context->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL);
1487 1491
1488 driver_info = 1492 if (!context->ctrl_urb) {
1489 (hfcsusb_vdata *) hfcusb_idtab[vend_idx]. 1493 pr_warn("%s: No memory for control urb\n",
1490 driver_info; 1494 driver_info->vend_name);
1491 printk(KERN_INFO "HFC-S USB: detected \"%s\"\n", 1495 kfree(context);
1492 driver_info->vend_name); 1496 return -ENOMEM;
1497 }
1498
1499 pr_info("HFC-S USB: detected \"%s\"\n",
1500 driver_info->vend_name);
1493 1501
1494 DBG(HFCUSB_DBG_INIT, 1502 DBG(HFCUSB_DBG_INIT,
1495 "HFC-S USB: Endpoint-Config: %s (if=%d alt=%d), E-Channel(%d)", 1503 "HFC-S USB: Endpoint-Config: %s (if=%d alt=%d), E-Channel(%d)",
diff --git a/drivers/isdn/hisax/isurf.c b/drivers/isdn/hisax/isurf.c
index ea2717215296..c1530fe248c2 100644
--- a/drivers/isdn/hisax/isurf.c
+++ b/drivers/isdn/hisax/isurf.c
@@ -231,6 +231,11 @@ setup_isurf(struct IsdnCard *card)
231 } 231 }
232 pnp_disable_dev(pnp_d); 232 pnp_disable_dev(pnp_d);
233 err = pnp_activate_dev(pnp_d); 233 err = pnp_activate_dev(pnp_d);
234 if (err < 0) {
235 pr_warn("%s: pnp_activate_dev ret=%d\n",
236 __func__, err);
237 return 0;
238 }
234 cs->hw.isurf.reset = pnp_port_start(pnp_d, 0); 239 cs->hw.isurf.reset = pnp_port_start(pnp_d, 0);
235 cs->hw.isurf.phymem = pnp_mem_start(pnp_d, 1); 240 cs->hw.isurf.phymem = pnp_mem_start(pnp_d, 1);
236 cs->irq = pnp_irq(pnp_d, 0); 241 cs->irq = pnp_irq(pnp_d, 0);
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 2661f6e366f9..154f3ef07631 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -511,7 +511,6 @@ config USB_SWITCH_FSA9480
511source "drivers/misc/c2port/Kconfig" 511source "drivers/misc/c2port/Kconfig"
512source "drivers/misc/eeprom/Kconfig" 512source "drivers/misc/eeprom/Kconfig"
513source "drivers/misc/cb710/Kconfig" 513source "drivers/misc/cb710/Kconfig"
514source "drivers/misc/iwmc3200top/Kconfig"
515source "drivers/misc/ti-st/Kconfig" 514source "drivers/misc/ti-st/Kconfig"
516source "drivers/misc/lis3lv02d/Kconfig" 515source "drivers/misc/lis3lv02d/Kconfig"
517source "drivers/misc/carma/Kconfig" 516source "drivers/misc/carma/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 456972faaeb3..b88df7a350b8 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -36,7 +36,6 @@ obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o
36obj-$(CONFIG_DS1682) += ds1682.o 36obj-$(CONFIG_DS1682) += ds1682.o
37obj-$(CONFIG_TI_DAC7512) += ti_dac7512.o 37obj-$(CONFIG_TI_DAC7512) += ti_dac7512.o
38obj-$(CONFIG_C2PORT) += c2port/ 38obj-$(CONFIG_C2PORT) += c2port/
39obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/
40obj-$(CONFIG_HMC6352) += hmc6352.o 39obj-$(CONFIG_HMC6352) += hmc6352.o
41obj-y += eeprom/ 40obj-y += eeprom/
42obj-y += cb710/ 41obj-y += cb710/
diff --git a/drivers/misc/iwmc3200top/Kconfig b/drivers/misc/iwmc3200top/Kconfig
deleted file mode 100644
index 9e4b88fb57f1..000000000000
--- a/drivers/misc/iwmc3200top/Kconfig
+++ /dev/null
@@ -1,20 +0,0 @@
1config IWMC3200TOP
2 tristate "Intel Wireless MultiCom Top Driver"
3 depends on MMC && EXPERIMENTAL
4 select FW_LOADER
5 ---help---
6 Intel Wireless MultiCom 3200 Top driver is responsible for
7 for firmware load and enabled coms enumeration
8
9config IWMC3200TOP_DEBUG
10 bool "Enable full debug output of iwmc3200top Driver"
11 depends on IWMC3200TOP
12 ---help---
13 Enable full debug output of iwmc3200top Driver
14
15config IWMC3200TOP_DEBUGFS
16 bool "Enable Debugfs debugging interface for iwmc3200top"
17 depends on IWMC3200TOP
18 ---help---
19 Enable creation of debugfs files for iwmc3200top
20
diff --git a/drivers/misc/iwmc3200top/Makefile b/drivers/misc/iwmc3200top/Makefile
deleted file mode 100644
index fbf53fb4634e..000000000000
--- a/drivers/misc/iwmc3200top/Makefile
+++ /dev/null
@@ -1,29 +0,0 @@
1# iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
2# drivers/misc/iwmc3200top/Makefile
3#
4# Copyright (C) 2009 Intel Corporation. All rights reserved.
5#
6# This program is free software; you can redistribute it and/or
7# modify it under the terms of the GNU General Public License version
8# 2 as published by the Free Software Foundation.
9#
10# This program is distributed in the hope that it will be useful,
11# but WITHOUT ANY WARRANTY; without even the implied warranty of
12# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13# GNU General Public License for more details.
14#
15# You should have received a copy of the GNU General Public License
16# along with this program; if not, write to the Free Software
17# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18# 02110-1301, USA.
19#
20#
21# Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
22# -
23#
24#
25
26obj-$(CONFIG_IWMC3200TOP) += iwmc3200top.o
27iwmc3200top-objs := main.o fw-download.o
28iwmc3200top-$(CONFIG_IWMC3200TOP_DEBUG) += log.o
29iwmc3200top-$(CONFIG_IWMC3200TOP_DEBUGFS) += debugfs.o
diff --git a/drivers/misc/iwmc3200top/debugfs.c b/drivers/misc/iwmc3200top/debugfs.c
deleted file mode 100644
index 62fbaec48207..000000000000
--- a/drivers/misc/iwmc3200top/debugfs.c
+++ /dev/null
@@ -1,137 +0,0 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/debufs.c
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#include <linux/kernel.h>
28#include <linux/slab.h>
29#include <linux/string.h>
30#include <linux/ctype.h>
31#include <linux/mmc/sdio_func.h>
32#include <linux/mmc/sdio.h>
33#include <linux/debugfs.h>
34
35#include "iwmc3200top.h"
36#include "fw-msg.h"
37#include "log.h"
38#include "debugfs.h"
39
40
41
42/* Constants definition */
43#define HEXADECIMAL_RADIX 16
44
45/* Functions definition */
46
47
48#define DEBUGFS_ADD(name, parent) do { \
49 dbgfs->dbgfs_##parent##_files.file_##name = \
50 debugfs_create_file(#name, 0644, dbgfs->dir_##parent, priv, \
51 &iwmct_dbgfs_##name##_ops); \
52} while (0)
53
54#define DEBUGFS_RM(name) do { \
55 debugfs_remove(name); \
56 name = NULL; \
57} while (0)
58
59#define DEBUGFS_READ_FUNC(name) \
60ssize_t iwmct_dbgfs_##name##_read(struct file *file, \
61 char __user *user_buf, \
62 size_t count, loff_t *ppos);
63
64#define DEBUGFS_WRITE_FUNC(name) \
65ssize_t iwmct_dbgfs_##name##_write(struct file *file, \
66 const char __user *user_buf, \
67 size_t count, loff_t *ppos);
68
69#define DEBUGFS_READ_FILE_OPS(name) \
70 DEBUGFS_READ_FUNC(name) \
71 static const struct file_operations iwmct_dbgfs_##name##_ops = { \
72 .read = iwmct_dbgfs_##name##_read, \
73 .open = iwmct_dbgfs_open_file_generic, \
74 .llseek = generic_file_llseek, \
75 };
76
77#define DEBUGFS_WRITE_FILE_OPS(name) \
78 DEBUGFS_WRITE_FUNC(name) \
79 static const struct file_operations iwmct_dbgfs_##name##_ops = { \
80 .write = iwmct_dbgfs_##name##_write, \
81 .open = iwmct_dbgfs_open_file_generic, \
82 .llseek = generic_file_llseek, \
83 };
84
85#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
86 DEBUGFS_READ_FUNC(name) \
87 DEBUGFS_WRITE_FUNC(name) \
88 static const struct file_operations iwmct_dbgfs_##name##_ops = {\
89 .write = iwmct_dbgfs_##name##_write, \
90 .read = iwmct_dbgfs_##name##_read, \
91 .open = iwmct_dbgfs_open_file_generic, \
92 .llseek = generic_file_llseek, \
93 };
94
95
96/* Debugfs file ops definitions */
97
98/*
99 * Create the debugfs files and directories
100 *
101 */
102void iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name)
103{
104 struct iwmct_debugfs *dbgfs;
105
106 dbgfs = kzalloc(sizeof(struct iwmct_debugfs), GFP_KERNEL);
107 if (!dbgfs) {
108 LOG_ERROR(priv, DEBUGFS, "failed to allocate %zd bytes\n",
109 sizeof(struct iwmct_debugfs));
110 return;
111 }
112
113 priv->dbgfs = dbgfs;
114 dbgfs->name = name;
115 dbgfs->dir_drv = debugfs_create_dir(name, NULL);
116 if (!dbgfs->dir_drv) {
117 LOG_ERROR(priv, DEBUGFS, "failed to create debugfs dir\n");
118 return;
119 }
120
121 return;
122}
123
124/**
125 * Remove the debugfs files and directories
126 *
127 */
128void iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs)
129{
130 if (!dbgfs)
131 return;
132
133 DEBUGFS_RM(dbgfs->dir_drv);
134 kfree(dbgfs);
135 dbgfs = NULL;
136}
137
diff --git a/drivers/misc/iwmc3200top/debugfs.h b/drivers/misc/iwmc3200top/debugfs.h
deleted file mode 100644
index 71d45759b40f..000000000000
--- a/drivers/misc/iwmc3200top/debugfs.h
+++ /dev/null
@@ -1,58 +0,0 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/debufs.h
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#ifndef __DEBUGFS_H__
28#define __DEBUGFS_H__
29
30
31#ifdef CONFIG_IWMC3200TOP_DEBUGFS
32
33struct iwmct_debugfs {
34 const char *name;
35 struct dentry *dir_drv;
36 struct dir_drv_files {
37 } dbgfs_drv_files;
38};
39
40void iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name);
41void iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs);
42
43#else /* CONFIG_IWMC3200TOP_DEBUGFS */
44
45struct iwmct_debugfs;
46
47static inline void
48iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name)
49{}
50
51static inline void
52iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs)
53{}
54
55#endif /* CONFIG_IWMC3200TOP_DEBUGFS */
56
57#endif /* __DEBUGFS_H__ */
58
diff --git a/drivers/misc/iwmc3200top/fw-download.c b/drivers/misc/iwmc3200top/fw-download.c
deleted file mode 100644
index e27afde6e99f..000000000000
--- a/drivers/misc/iwmc3200top/fw-download.c
+++ /dev/null
@@ -1,358 +0,0 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/fw-download.c
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#include <linux/firmware.h>
28#include <linux/mmc/sdio_func.h>
29#include <linux/slab.h>
30#include <asm/unaligned.h>
31
32#include "iwmc3200top.h"
33#include "log.h"
34#include "fw-msg.h"
35
36#define CHECKSUM_BYTES_NUM sizeof(u32)
37
38/**
39 init parser struct with file
40 */
41static int iwmct_fw_parser_init(struct iwmct_priv *priv, const u8 *file,
42 size_t file_size, size_t block_size)
43{
44 struct iwmct_parser *parser = &priv->parser;
45 struct iwmct_fw_hdr *fw_hdr = &parser->versions;
46
47 LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
48
49 LOG_INFO(priv, FW_DOWNLOAD, "file_size=%zd\n", file_size);
50
51 parser->file = file;
52 parser->file_size = file_size;
53 parser->cur_pos = 0;
54 parser->entry_point = 0;
55 parser->buf = kzalloc(block_size, GFP_KERNEL);
56 if (!parser->buf) {
57 LOG_ERROR(priv, FW_DOWNLOAD, "kzalloc error\n");
58 return -ENOMEM;
59 }
60 parser->buf_size = block_size;
61
62 /* extract fw versions */
63 memcpy(fw_hdr, parser->file, sizeof(struct iwmct_fw_hdr));
64 LOG_INFO(priv, FW_DOWNLOAD, "fw versions are:\n"
65 "top %u.%u.%u gps %u.%u.%u bt %u.%u.%u tic %s\n",
66 fw_hdr->top_major, fw_hdr->top_minor, fw_hdr->top_revision,
67 fw_hdr->gps_major, fw_hdr->gps_minor, fw_hdr->gps_revision,
68 fw_hdr->bt_major, fw_hdr->bt_minor, fw_hdr->bt_revision,
69 fw_hdr->tic_name);
70
71 parser->cur_pos += sizeof(struct iwmct_fw_hdr);
72
73 LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
74 return 0;
75}
76
77static bool iwmct_checksum(struct iwmct_priv *priv)
78{
79 struct iwmct_parser *parser = &priv->parser;
80 __le32 *file = (__le32 *)parser->file;
81 int i, pad, steps;
82 u32 accum = 0;
83 u32 checksum;
84 u32 mask = 0xffffffff;
85
86 pad = (parser->file_size - CHECKSUM_BYTES_NUM) % 4;
87 steps = (parser->file_size - CHECKSUM_BYTES_NUM) / 4;
88
89 LOG_INFO(priv, FW_DOWNLOAD, "pad=%d steps=%d\n", pad, steps);
90
91 for (i = 0; i < steps; i++)
92 accum += le32_to_cpu(file[i]);
93
94 if (pad) {
95 mask <<= 8 * (4 - pad);
96 accum += le32_to_cpu(file[steps]) & mask;
97 }
98
99 checksum = get_unaligned_le32((__le32 *)(parser->file +
100 parser->file_size - CHECKSUM_BYTES_NUM));
101
102 LOG_INFO(priv, FW_DOWNLOAD,
103 "compare checksum accum=0x%x to checksum=0x%x\n",
104 accum, checksum);
105
106 return checksum == accum;
107}
108
109static int iwmct_parse_next_section(struct iwmct_priv *priv, const u8 **p_sec,
110 size_t *sec_size, __le32 *sec_addr)
111{
112 struct iwmct_parser *parser = &priv->parser;
113 struct iwmct_dbg *dbg = &priv->dbg;
114 struct iwmct_fw_sec_hdr *sec_hdr;
115
116 LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
117
118 while (parser->cur_pos + sizeof(struct iwmct_fw_sec_hdr)
119 <= parser->file_size) {
120
121 sec_hdr = (struct iwmct_fw_sec_hdr *)
122 (parser->file + parser->cur_pos);
123 parser->cur_pos += sizeof(struct iwmct_fw_sec_hdr);
124
125 LOG_INFO(priv, FW_DOWNLOAD,
126 "sec hdr: type=%s addr=0x%x size=%d\n",
127 sec_hdr->type, sec_hdr->target_addr,
128 sec_hdr->data_size);
129
130 if (strcmp(sec_hdr->type, "ENT") == 0)
131 parser->entry_point = le32_to_cpu(sec_hdr->target_addr);
132 else if (strcmp(sec_hdr->type, "LBL") == 0)
133 strcpy(dbg->label_fw, parser->file + parser->cur_pos);
134 else if (((strcmp(sec_hdr->type, "TOP") == 0) &&
135 (priv->barker & BARKER_DNLOAD_TOP_MSK)) ||
136 ((strcmp(sec_hdr->type, "GPS") == 0) &&
137 (priv->barker & BARKER_DNLOAD_GPS_MSK)) ||
138 ((strcmp(sec_hdr->type, "BTH") == 0) &&
139 (priv->barker & BARKER_DNLOAD_BT_MSK))) {
140 *sec_addr = sec_hdr->target_addr;
141 *sec_size = le32_to_cpu(sec_hdr->data_size);
142 *p_sec = parser->file + parser->cur_pos;
143 parser->cur_pos += le32_to_cpu(sec_hdr->data_size);
144 return 1;
145 } else if (strcmp(sec_hdr->type, "LOG") != 0)
146 LOG_WARNING(priv, FW_DOWNLOAD,
147 "skipping section type %s\n",
148 sec_hdr->type);
149
150 parser->cur_pos += le32_to_cpu(sec_hdr->data_size);
151 LOG_INFO(priv, FW_DOWNLOAD,
152 "finished with section cur_pos=%zd\n", parser->cur_pos);
153 }
154
155 LOG_TRACE(priv, INIT, "<--\n");
156 return 0;
157}
158
159static int iwmct_download_section(struct iwmct_priv *priv, const u8 *p_sec,
160 size_t sec_size, __le32 addr)
161{
162 struct iwmct_parser *parser = &priv->parser;
163 struct iwmct_fw_load_hdr *hdr = (struct iwmct_fw_load_hdr *)parser->buf;
164 const u8 *cur_block = p_sec;
165 size_t sent = 0;
166 int cnt = 0;
167 int ret = 0;
168 u32 cmd = 0;
169
170 LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
171 LOG_INFO(priv, FW_DOWNLOAD, "Download address 0x%x size 0x%zx\n",
172 addr, sec_size);
173
174 while (sent < sec_size) {
175 int i;
176 u32 chksm = 0;
177 u32 reset = atomic_read(&priv->reset);
178 /* actual FW data */
179 u32 data_size = min(parser->buf_size - sizeof(*hdr),
180 sec_size - sent);
181 /* Pad to block size */
182 u32 trans_size = (data_size + sizeof(*hdr) +
183 IWMC_SDIO_BLK_SIZE - 1) &
184 ~(IWMC_SDIO_BLK_SIZE - 1);
185 ++cnt;
186
187		/* in case of reset, interrupt FW DOWNLOAD */
188 if (reset) {
189 LOG_INFO(priv, FW_DOWNLOAD,
190 "Reset detected. Abort FW download!!!");
191 ret = -ECANCELED;
192 goto exit;
193 }
194
195 memset(parser->buf, 0, parser->buf_size);
196 cmd |= IWMC_OPCODE_WRITE << CMD_HDR_OPCODE_POS;
197 cmd |= IWMC_CMD_SIGNATURE << CMD_HDR_SIGNATURE_POS;
198 cmd |= (priv->dbg.direct ? 1 : 0) << CMD_HDR_DIRECT_ACCESS_POS;
199 cmd |= (priv->dbg.checksum ? 1 : 0) << CMD_HDR_USE_CHECKSUM_POS;
200 hdr->data_size = cpu_to_le32(data_size);
201 hdr->target_addr = addr;
202
203 /* checksum is allowed for sizes divisible by 4 */
204 if (data_size & 0x3)
205 cmd &= ~CMD_HDR_USE_CHECKSUM_MSK;
206
207 memcpy(hdr->data, cur_block, data_size);
208
209
210 if (cmd & CMD_HDR_USE_CHECKSUM_MSK) {
211
212 chksm = data_size + le32_to_cpu(addr) + cmd;
213 for (i = 0; i < data_size >> 2; i++)
214 chksm += ((u32 *)cur_block)[i];
215
216 hdr->block_chksm = cpu_to_le32(chksm);
217 LOG_INFO(priv, FW_DOWNLOAD, "Checksum = 0x%X\n",
218 hdr->block_chksm);
219 }
220
221 LOG_INFO(priv, FW_DOWNLOAD, "trans#%d, len=%d, sent=%zd, "
222 "sec_size=%zd, startAddress 0x%X\n",
223 cnt, trans_size, sent, sec_size, addr);
224
225 if (priv->dbg.dump)
226 LOG_HEXDUMP(FW_DOWNLOAD, parser->buf, trans_size);
227
228
229 hdr->cmd = cpu_to_le32(cmd);
230 /* send it down */
231 /* TODO: add more proper sending and error checking */
232 ret = iwmct_tx(priv, parser->buf, trans_size);
233 if (ret != 0) {
234 LOG_INFO(priv, FW_DOWNLOAD,
235 "iwmct_tx returned %d\n", ret);
236 goto exit;
237 }
238
239 addr = cpu_to_le32(le32_to_cpu(addr) + data_size);
240 sent += data_size;
241 cur_block = p_sec + sent;
242
243 if (priv->dbg.blocks && (cnt + 1) >= priv->dbg.blocks) {
244 LOG_INFO(priv, FW_DOWNLOAD,
245 "Block number limit is reached [%d]\n",
246 priv->dbg.blocks);
247 break;
248 }
249 }
250
251 if (sent < sec_size)
252 ret = -EINVAL;
253exit:
254 LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
255 return ret;
256}
257
258static int iwmct_kick_fw(struct iwmct_priv *priv, bool jump)
259{
260 struct iwmct_parser *parser = &priv->parser;
261 struct iwmct_fw_load_hdr *hdr = (struct iwmct_fw_load_hdr *)parser->buf;
262 int ret;
263 u32 cmd;
264
265 LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
266
267 memset(parser->buf, 0, parser->buf_size);
268 cmd = IWMC_CMD_SIGNATURE << CMD_HDR_SIGNATURE_POS;
269 if (jump) {
270 cmd |= IWMC_OPCODE_JUMP << CMD_HDR_OPCODE_POS;
271 hdr->target_addr = cpu_to_le32(parser->entry_point);
272 LOG_INFO(priv, FW_DOWNLOAD, "jump address 0x%x\n",
273 parser->entry_point);
274 } else {
275 cmd |= IWMC_OPCODE_LAST_COMMAND << CMD_HDR_OPCODE_POS;
276 LOG_INFO(priv, FW_DOWNLOAD, "last command\n");
277 }
278
279 hdr->cmd = cpu_to_le32(cmd);
280
281 LOG_HEXDUMP(FW_DOWNLOAD, parser->buf, sizeof(*hdr));
282 /* send it down */
283 /* TODO: add more proper sending and error checking */
284 ret = iwmct_tx(priv, parser->buf, IWMC_SDIO_BLK_SIZE);
285 if (ret)
286 LOG_INFO(priv, FW_DOWNLOAD, "iwmct_tx returned %d", ret);
287
288 LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
289 return 0;
290}
291
292int iwmct_fw_load(struct iwmct_priv *priv)
293{
294 const u8 *fw_name = FW_NAME(FW_API_VER);
295 const struct firmware *raw;
296 const u8 *pdata;
297 size_t len;
298 __le32 addr;
299 int ret;
300
301
302 LOG_INFO(priv, FW_DOWNLOAD, "barker download request 0x%x is:\n",
303 priv->barker);
304 LOG_INFO(priv, FW_DOWNLOAD, "******* Top FW %s requested ********\n",
305 (priv->barker & BARKER_DNLOAD_TOP_MSK) ? "was" : "not");
306 LOG_INFO(priv, FW_DOWNLOAD, "******* GPS FW %s requested ********\n",
307 (priv->barker & BARKER_DNLOAD_GPS_MSK) ? "was" : "not");
308 LOG_INFO(priv, FW_DOWNLOAD, "******* BT FW %s requested ********\n",
309 (priv->barker & BARKER_DNLOAD_BT_MSK) ? "was" : "not");
310
311
312 /* get the firmware */
313 ret = request_firmware(&raw, fw_name, &priv->func->dev);
314 if (ret < 0) {
315 LOG_ERROR(priv, FW_DOWNLOAD, "%s request_firmware failed %d\n",
316 fw_name, ret);
317 goto exit;
318 }
319
320 if (raw->size < sizeof(struct iwmct_fw_sec_hdr)) {
321 LOG_ERROR(priv, FW_DOWNLOAD, "%s smaller then (%zd) (%zd)\n",
322 fw_name, sizeof(struct iwmct_fw_sec_hdr), raw->size);
323 goto exit;
324 }
325
326 LOG_INFO(priv, FW_DOWNLOAD, "Read firmware '%s'\n", fw_name);
327
328 /* clear parser struct */
329 ret = iwmct_fw_parser_init(priv, raw->data, raw->size, priv->trans_len);
330 if (ret < 0) {
331 LOG_ERROR(priv, FW_DOWNLOAD,
332 "iwmct_parser_init failed: Reason %d\n", ret);
333 goto exit;
334 }
335
336 if (!iwmct_checksum(priv)) {
337 LOG_ERROR(priv, FW_DOWNLOAD, "checksum error\n");
338 ret = -EINVAL;
339 goto exit;
340 }
341
342 /* download firmware to device */
343 while (iwmct_parse_next_section(priv, &pdata, &len, &addr)) {
344 ret = iwmct_download_section(priv, pdata, len, addr);
345 if (ret) {
346 LOG_ERROR(priv, FW_DOWNLOAD,
347 "%s download section failed\n", fw_name);
348 goto exit;
349 }
350 }
351
352 ret = iwmct_kick_fw(priv, !!(priv->barker & BARKER_DNLOAD_JUMP_MSK));
353
354exit:
355 kfree(priv->parser.buf);
356 release_firmware(raw);
357 return ret;
358}
diff --git a/drivers/misc/iwmc3200top/fw-msg.h b/drivers/misc/iwmc3200top/fw-msg.h
deleted file mode 100644
index 9e26b75bd482..000000000000
--- a/drivers/misc/iwmc3200top/fw-msg.h
+++ /dev/null
@@ -1,113 +0,0 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/fw-msg.h
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#ifndef __FWMSG_H__
28#define __FWMSG_H__
29
30#define COMM_TYPE_D2H 0xFF
31#define COMM_TYPE_H2D 0xEE
32
33#define COMM_CATEGORY_OPERATIONAL 0x00
34#define COMM_CATEGORY_DEBUG 0x01
35#define COMM_CATEGORY_TESTABILITY 0x02
36#define COMM_CATEGORY_DIAGNOSTICS 0x03
37
38#define OP_DBG_ZSTR_MSG cpu_to_le16(0x1A)
39
40#define FW_LOG_SRC_MAX 32
41#define FW_LOG_SRC_ALL 255
42
43#define FW_STRING_TABLE_ADDR cpu_to_le32(0x0C000000)
44
45#define CMD_DBG_LOG_LEVEL cpu_to_le16(0x0001)
46#define CMD_TST_DEV_RESET cpu_to_le16(0x0060)
47#define CMD_TST_FUNC_RESET cpu_to_le16(0x0062)
48#define CMD_TST_IFACE_RESET cpu_to_le16(0x0064)
49#define CMD_TST_CPU_UTILIZATION cpu_to_le16(0x0065)
50#define CMD_TST_TOP_DEEP_SLEEP cpu_to_le16(0x0080)
51#define CMD_TST_WAKEUP cpu_to_le16(0x0081)
52#define CMD_TST_FUNC_WAKEUP cpu_to_le16(0x0082)
53#define CMD_TST_FUNC_DEEP_SLEEP_REQUEST cpu_to_le16(0x0083)
54#define CMD_TST_GET_MEM_DUMP cpu_to_le16(0x0096)
55
56#define OP_OPR_ALIVE cpu_to_le16(0x0010)
57#define OP_OPR_CMD_ACK cpu_to_le16(0x001F)
58#define OP_OPR_CMD_NACK cpu_to_le16(0x0020)
59#define OP_TST_MEM_DUMP cpu_to_le16(0x0043)
60
61#define CMD_FLAG_PADDING_256 0x80
62
63#define FW_HCMD_BLOCK_SIZE 256
64
65struct msg_hdr {
66 u8 type;
67 u8 category;
68 __le16 opcode;
69 u8 seqnum;
70 u8 flags;
71 __le16 length;
72} __attribute__((__packed__));
73
74struct log_hdr {
75 __le32 timestamp;
76 u8 severity;
77 u8 logsource;
78 __le16 reserved;
79} __attribute__((__packed__));
80
81struct mdump_hdr {
82 u8 dmpid;
83 u8 frag;
84 __le16 size;
85 __le32 addr;
86} __attribute__((__packed__));
87
88struct top_msg {
89 struct msg_hdr hdr;
90 union {
91 /* D2H messages */
92 struct {
93 struct log_hdr log_hdr;
94 u8 data[1];
95 } __attribute__((__packed__)) log;
96
97 struct {
98 struct log_hdr log_hdr;
99 struct mdump_hdr md_hdr;
100 u8 data[1];
101 } __attribute__((__packed__)) mdump;
102
103 /* H2D messages */
104 struct {
105 u8 logsource;
106 u8 sevmask;
107 } __attribute__((__packed__)) logdefs[FW_LOG_SRC_MAX];
108 struct mdump_hdr mdump_req;
109 } u;
110} __attribute__((__packed__));
111
112
113#endif /* __FWMSG_H__ */
diff --git a/drivers/misc/iwmc3200top/iwmc3200top.h b/drivers/misc/iwmc3200top/iwmc3200top.h
deleted file mode 100644
index 620973ed8bf9..000000000000
--- a/drivers/misc/iwmc3200top/iwmc3200top.h
+++ /dev/null
@@ -1,205 +0,0 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/iwmc3200top.h
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#ifndef __IWMC3200TOP_H__
28#define __IWMC3200TOP_H__
29
30#include <linux/workqueue.h>
31
32#define DRV_NAME "iwmc3200top"
33#define FW_API_VER 1
34#define _FW_NAME(api) DRV_NAME "." #api ".fw"
35#define FW_NAME(api) _FW_NAME(api)
36
37#define IWMC_SDIO_BLK_SIZE 256
38#define IWMC_DEFAULT_TR_BLK 64
39#define IWMC_SDIO_DATA_ADDR 0x0
40#define IWMC_SDIO_INTR_ENABLE_ADDR 0x14
41#define IWMC_SDIO_INTR_STATUS_ADDR 0x13
42#define IWMC_SDIO_INTR_CLEAR_ADDR 0x13
43#define IWMC_SDIO_INTR_GET_SIZE_ADDR 0x2C
44
45#define COMM_HUB_HEADER_LENGTH 16
46#define LOGGER_HEADER_LENGTH 10
47
48
49#define BARKER_DNLOAD_BT_POS 0
50#define BARKER_DNLOAD_BT_MSK BIT(BARKER_DNLOAD_BT_POS)
51#define BARKER_DNLOAD_GPS_POS 1
52#define BARKER_DNLOAD_GPS_MSK BIT(BARKER_DNLOAD_GPS_POS)
53#define BARKER_DNLOAD_TOP_POS 2
54#define BARKER_DNLOAD_TOP_MSK BIT(BARKER_DNLOAD_TOP_POS)
55#define BARKER_DNLOAD_RESERVED1_POS 3
56#define BARKER_DNLOAD_RESERVED1_MSK BIT(BARKER_DNLOAD_RESERVED1_POS)
57#define BARKER_DNLOAD_JUMP_POS 4
58#define BARKER_DNLOAD_JUMP_MSK BIT(BARKER_DNLOAD_JUMP_POS)
59#define BARKER_DNLOAD_SYNC_POS 5
60#define BARKER_DNLOAD_SYNC_MSK BIT(BARKER_DNLOAD_SYNC_POS)
61#define BARKER_DNLOAD_RESERVED2_POS 6
62#define BARKER_DNLOAD_RESERVED2_MSK (0x3 << BARKER_DNLOAD_RESERVED2_POS)
63#define BARKER_DNLOAD_BARKER_POS 8
64#define BARKER_DNLOAD_BARKER_MSK (0xffffff << BARKER_DNLOAD_BARKER_POS)
65
66#define IWMC_BARKER_REBOOT (0xdeadbe << BARKER_DNLOAD_BARKER_POS)
67/* whole field barker */
68#define IWMC_BARKER_ACK 0xfeedbabe
69
70#define IWMC_CMD_SIGNATURE 0xcbbc
71
/*
 * Command header bit layout (host -> device load commands).
 *
 * Fixes vs. previous version:
 *  - CMD_HDR_OPCODE_MSK_MSK referenced the undefined
 *    CMD_HDR_OPCODE_MSK_POS and could never expand; renamed to
 *    CMD_HDR_OPCODE_MSK over the defined CMD_HDR_OPCODE_POS.
 *  - RESERVED/SIGNATURE masks wrapped a multi-bit shift in BIT(),
 *    yielding a nonsense single-bit value; the shifts now stand alone.
 *  - 0xffff << 16 overflows signed int (UB); use an unsigned constant.
 */
#define CMD_HDR_OPCODE_POS		0
#define CMD_HDR_OPCODE_MSK		(0xf << CMD_HDR_OPCODE_POS)
#define CMD_HDR_RESPONSE_CODE_POS	4
#define CMD_HDR_RESPONSE_CODE_MSK	(0xf << CMD_HDR_RESPONSE_CODE_POS)
#define CMD_HDR_USE_CHECKSUM_POS	8
#define CMD_HDR_USE_CHECKSUM_MSK	BIT(CMD_HDR_USE_CHECKSUM_POS)
#define CMD_HDR_RESPONSE_REQUIRED_POS	9
#define CMD_HDR_RESPONSE_REQUIRED_MSK	BIT(CMD_HDR_RESPONSE_REQUIRED_POS)
#define CMD_HDR_DIRECT_ACCESS_POS	10
#define CMD_HDR_DIRECT_ACCESS_MSK	BIT(CMD_HDR_DIRECT_ACCESS_POS)
#define CMD_HDR_RESERVED_POS		11
#define CMD_HDR_RESERVED_MSK		(0x1f << CMD_HDR_RESERVED_POS)
#define CMD_HDR_SIGNATURE_POS		16
#define CMD_HDR_SIGNATURE_MSK		(0xffffU << CMD_HDR_SIGNATURE_POS)
86
87enum {
88 IWMC_OPCODE_PING = 0,
89 IWMC_OPCODE_READ = 1,
90 IWMC_OPCODE_WRITE = 2,
91 IWMC_OPCODE_JUMP = 3,
92 IWMC_OPCODE_REBOOT = 4,
93 IWMC_OPCODE_PERSISTENT_WRITE = 5,
94 IWMC_OPCODE_PERSISTENT_READ = 6,
95 IWMC_OPCODE_READ_MODIFY_WRITE = 7,
96 IWMC_OPCODE_LAST_COMMAND = 15
97};
98
99struct iwmct_fw_load_hdr {
100 __le32 cmd;
101 __le32 target_addr;
102 __le32 data_size;
103 __le32 block_chksm;
104 u8 data[0];
105};
106
107/**
108 * struct iwmct_fw_hdr
109 * holds all sw components versions
110 */
111struct iwmct_fw_hdr {
112 u8 top_major;
113 u8 top_minor;
114 u8 top_revision;
115 u8 gps_major;
116 u8 gps_minor;
117 u8 gps_revision;
118 u8 bt_major;
119 u8 bt_minor;
120 u8 bt_revision;
121 u8 tic_name[31];
122};
123
124/**
125 * struct iwmct_fw_sec_hdr
126 * @type: function type
127 * @data_size: section's data size
128 * @target_addr: download address
129 */
130struct iwmct_fw_sec_hdr {
131 u8 type[4];
132 __le32 data_size;
133 __le32 target_addr;
134};
135
136/**
137 * struct iwmct_parser
138 * @file: fw image
139 * @file_size: fw size
140 * @cur_pos: position in file
141 * @buf: temp buf for download
142 * @buf_size: size of buf
143 * @entry_point: address to jump in fw kick-off
144 */
145struct iwmct_parser {
146 const u8 *file;
147 size_t file_size;
148 size_t cur_pos;
149 u8 *buf;
150 size_t buf_size;
151 u32 entry_point;
152 struct iwmct_fw_hdr versions;
153};
154
155
156struct iwmct_work_struct {
157 struct list_head list;
158 ssize_t iosize;
159};
160
161struct iwmct_dbg {
162 int blocks;
163 bool dump;
164 bool jump;
165 bool direct;
166 bool checksum;
167 bool fw_download;
168 int block_size;
169 int download_trans_blks;
170
171 char label_fw[256];
172};
173
174struct iwmct_debugfs;
175
176struct iwmct_priv {
177 struct sdio_func *func;
178 struct iwmct_debugfs *dbgfs;
179 struct iwmct_parser parser;
180 atomic_t reset;
181 atomic_t dev_sync;
182 u32 trans_len;
183 u32 barker;
184 struct iwmct_dbg dbg;
185
186 /* drivers work items */
187 struct work_struct bus_rescan_worker;
188 struct work_struct isr_worker;
189
190 /* drivers wait queue */
191 wait_queue_head_t wait_q;
192
193 /* rx request list */
194 struct list_head read_req_list;
195};
196
197extern int iwmct_tx(struct iwmct_priv *priv, void *src, int count);
198extern int iwmct_fw_load(struct iwmct_priv *priv);
199
200extern void iwmct_dbg_init_params(struct iwmct_priv *drv);
201extern void iwmct_dbg_init_drv_attrs(struct device_driver *drv);
202extern void iwmct_dbg_remove_drv_attrs(struct device_driver *drv);
203extern int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len);
204
205#endif /* __IWMC3200TOP_H__ */
diff --git a/drivers/misc/iwmc3200top/log.c b/drivers/misc/iwmc3200top/log.c
deleted file mode 100644
index a36a55a49cac..000000000000
--- a/drivers/misc/iwmc3200top/log.c
+++ /dev/null
@@ -1,348 +0,0 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/log.c
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#include <linux/kernel.h>
28#include <linux/mmc/sdio_func.h>
29#include <linux/slab.h>
30#include <linux/ctype.h>
31#include "fw-msg.h"
32#include "iwmc3200top.h"
33#include "log.h"
34
35/* Maximal hexadecimal string size of the FW memdump message */
36#define LOG_MSG_SIZE_MAX 12400
37
38/* iwmct_logdefs is a global used by log macros */
39u8 iwmct_logdefs[LOG_SRC_MAX];
40static u8 iwmct_fw_logdefs[FW_LOG_SRC_MAX];
41
42
43static int _log_set_log_filter(u8 *logdefs, int size, u8 src, u8 logmask)
44{
45 int i;
46
47 if (src < size)
48 logdefs[src] = logmask;
49 else if (src == LOG_SRC_ALL)
50 for (i = 0; i < size; i++)
51 logdefs[i] = logmask;
52 else
53 return -1;
54
55 return 0;
56}
57
58
59int iwmct_log_set_filter(u8 src, u8 logmask)
60{
61 return _log_set_log_filter(iwmct_logdefs, LOG_SRC_MAX, src, logmask);
62}
63
64
65int iwmct_log_set_fw_filter(u8 src, u8 logmask)
66{
67 return _log_set_log_filter(iwmct_fw_logdefs,
68 FW_LOG_SRC_MAX, src, logmask);
69}
70
71
72static int log_msg_format_hex(char *str, int slen, u8 *ibuf,
73 int ilen, char *pref)
74{
75 int pos = 0;
76 int i;
77 int len;
78
79 for (pos = 0, i = 0; pos < slen - 2 && pref[i] != '\0'; i++, pos++)
80 str[pos] = pref[i];
81
82 for (i = 0; pos < slen - 2 && i < ilen; pos += len, i++)
83 len = snprintf(&str[pos], slen - pos - 1, " %2.2X", ibuf[i]);
84
85 if (i < ilen)
86 return -1;
87
88 return 0;
89}
90
91/* NOTE: This function is not thread safe.
92 Currently it's called only from sdio rx worker - no race there
93*/
94void iwmct_log_top_message(struct iwmct_priv *priv, u8 *buf, int len)
95{
96 struct top_msg *msg;
97 static char logbuf[LOG_MSG_SIZE_MAX];
98
99 msg = (struct top_msg *)buf;
100
101 if (len < sizeof(msg->hdr) + sizeof(msg->u.log.log_hdr)) {
102 LOG_ERROR(priv, FW_MSG, "Log message from TOP "
103 "is too short %d (expected %zd)\n",
104 len, sizeof(msg->hdr) + sizeof(msg->u.log.log_hdr));
105 return;
106 }
107
108 if (!(iwmct_fw_logdefs[msg->u.log.log_hdr.logsource] &
109 BIT(msg->u.log.log_hdr.severity)) ||
110 !(iwmct_logdefs[LOG_SRC_FW_MSG] & BIT(msg->u.log.log_hdr.severity)))
111 return;
112
113 switch (msg->hdr.category) {
114 case COMM_CATEGORY_TESTABILITY:
115 if (!(iwmct_logdefs[LOG_SRC_TST] &
116 BIT(msg->u.log.log_hdr.severity)))
117 return;
118 if (log_msg_format_hex(logbuf, LOG_MSG_SIZE_MAX, buf,
119 le16_to_cpu(msg->hdr.length) +
120 sizeof(msg->hdr), "<TST>"))
121 LOG_WARNING(priv, TST,
122 "TOP TST message is too long, truncating...");
123 LOG_WARNING(priv, TST, "%s\n", logbuf);
124 break;
125 case COMM_CATEGORY_DEBUG:
126 if (msg->hdr.opcode == OP_DBG_ZSTR_MSG)
127 LOG_INFO(priv, FW_MSG, "%s %s", "<DBG>",
128 ((u8 *)msg) + sizeof(msg->hdr)
129 + sizeof(msg->u.log.log_hdr));
130 else {
131 if (log_msg_format_hex(logbuf, LOG_MSG_SIZE_MAX, buf,
132 le16_to_cpu(msg->hdr.length)
133 + sizeof(msg->hdr),
134 "<DBG>"))
135 LOG_WARNING(priv, FW_MSG,
136 "TOP DBG message is too long,"
137 "truncating...");
138 LOG_WARNING(priv, FW_MSG, "%s\n", logbuf);
139 }
140 break;
141 default:
142 break;
143 }
144}
145
146static int _log_get_filter_str(u8 *logdefs, int logdefsz, char *buf, int size)
147{
148 int i, pos, len;
149 for (i = 0, pos = 0; (pos < size-1) && (i < logdefsz); i++) {
150 len = snprintf(&buf[pos], size - pos - 1, "0x%02X%02X,",
151 i, logdefs[i]);
152 pos += len;
153 }
154 buf[pos-1] = '\n';
155 buf[pos] = '\0';
156
157 if (i < logdefsz)
158 return -1;
159 return 0;
160}
161
162int log_get_filter_str(char *buf, int size)
163{
164 return _log_get_filter_str(iwmct_logdefs, LOG_SRC_MAX, buf, size);
165}
166
167int log_get_fw_filter_str(char *buf, int size)
168{
169 return _log_get_filter_str(iwmct_fw_logdefs, FW_LOG_SRC_MAX, buf, size);
170}
171
172#define HEXADECIMAL_RADIX 16
173#define LOG_SRC_FORMAT 7 /* log level is in format of "0xXXXX," */
174
175ssize_t show_iwmct_log_level(struct device *d,
176 struct device_attribute *attr, char *buf)
177{
178 struct iwmct_priv *priv = dev_get_drvdata(d);
179 char *str_buf;
180 int buf_size;
181 ssize_t ret;
182
183 buf_size = (LOG_SRC_FORMAT * LOG_SRC_MAX) + 1;
184 str_buf = kzalloc(buf_size, GFP_KERNEL);
185 if (!str_buf) {
186 LOG_ERROR(priv, DEBUGFS,
187 "failed to allocate %d bytes\n", buf_size);
188 ret = -ENOMEM;
189 goto exit;
190 }
191
192 if (log_get_filter_str(str_buf, buf_size) < 0) {
193 ret = -EINVAL;
194 goto exit;
195 }
196
197 ret = sprintf(buf, "%s", str_buf);
198
199exit:
200 kfree(str_buf);
201 return ret;
202}
203
204ssize_t store_iwmct_log_level(struct device *d,
205 struct device_attribute *attr,
206 const char *buf, size_t count)
207{
208 struct iwmct_priv *priv = dev_get_drvdata(d);
209 char *token, *str_buf = NULL;
210 long val;
211 ssize_t ret = count;
212 u8 src, mask;
213
214 if (!count)
215 goto exit;
216
217 str_buf = kzalloc(count, GFP_KERNEL);
218 if (!str_buf) {
219 LOG_ERROR(priv, DEBUGFS,
220 "failed to allocate %zd bytes\n", count);
221 ret = -ENOMEM;
222 goto exit;
223 }
224
225 memcpy(str_buf, buf, count);
226
227 while ((token = strsep(&str_buf, ",")) != NULL) {
228 while (isspace(*token))
229 ++token;
230 if (strict_strtol(token, HEXADECIMAL_RADIX, &val)) {
231 LOG_ERROR(priv, DEBUGFS,
232 "failed to convert string to long %s\n",
233 token);
234 ret = -EINVAL;
235 goto exit;
236 }
237
238 mask = val & 0xFF;
239 src = (val & 0XFF00) >> 8;
240 iwmct_log_set_filter(src, mask);
241 }
242
243exit:
244 kfree(str_buf);
245 return ret;
246}
247
248ssize_t show_iwmct_log_level_fw(struct device *d,
249 struct device_attribute *attr, char *buf)
250{
251 struct iwmct_priv *priv = dev_get_drvdata(d);
252 char *str_buf;
253 int buf_size;
254 ssize_t ret;
255
256 buf_size = (LOG_SRC_FORMAT * FW_LOG_SRC_MAX) + 2;
257
258 str_buf = kzalloc(buf_size, GFP_KERNEL);
259 if (!str_buf) {
260 LOG_ERROR(priv, DEBUGFS,
261 "failed to allocate %d bytes\n", buf_size);
262 ret = -ENOMEM;
263 goto exit;
264 }
265
266 if (log_get_fw_filter_str(str_buf, buf_size) < 0) {
267 ret = -EINVAL;
268 goto exit;
269 }
270
271 ret = sprintf(buf, "%s", str_buf);
272
273exit:
274 kfree(str_buf);
275 return ret;
276}
277
278ssize_t store_iwmct_log_level_fw(struct device *d,
279 struct device_attribute *attr,
280 const char *buf, size_t count)
281{
282 struct iwmct_priv *priv = dev_get_drvdata(d);
283 struct top_msg cmd;
284 char *token, *str_buf = NULL;
285 ssize_t ret = count;
286 u16 cmdlen = 0;
287 int i;
288 long val;
289 u8 src, mask;
290
291 if (!count)
292 goto exit;
293
294 str_buf = kzalloc(count, GFP_KERNEL);
295 if (!str_buf) {
296 LOG_ERROR(priv, DEBUGFS,
297 "failed to allocate %zd bytes\n", count);
298 ret = -ENOMEM;
299 goto exit;
300 }
301
302 memcpy(str_buf, buf, count);
303
304 cmd.hdr.type = COMM_TYPE_H2D;
305 cmd.hdr.category = COMM_CATEGORY_DEBUG;
306 cmd.hdr.opcode = CMD_DBG_LOG_LEVEL;
307
308 for (i = 0; ((token = strsep(&str_buf, ",")) != NULL) &&
309 (i < FW_LOG_SRC_MAX); i++) {
310
311 while (isspace(*token))
312 ++token;
313
314 if (strict_strtol(token, HEXADECIMAL_RADIX, &val)) {
315 LOG_ERROR(priv, DEBUGFS,
316 "failed to convert string to long %s\n",
317 token);
318 ret = -EINVAL;
319 goto exit;
320 }
321
322 mask = val & 0xFF; /* LSB */
323 src = (val & 0XFF00) >> 8; /* 2nd least significant byte. */
324 iwmct_log_set_fw_filter(src, mask);
325
326 cmd.u.logdefs[i].logsource = src;
327 cmd.u.logdefs[i].sevmask = mask;
328 }
329
330 cmd.hdr.length = cpu_to_le16(i * sizeof(cmd.u.logdefs[0]));
331 cmdlen = (i * sizeof(cmd.u.logdefs[0]) + sizeof(cmd.hdr));
332
333 ret = iwmct_send_hcmd(priv, (u8 *)&cmd, cmdlen);
334 if (ret) {
335 LOG_ERROR(priv, DEBUGFS,
336 "Failed to send %d bytes of fwcmd, ret=%zd\n",
337 cmdlen, ret);
338 goto exit;
339 } else
340 LOG_INFO(priv, DEBUGFS, "fwcmd sent (%d bytes)\n", cmdlen);
341
342 ret = count;
343
344exit:
345 kfree(str_buf);
346 return ret;
347}
348
diff --git a/drivers/misc/iwmc3200top/log.h b/drivers/misc/iwmc3200top/log.h
deleted file mode 100644
index 4434bb16cea7..000000000000
--- a/drivers/misc/iwmc3200top/log.h
+++ /dev/null
@@ -1,171 +0,0 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/log.h
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#ifndef __LOG_H__
28#define __LOG_H__
29
30
31/* log severity:
32 * The log levels here match FW log levels
33 * so values need to stay as is */
34#define LOG_SEV_CRITICAL 0
35#define LOG_SEV_ERROR 1
36#define LOG_SEV_WARNING 2
37#define LOG_SEV_INFO 3
38#define LOG_SEV_INFOEX 4
39
40/* Log levels not defined for FW */
41#define LOG_SEV_TRACE 5
42#define LOG_SEV_DUMP 6
43
44#define LOG_SEV_FW_FILTER_ALL \
45 (BIT(LOG_SEV_CRITICAL) | \
46 BIT(LOG_SEV_ERROR) | \
47 BIT(LOG_SEV_WARNING) | \
48 BIT(LOG_SEV_INFO) | \
49 BIT(LOG_SEV_INFOEX))
50
51#define LOG_SEV_FILTER_ALL \
52 (BIT(LOG_SEV_CRITICAL) | \
53 BIT(LOG_SEV_ERROR) | \
54 BIT(LOG_SEV_WARNING) | \
55 BIT(LOG_SEV_INFO) | \
56 BIT(LOG_SEV_INFOEX) | \
57 BIT(LOG_SEV_TRACE) | \
58 BIT(LOG_SEV_DUMP))
59
60/* log source */
61#define LOG_SRC_INIT 0
62#define LOG_SRC_DEBUGFS 1
63#define LOG_SRC_FW_DOWNLOAD 2
64#define LOG_SRC_FW_MSG 3
65#define LOG_SRC_TST 4
66#define LOG_SRC_IRQ 5
67
68#define LOG_SRC_MAX 6
69#define LOG_SRC_ALL 0xFF
70
71/**
72 * Default intitialization runtime log level
73 */
74#ifndef LOG_SEV_FILTER_RUNTIME
75#define LOG_SEV_FILTER_RUNTIME \
76 (BIT(LOG_SEV_CRITICAL) | \
77 BIT(LOG_SEV_ERROR) | \
78 BIT(LOG_SEV_WARNING))
79#endif
80
81#ifndef FW_LOG_SEV_FILTER_RUNTIME
82#define FW_LOG_SEV_FILTER_RUNTIME LOG_SEV_FILTER_ALL
83#endif
84
85#ifdef CONFIG_IWMC3200TOP_DEBUG
86/**
87 * Log macros
88 */
89
90#define priv2dev(priv) (&(priv->func)->dev)
91
92#define LOG_CRITICAL(priv, src, fmt, args...) \
93do { \
94 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_CRITICAL)) \
95 dev_crit(priv2dev(priv), "%s %d: " fmt, \
96 __func__, __LINE__, ##args); \
97} while (0)
98
99#define LOG_ERROR(priv, src, fmt, args...) \
100do { \
101 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_ERROR)) \
102 dev_err(priv2dev(priv), "%s %d: " fmt, \
103 __func__, __LINE__, ##args); \
104} while (0)
105
106#define LOG_WARNING(priv, src, fmt, args...) \
107do { \
108 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_WARNING)) \
109 dev_warn(priv2dev(priv), "%s %d: " fmt, \
110 __func__, __LINE__, ##args); \
111} while (0)
112
113#define LOG_INFO(priv, src, fmt, args...) \
114do { \
115 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFO)) \
116 dev_info(priv2dev(priv), "%s %d: " fmt, \
117 __func__, __LINE__, ##args); \
118} while (0)
119
120#define LOG_TRACE(priv, src, fmt, args...) \
121do { \
122 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_TRACE)) \
123 dev_dbg(priv2dev(priv), "%s %d: " fmt, \
124 __func__, __LINE__, ##args); \
125} while (0)
126
127#define LOG_HEXDUMP(src, ptr, len) \
128do { \
129 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_DUMP)) \
130 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, \
131 16, 1, ptr, len, false); \
132} while (0)
133
134void iwmct_log_top_message(struct iwmct_priv *priv, u8 *buf, int len);
135
136extern u8 iwmct_logdefs[];
137
138int iwmct_log_set_filter(u8 src, u8 logmask);
139int iwmct_log_set_fw_filter(u8 src, u8 logmask);
140
141ssize_t show_iwmct_log_level(struct device *d,
142 struct device_attribute *attr, char *buf);
143ssize_t store_iwmct_log_level(struct device *d,
144 struct device_attribute *attr,
145 const char *buf, size_t count);
146ssize_t show_iwmct_log_level_fw(struct device *d,
147 struct device_attribute *attr, char *buf);
148ssize_t store_iwmct_log_level_fw(struct device *d,
149 struct device_attribute *attr,
150 const char *buf, size_t count);
151
152#else
153
154#define LOG_CRITICAL(priv, src, fmt, args...)
155#define LOG_ERROR(priv, src, fmt, args...)
156#define LOG_WARNING(priv, src, fmt, args...)
157#define LOG_INFO(priv, src, fmt, args...)
158#define LOG_TRACE(priv, src, fmt, args...)
159#define LOG_HEXDUMP(src, ptr, len)
160
161static inline void iwmct_log_top_message(struct iwmct_priv *priv,
162 u8 *buf, int len) {}
163static inline int iwmct_log_set_filter(u8 src, u8 logmask) { return 0; }
164static inline int iwmct_log_set_fw_filter(u8 src, u8 logmask) { return 0; }
165
166#endif /* CONFIG_IWMC3200TOP_DEBUG */
167
168int log_get_filter_str(char *buf, int size);
169int log_get_fw_filter_str(char *buf, int size);
170
171#endif /* __LOG_H__ */
diff --git a/drivers/misc/iwmc3200top/main.c b/drivers/misc/iwmc3200top/main.c
deleted file mode 100644
index 701eb600b127..000000000000
--- a/drivers/misc/iwmc3200top/main.c
+++ /dev/null
@@ -1,662 +0,0 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/main.c
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#include <linux/module.h>
28#include <linux/slab.h>
29#include <linux/init.h>
30#include <linux/kernel.h>
31#include <linux/debugfs.h>
32#include <linux/mmc/sdio_ids.h>
33#include <linux/mmc/sdio_func.h>
34#include <linux/mmc/sdio.h>
35
36#include "iwmc3200top.h"
37#include "log.h"
38#include "fw-msg.h"
39#include "debugfs.h"
40
41
42#define DRIVER_DESCRIPTION "Intel(R) IWMC 3200 Top Driver"
43#define DRIVER_COPYRIGHT "Copyright (c) 2008 Intel Corporation."
44
45#define DRIVER_VERSION "0.1.62"
46
47MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
48MODULE_VERSION(DRIVER_VERSION);
49MODULE_LICENSE("GPL");
50MODULE_AUTHOR(DRIVER_COPYRIGHT);
51MODULE_FIRMWARE(FW_NAME(FW_API_VER));
52
53
54static inline int __iwmct_tx(struct iwmct_priv *priv, void *src, int count)
55{
56 return sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR, src, count);
57
58}
59int iwmct_tx(struct iwmct_priv *priv, void *src, int count)
60{
61 int ret;
62 sdio_claim_host(priv->func);
63 ret = __iwmct_tx(priv, src, count);
64 sdio_release_host(priv->func);
65 return ret;
66}
67/*
68 * This workers main task is to wait for OP_OPR_ALIVE
69 * from TOP FW until ALIVE_MSG_TIMOUT timeout is elapsed.
70 * When OP_OPR_ALIVE received it will issue
71 * a call to "bus_rescan_devices".
72 */
73static void iwmct_rescan_worker(struct work_struct *ws)
74{
75 struct iwmct_priv *priv;
76 int ret;
77
78 priv = container_of(ws, struct iwmct_priv, bus_rescan_worker);
79
80 LOG_INFO(priv, FW_MSG, "Calling bus_rescan\n");
81
82 ret = bus_rescan_devices(priv->func->dev.bus);
83 if (ret < 0)
84 LOG_INFO(priv, INIT, "bus_rescan_devices FAILED!!!\n");
85}
86
87static void op_top_message(struct iwmct_priv *priv, struct top_msg *msg)
88{
89 switch (msg->hdr.opcode) {
90 case OP_OPR_ALIVE:
91 LOG_INFO(priv, FW_MSG, "Got ALIVE from device, wake rescan\n");
92 schedule_work(&priv->bus_rescan_worker);
93 break;
94 default:
95 LOG_INFO(priv, FW_MSG, "Received msg opcode 0x%X\n",
96 msg->hdr.opcode);
97 break;
98 }
99}
100
101
102static void handle_top_message(struct iwmct_priv *priv, u8 *buf, int len)
103{
104 struct top_msg *msg;
105
106 msg = (struct top_msg *)buf;
107
108 if (msg->hdr.type != COMM_TYPE_D2H) {
109 LOG_ERROR(priv, FW_MSG,
110 "Message from TOP with invalid message type 0x%X\n",
111 msg->hdr.type);
112 return;
113 }
114
115 if (len < sizeof(msg->hdr)) {
116 LOG_ERROR(priv, FW_MSG,
117 "Message from TOP is too short for message header "
118 "received %d bytes, expected at least %zd bytes\n",
119 len, sizeof(msg->hdr));
120 return;
121 }
122
123 if (len < le16_to_cpu(msg->hdr.length) + sizeof(msg->hdr)) {
124 LOG_ERROR(priv, FW_MSG,
125 "Message length (%d bytes) is shorter than "
126 "in header (%d bytes)\n",
127 len, le16_to_cpu(msg->hdr.length));
128 return;
129 }
130
131 switch (msg->hdr.category) {
132 case COMM_CATEGORY_OPERATIONAL:
133 op_top_message(priv, (struct top_msg *)buf);
134 break;
135
136 case COMM_CATEGORY_DEBUG:
137 case COMM_CATEGORY_TESTABILITY:
138 case COMM_CATEGORY_DIAGNOSTICS:
139 iwmct_log_top_message(priv, buf, len);
140 break;
141
142 default:
143 LOG_ERROR(priv, FW_MSG,
144 "Message from TOP with unknown category 0x%X\n",
145 msg->hdr.category);
146 break;
147 }
148}
149
150int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len)
151{
152 int ret;
153 u8 *buf;
154
155 LOG_TRACE(priv, FW_MSG, "Sending hcmd:\n");
156
157 /* add padding to 256 for IWMC */
158 ((struct top_msg *)cmd)->hdr.flags |= CMD_FLAG_PADDING_256;
159
160 LOG_HEXDUMP(FW_MSG, cmd, len);
161
162 if (len > FW_HCMD_BLOCK_SIZE) {
163 LOG_ERROR(priv, FW_MSG, "size %d exceeded hcmd max size %d\n",
164 len, FW_HCMD_BLOCK_SIZE);
165 return -1;
166 }
167
168 buf = kzalloc(FW_HCMD_BLOCK_SIZE, GFP_KERNEL);
169 if (!buf) {
170 LOG_ERROR(priv, FW_MSG, "kzalloc error, buf size %d\n",
171 FW_HCMD_BLOCK_SIZE);
172 return -1;
173 }
174
175 memcpy(buf, cmd, len);
176 ret = iwmct_tx(priv, buf, FW_HCMD_BLOCK_SIZE);
177
178 kfree(buf);
179 return ret;
180}
181
182
/*
 * Deferred-work half of the interrupt path: pop one pending read request,
 * read that many bytes from the device's data window, then either echo a
 * sync barker, kick the FW download, or hand the buffer to the TOP
 * message parser.  The SDIO host is held while the request list and the
 * device are touched, and released before handle_top_message()/
 * iwmct_fw_load() run.
 */
static void iwmct_irq_read_worker(struct work_struct *ws)
{
	struct iwmct_priv *priv;
	struct iwmct_work_struct *read_req;
	__le32 *buf = NULL;
	int ret;
	int iosize;
	u32 barker;
	bool is_barker;

	priv = container_of(ws, struct iwmct_priv, isr_worker);

	LOG_TRACE(priv, IRQ, "enter iwmct_irq_read_worker %p\n", ws);

	/* --------------------- Handshake with device -------------------- */
	sdio_claim_host(priv->func);

	/* all list manipulations have to be protected by
	 * sdio_claim_host/sdio_release_host */
	if (list_empty(&priv->read_req_list)) {
		LOG_ERROR(priv, IRQ, "read_req_list empty in read worker\n");
		goto exit_release;
	}

	/* Pop the oldest request; it only carries the transfer size. */
	read_req = list_entry(priv->read_req_list.next,
			      struct iwmct_work_struct, list);

	list_del(&read_req->list);
	iosize = read_req->iosize;
	kfree(read_req);

	buf = kzalloc(iosize, GFP_KERNEL);
	if (!buf) {
		LOG_ERROR(priv, IRQ, "kzalloc error, buf size %d\n", iosize);
		goto exit_release;
	}

	LOG_INFO(priv, IRQ, "iosize=%d, buf=%p, func=%d\n",
			iosize, buf, priv->func->num);

	/* read from device */
	ret = sdio_memcpy_fromio(priv->func, buf, IWMC_SDIO_DATA_ADDR, iosize);
	if (ret) {
		LOG_ERROR(priv, IRQ, "error %d reading buffer\n", ret);
		goto exit_release;
	}

	LOG_HEXDUMP(IRQ, (u8 *)buf, iosize);

	/* NOTE(review): assumes iosize >= 16 so buf[0..3] are valid when a
	 * barker candidate is matched below — confirm the device never
	 * signals a smaller transfer for a barker. */
	barker = le32_to_cpu(buf[0]);

	/* Verify whether it's a barker and if not - treat as regular Rx */
	if (barker == IWMC_BARKER_ACK ||
	    (barker & BARKER_DNLOAD_BARKER_MSK) == IWMC_BARKER_REBOOT) {

		/* Valid Barker is equal on first 4 dwords */
		is_barker = (buf[1] == buf[0]) &&
			    (buf[2] == buf[0]) &&
			    (buf[3] == buf[0]);

		if (!is_barker) {
			LOG_WARNING(priv, IRQ,
				"Potentially inconsistent barker "
				"%08X_%08X_%08X_%08X\n",
				le32_to_cpu(buf[0]), le32_to_cpu(buf[1]),
				le32_to_cpu(buf[2]), le32_to_cpu(buf[3]));
		}
	} else {
		is_barker = false;
	}

	/* Handle Top CommHub message */
	if (!is_barker) {
		/* Release the host before parsing; the parser may sleep. */
		sdio_release_host(priv->func);
		handle_top_message(priv, (u8 *)buf, iosize);
		goto exit;
	} else if (barker == IWMC_BARKER_ACK) { /* Handle barkers */
		/* An ACK is only meaningful while a sync is outstanding. */
		if (atomic_read(&priv->dev_sync) == 0) {
			LOG_ERROR(priv, IRQ,
				  "ACK barker arrived out-of-sync\n");
			goto exit_release;
		}

		/* Continuing to FW download (after Sync is completed)*/
		atomic_set(&priv->dev_sync, 0);
		LOG_INFO(priv, IRQ, "ACK barker arrived "
				"- starting FW download\n");
	} else { /* REBOOT barker */
		LOG_INFO(priv, IRQ, "Received reboot barker: %x\n", barker);
		priv->barker = barker;

		if (barker & BARKER_DNLOAD_SYNC_MSK) {
			/* Send the same barker back */
			ret = __iwmct_tx(priv, buf, iosize);
			if (ret) {
				LOG_ERROR(priv, IRQ,
					 "error %d echoing barker\n", ret);
				goto exit_release;
			}
			LOG_INFO(priv, IRQ, "Echoing barker to device\n");
			atomic_set(&priv->dev_sync, 1);
			/* Wait for the device's ACK before downloading. */
			goto exit_release;
		}

		/* Continuing to FW download (without Sync) */
		LOG_INFO(priv, IRQ, "No sync requested "
				"- starting FW download\n");
	}

	sdio_release_host(priv->func);

	if (priv->dbg.fw_download)
		iwmct_fw_load(priv);
	else
		LOG_ERROR(priv, IRQ, "FW download not allowed\n");

	goto exit;

exit_release:
	sdio_release_host(priv->func);
exit:
	kfree(buf);
	LOG_TRACE(priv, IRQ, "exit iwmct_irq_read_worker\n");
}
307
308static void iwmct_irq(struct sdio_func *func)
309{
310 struct iwmct_priv *priv;
311 int val, ret;
312 int iosize;
313 int addr = IWMC_SDIO_INTR_GET_SIZE_ADDR;
314 struct iwmct_work_struct *read_req;
315
316 priv = sdio_get_drvdata(func);
317
318 LOG_TRACE(priv, IRQ, "enter iwmct_irq\n");
319
320 /* read the function's status register */
321 val = sdio_readb(func, IWMC_SDIO_INTR_STATUS_ADDR, &ret);
322
323 LOG_TRACE(priv, IRQ, "iir value = %d, ret=%d\n", val, ret);
324
325 if (!val) {
326 LOG_ERROR(priv, IRQ, "iir = 0, exiting ISR\n");
327 goto exit_clear_intr;
328 }
329
330
331 /*
332 * read 2 bytes of the transaction size
333 * IMPORTANT: sdio transaction size has to be read before clearing
334 * sdio interrupt!!!
335 */
336 val = sdio_readb(priv->func, addr++, &ret);
337 iosize = val;
338 val = sdio_readb(priv->func, addr++, &ret);
339 iosize += val << 8;
340
341 LOG_INFO(priv, IRQ, "READ size %d\n", iosize);
342
343 if (iosize == 0) {
344 LOG_ERROR(priv, IRQ, "READ size %d, exiting ISR\n", iosize);
345 goto exit_clear_intr;
346 }
347
348 /* allocate a work structure to pass iosize to the worker */
349 read_req = kzalloc(sizeof(struct iwmct_work_struct), GFP_KERNEL);
350 if (!read_req) {
351 LOG_ERROR(priv, IRQ, "failed to allocate read_req, exit ISR\n");
352 goto exit_clear_intr;
353 }
354
355 INIT_LIST_HEAD(&read_req->list);
356 read_req->iosize = iosize;
357
358 list_add_tail(&priv->read_req_list, &read_req->list);
359
360 /* clear the function's interrupt request bit (write 1 to clear) */
361 sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret);
362
363 schedule_work(&priv->isr_worker);
364
365 LOG_TRACE(priv, IRQ, "exit iwmct_irq\n");
366
367 return;
368
369exit_clear_intr:
370 /* clear the function's interrupt request bit (write 1 to clear) */
371 sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret);
372}
373
374
/* Module parameters mirrored into priv->dbg by iwmct_dbg_init_params(). */

/* Max number of blocks to send per download transaction (0 = no limit). */
static int blocks;
module_param(blocks, int, 0604);
MODULE_PARM_DESC(blocks, "max_blocks_to_send");

/* Hex-dump transferred content when set. */
static bool dump;
module_param(dump, bool, 0604);
MODULE_PARM_DESC(dump, "dump_hex_content");

static bool jump = 1;
module_param(jump, bool, 0604);

static bool direct = 1;
module_param(direct, bool, 0604);

static bool checksum = 1;
module_param(checksum, bool, 0604);

/* Allow the worker to start a firmware download (default on). */
static bool fw_download = 1;
module_param(fw_download, bool, 0604);

/* SDIO block size; read-only (0404) since it is set once at probe. */
static int block_size = IWMC_SDIO_BLK_SIZE;
module_param(block_size, int, 0404);

static int download_trans_blks = IWMC_DEFAULT_TR_BLK;
module_param(download_trans_blks, int, 0604);

/* NOTE(review): rubbish_barker is declared but not referenced in this
 * file — confirm it is consumed elsewhere or drop it. */
static bool rubbish_barker;
module_param(rubbish_barker, bool, 0604);

#ifdef CONFIG_IWMC3200TOP_DEBUG
/* Each entry encodes (src << 8) | severity mask; see dbg_init_params. */
static int log_level[LOG_SRC_MAX];
static unsigned int log_level_argc;
module_param_array(log_level, int, &log_level_argc, 0604);
MODULE_PARM_DESC(log_level, "log_level");

static int log_level_fw[FW_LOG_SRC_MAX];
static unsigned int log_level_fw_argc;
module_param_array(log_level_fw, int, &log_level_fw_argc, 0604);
MODULE_PARM_DESC(log_level_fw, "log_level_fw");
#endif
415
/*
 * Copy the module parameters into the per-device debug configuration and,
 * when debug logging is built in, apply the log_level/log_level_fw
 * arrays (each entry encodes source in the high byte and severity mask
 * in the low byte).
 */
void iwmct_dbg_init_params(struct iwmct_priv *priv)
{
#ifdef CONFIG_IWMC3200TOP_DEBUG
	int i;

	for (i = 0; i < log_level_argc; i++) {
		dev_notice(&priv->func->dev, "log_level[%d]=0x%X\n",
						i, log_level[i]);
		iwmct_log_set_filter((log_level[i] >> 8) & 0xFF,
			       log_level[i] & 0xFF);
	}
	for (i = 0; i < log_level_fw_argc; i++) {
		dev_notice(&priv->func->dev, "log_level_fw[%d]=0x%X\n",
						i, log_level_fw[i]);
		iwmct_log_set_fw_filter((log_level_fw[i] >> 8) & 0xFF,
				  log_level_fw[i] & 0xFF);
	}
#endif

	/* Snapshot the remaining parameters into priv->dbg. */
	priv->dbg.blocks = blocks;
	LOG_INFO(priv, INIT, "blocks=%d\n", blocks);
	priv->dbg.dump = (bool)dump;
	LOG_INFO(priv, INIT, "dump=%d\n", dump);
	priv->dbg.jump = (bool)jump;
	LOG_INFO(priv, INIT, "jump=%d\n", jump);
	priv->dbg.direct = (bool)direct;
	LOG_INFO(priv, INIT, "direct=%d\n", direct);
	priv->dbg.checksum = (bool)checksum;
	LOG_INFO(priv, INIT, "checksum=%d\n", checksum);
	priv->dbg.fw_download = (bool)fw_download;
	LOG_INFO(priv, INIT, "fw_download=%d\n", fw_download);
	priv->dbg.block_size = block_size;
	LOG_INFO(priv, INIT, "block_size=%d\n", block_size);
	priv->dbg.download_trans_blks = download_trans_blks;
	LOG_INFO(priv, INIT, "download_trans_blks=%d\n", download_trans_blks);
}
452
453/*****************************************************************************
454 *
455 * sysfs attributes
456 *
457 *****************************************************************************/
458static ssize_t show_iwmct_fw_version(struct device *d,
459 struct device_attribute *attr, char *buf)
460{
461 struct iwmct_priv *priv = dev_get_drvdata(d);
462 return sprintf(buf, "%s\n", priv->dbg.label_fw);
463}
464static DEVICE_ATTR(cc_label_fw, S_IRUGO, show_iwmct_fw_version, NULL);
465
#ifdef CONFIG_IWMC3200TOP_DEBUG
/* Writable log-filter attributes, only present in debug builds. */
static DEVICE_ATTR(log_level, S_IWUSR | S_IRUGO,
		   show_iwmct_log_level, store_iwmct_log_level);
static DEVICE_ATTR(log_level_fw, S_IWUSR | S_IRUGO,
		   show_iwmct_log_level_fw, store_iwmct_log_level_fw);
#endif

/* Attributes registered on the SDIO function's device at probe time. */
static struct attribute *iwmct_sysfs_entries[] = {
	&dev_attr_cc_label_fw.attr,
#ifdef CONFIG_IWMC3200TOP_DEBUG
	&dev_attr_log_level.attr,
	&dev_attr_log_level_fw.attr,
#endif
	NULL
};

static struct attribute_group iwmct_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = iwmct_sysfs_entries,
};
486
487
/*
 * SDIO probe: allocate per-device state, wake and enable the function,
 * register sysfs/debugfs entries, claim the SDIO IRQ and enable the
 * device-side interrupt.  Returns 0 on success or a negative error.
 */
static int iwmct_probe(struct sdio_func *func,
			       const struct sdio_device_id *id)
{
	struct iwmct_priv *priv;
	int ret;
	int val = 1;
	int addr = IWMC_SDIO_INTR_ENABLE_ADDR;

	dev_dbg(&func->dev, "enter iwmct_probe\n");

	dev_dbg(&func->dev, "IRQ polling period id %u msecs, HZ is %d\n",
		jiffies_to_msecs(2147483647), HZ);

	priv = kzalloc(sizeof(struct iwmct_priv), GFP_KERNEL);
	if (!priv) {
		dev_err(&func->dev, "kzalloc error\n");
		return -ENOMEM;
	}
	priv->func = func;
	sdio_set_drvdata(func, priv);

	INIT_WORK(&priv->bus_rescan_worker, iwmct_rescan_worker);
	INIT_WORK(&priv->isr_worker, iwmct_irq_read_worker);

	init_waitqueue_head(&priv->wait_q);

	sdio_claim_host(func);
	/* FIXME: Remove after it is fixed in the Boot ROM upgrade */
	func->enable_timeout = 10;

	/* In our HW, setting the block size also wakes up the boot rom. */
	/* NOTE(review): priv->dbg.block_size is still 0 here — it is only
	 * populated by iwmct_dbg_init_params() further down.  Confirm that
	 * a zero block size is intended at this point. */
	ret = sdio_set_block_size(func, priv->dbg.block_size);
	if (ret) {
		LOG_ERROR(priv, INIT,
			  "sdio_set_block_size() failure: %d\n", ret);
		goto error_sdio_enable;
	}

	ret = sdio_enable_func(func);
	if (ret) {
		LOG_ERROR(priv, INIT, "sdio_enable_func() failure: %d\n", ret);
		goto error_sdio_enable;
	}

	/* init reset and dev_sync states */
	atomic_set(&priv->reset, 0);
	atomic_set(&priv->dev_sync, 0);

	/* init read req queue */
	INIT_LIST_HEAD(&priv->read_req_list);

	/* process configurable parameters */
	iwmct_dbg_init_params(priv);
	ret = sysfs_create_group(&func->dev.kobj, &iwmct_attribute_group);
	if (ret) {
		LOG_ERROR(priv, INIT, "Failed to register attributes and "
			  "initialize module_params\n");
		goto error_dev_attrs;
	}

	iwmct_dbgfs_register(priv, DRV_NAME);

	if (!priv->dbg.direct && priv->dbg.download_trans_blks > 8) {
		LOG_INFO(priv, INIT,
			 "Reducing transaction to 8 blocks = 2K (from %d)\n",
			 priv->dbg.download_trans_blks);
		priv->dbg.download_trans_blks = 8;
	}
	priv->trans_len = priv->dbg.download_trans_blks * priv->dbg.block_size;
	LOG_INFO(priv, INIT, "Transaction length = %d\n", priv->trans_len);

	ret = sdio_claim_irq(func, iwmct_irq);
	if (ret) {
		LOG_ERROR(priv, INIT, "sdio_claim_irq() failure: %d\n", ret);
		goto error_claim_irq;
	}


	/* Enable function's interrupt */
	sdio_writeb(priv->func, val, addr, &ret);
	if (ret) {
		LOG_ERROR(priv, INIT, "Failure writing to "
			  "Interrupt Enable Register (%d): %d\n", addr, ret);
		goto error_enable_int;
	}

	sdio_release_host(func);

	LOG_INFO(priv, INIT, "exit iwmct_probe\n");

	return ret;

	/* NOTE(review): when sysfs_create_group() fails we jump to
	 * error_dev_attrs, which unregisters a debugfs entry and removes a
	 * sysfs group that were never created on that path; priv is also
	 * never freed on any error path — verify against the teardown
	 * expectations of the SDIO core. */
error_enable_int:
	sdio_release_irq(func);
error_claim_irq:
	sdio_disable_func(func);
error_dev_attrs:
	iwmct_dbgfs_unregister(priv->dbgfs);
	sysfs_remove_group(&func->dev.kobj, &iwmct_attribute_group);
error_sdio_enable:
	sdio_release_host(func);
	return ret;
}
591
/*
 * SDIO remove: release the IRQ first so no new work is queued, flush the
 * workers, then tear down the function, sysfs/debugfs entries and any
 * read requests that were queued but never serviced.
 */
static void iwmct_remove(struct sdio_func *func)
{
	struct iwmct_work_struct *read_req;
	struct iwmct_priv *priv = sdio_get_drvdata(func);

	LOG_INFO(priv, INIT, "enter\n");

	/* Stop the interrupt source before flushing its consumers. */
	sdio_claim_host(func);
	sdio_release_irq(func);
	sdio_release_host(func);

	/* Make sure works are finished */
	flush_work_sync(&priv->bus_rescan_worker);
	flush_work_sync(&priv->isr_worker);

	sdio_claim_host(func);
	sdio_disable_func(func);
	sysfs_remove_group(&func->dev.kobj, &iwmct_attribute_group);
	iwmct_dbgfs_unregister(priv->dbgfs);
	sdio_release_host(func);

	/* free read requests */
	while (!list_empty(&priv->read_req_list)) {
		read_req = list_entry(priv->read_req_list.next,
			struct iwmct_work_struct, list);

		list_del(&read_req->list);
		kfree(read_req);
	}

	kfree(priv);
}
624
625
/* SDIO IDs this driver binds to. */
static const struct sdio_device_id iwmct_ids[] = {
	/* Intel Wireless MultiCom 3200 Top Driver */
	{ SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1404)},
	{ }, /* Terminating entry */
};

MODULE_DEVICE_TABLE(sdio, iwmct_ids);

/* Driver registration block handed to the SDIO core. */
static struct sdio_driver iwmct_driver = {
	.probe = iwmct_probe,
	.remove = iwmct_remove,
	.name = DRV_NAME,
	.id_table = iwmct_ids,
};
640
641static int __init iwmct_init(void)
642{
643 int rc;
644
645 /* Default log filter settings */
646 iwmct_log_set_filter(LOG_SRC_ALL, LOG_SEV_FILTER_RUNTIME);
647 iwmct_log_set_filter(LOG_SRC_FW_MSG, LOG_SEV_FW_FILTER_ALL);
648 iwmct_log_set_fw_filter(LOG_SRC_ALL, FW_LOG_SEV_FILTER_RUNTIME);
649
650 rc = sdio_register_driver(&iwmct_driver);
651
652 return rc;
653}
654
655static void __exit iwmct_exit(void)
656{
657 sdio_unregister_driver(&iwmct_driver);
658}
659
660module_init(iwmct_init);
661module_exit(iwmct_exit);
662
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index dd5e04813b76..545c09ed9079 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -936,7 +936,7 @@ static int cops_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
936{ 936{
937 struct cops_local *lp = netdev_priv(dev); 937 struct cops_local *lp = netdev_priv(dev);
938 struct sockaddr_at *sa = (struct sockaddr_at *)&ifr->ifr_addr; 938 struct sockaddr_at *sa = (struct sockaddr_at *)&ifr->ifr_addr;
939 struct atalk_addr *aa = (struct atalk_addr *)&lp->node_addr; 939 struct atalk_addr *aa = &lp->node_addr;
940 940
941 switch(cmd) 941 switch(cmd)
942 { 942 {
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 3463b469e657..a030e635f001 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2454,24 +2454,27 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
2454out: 2454out:
2455 if (res) { 2455 if (res) {
2456 /* no suitable interface, frame not sent */ 2456 /* no suitable interface, frame not sent */
2457 dev_kfree_skb(skb); 2457 kfree_skb(skb);
2458 } 2458 }
2459 2459
2460 return NETDEV_TX_OK; 2460 return NETDEV_TX_OK;
2461} 2461}
2462 2462
2463int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond, 2463int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
2464 struct slave *slave) 2464 struct slave *slave)
2465{ 2465{
2466 int ret = RX_HANDLER_ANOTHER; 2466 int ret = RX_HANDLER_ANOTHER;
2467 struct lacpdu *lacpdu, _lacpdu;
2468
2467 if (skb->protocol != PKT_TYPE_LACPDU) 2469 if (skb->protocol != PKT_TYPE_LACPDU)
2468 return ret; 2470 return ret;
2469 2471
2470 if (!pskb_may_pull(skb, sizeof(struct lacpdu))) 2472 lacpdu = skb_header_pointer(skb, 0, sizeof(_lacpdu), &_lacpdu);
2473 if (!lacpdu)
2471 return ret; 2474 return ret;
2472 2475
2473 read_lock(&bond->lock); 2476 read_lock(&bond->lock);
2474 ret = bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len); 2477 ret = bond_3ad_rx_indication(lacpdu, slave, skb->len);
2475 read_unlock(&bond->lock); 2478 read_unlock(&bond->lock);
2476 return ret; 2479 return ret;
2477} 2480}
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index 5ee7e3c45db7..0cfaa4afdece 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -274,8 +274,8 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave);
274void bond_3ad_handle_link_change(struct slave *slave, char link); 274void bond_3ad_handle_link_change(struct slave *slave, char link);
275int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info); 275int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info);
276int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev); 276int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev);
277int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond, 277int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
278 struct slave *slave); 278 struct slave *slave);
279int bond_3ad_set_carrier(struct bonding *bond); 279int bond_3ad_set_carrier(struct bonding *bond);
280void bond_3ad_update_lacp_rate(struct bonding *bond); 280void bond_3ad_update_lacp_rate(struct bonding *bond);
281#endif //__BOND_3AD_H__ 281#endif //__BOND_3AD_H__
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 0f59c1564e53..e15cc11edbbe 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -342,27 +342,17 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
342 _unlock_rx_hashtbl_bh(bond); 342 _unlock_rx_hashtbl_bh(bond);
343} 343}
344 344
345static int rlb_arp_recv(struct sk_buff *skb, struct bonding *bond, 345static int rlb_arp_recv(const struct sk_buff *skb, struct bonding *bond,
346 struct slave *slave) 346 struct slave *slave)
347{ 347{
348 struct arp_pkt *arp; 348 struct arp_pkt *arp, _arp;
349 349
350 if (skb->protocol != cpu_to_be16(ETH_P_ARP)) 350 if (skb->protocol != cpu_to_be16(ETH_P_ARP))
351 goto out; 351 goto out;
352 352
353 arp = (struct arp_pkt *) skb->data; 353 arp = skb_header_pointer(skb, 0, sizeof(_arp), &_arp);
354 if (!arp) { 354 if (!arp)
355 pr_debug("Packet has no ARP data\n");
356 goto out; 355 goto out;
357 }
358
359 if (!pskb_may_pull(skb, arp_hdr_len(bond->dev)))
360 goto out;
361
362 if (skb->len < sizeof(struct arp_pkt)) {
363 pr_debug("Packet is too small to be an ARP\n");
364 goto out;
365 }
366 356
367 if (arp->op_code == htons(ARPOP_REPLY)) { 357 if (arp->op_code == htons(ARPOP_REPLY)) {
368 /* update rx hash table for this ARP */ 358 /* update rx hash table for this ARP */
@@ -1356,12 +1346,12 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
1356 } 1346 }
1357 } 1347 }
1358 1348
1349 read_unlock(&bond->curr_slave_lock);
1350
1359 if (res) { 1351 if (res) {
1360 /* no suitable interface, frame not sent */ 1352 /* no suitable interface, frame not sent */
1361 dev_kfree_skb(skb); 1353 kfree_skb(skb);
1362 } 1354 }
1363 read_unlock(&bond->curr_slave_lock);
1364
1365 return NETDEV_TX_OK; 1355 return NETDEV_TX_OK;
1366} 1356}
1367 1357
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2ee76993f052..3960b1b26178 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1240,9 +1240,7 @@ static inline int slave_enable_netpoll(struct slave *slave)
1240 if (!np) 1240 if (!np)
1241 goto out; 1241 goto out;
1242 1242
1243 np->dev = slave->dev; 1243 err = __netpoll_setup(np, slave->dev);
1244 strlcpy(np->dev_name, slave->dev->name, IFNAMSIZ);
1245 err = __netpoll_setup(np);
1246 if (err) { 1244 if (err) {
1247 kfree(np); 1245 kfree(np);
1248 goto out; 1246 goto out;
@@ -1384,6 +1382,7 @@ static void bond_compute_features(struct bonding *bond)
1384 netdev_features_t vlan_features = BOND_VLAN_FEATURES; 1382 netdev_features_t vlan_features = BOND_VLAN_FEATURES;
1385 unsigned short max_hard_header_len = ETH_HLEN; 1383 unsigned short max_hard_header_len = ETH_HLEN;
1386 int i; 1384 int i;
1385 unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
1387 1386
1388 read_lock(&bond->lock); 1387 read_lock(&bond->lock);
1389 1388
@@ -1394,6 +1393,7 @@ static void bond_compute_features(struct bonding *bond)
1394 vlan_features = netdev_increment_features(vlan_features, 1393 vlan_features = netdev_increment_features(vlan_features,
1395 slave->dev->vlan_features, BOND_VLAN_FEATURES); 1394 slave->dev->vlan_features, BOND_VLAN_FEATURES);
1396 1395
1396 dst_release_flag &= slave->dev->priv_flags;
1397 if (slave->dev->hard_header_len > max_hard_header_len) 1397 if (slave->dev->hard_header_len > max_hard_header_len)
1398 max_hard_header_len = slave->dev->hard_header_len; 1398 max_hard_header_len = slave->dev->hard_header_len;
1399 } 1399 }
@@ -1402,6 +1402,9 @@ done:
1402 bond_dev->vlan_features = vlan_features; 1402 bond_dev->vlan_features = vlan_features;
1403 bond_dev->hard_header_len = max_hard_header_len; 1403 bond_dev->hard_header_len = max_hard_header_len;
1404 1404
1405 flags = bond_dev->priv_flags & ~IFF_XMIT_DST_RELEASE;
1406 bond_dev->priv_flags = flags | dst_release_flag;
1407
1405 read_unlock(&bond->lock); 1408 read_unlock(&bond->lock);
1406 1409
1407 netdev_change_features(bond_dev); 1410 netdev_change_features(bond_dev);
@@ -1445,8 +1448,8 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1445 struct sk_buff *skb = *pskb; 1448 struct sk_buff *skb = *pskb;
1446 struct slave *slave; 1449 struct slave *slave;
1447 struct bonding *bond; 1450 struct bonding *bond;
1448 int (*recv_probe)(struct sk_buff *, struct bonding *, 1451 int (*recv_probe)(const struct sk_buff *, struct bonding *,
1449 struct slave *); 1452 struct slave *);
1450 int ret = RX_HANDLER_ANOTHER; 1453 int ret = RX_HANDLER_ANOTHER;
1451 1454
1452 skb = skb_share_check(skb, GFP_ATOMIC); 1455 skb = skb_share_check(skb, GFP_ATOMIC);
@@ -1463,15 +1466,10 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1463 1466
1464 recv_probe = ACCESS_ONCE(bond->recv_probe); 1467 recv_probe = ACCESS_ONCE(bond->recv_probe);
1465 if (recv_probe) { 1468 if (recv_probe) {
1466 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); 1469 ret = recv_probe(skb, bond, slave);
1467 1470 if (ret == RX_HANDLER_CONSUMED) {
1468 if (likely(nskb)) { 1471 consume_skb(skb);
1469 ret = recv_probe(nskb, bond, slave); 1472 return ret;
1470 dev_kfree_skb(nskb);
1471 if (ret == RX_HANDLER_CONSUMED) {
1472 consume_skb(skb);
1473 return ret;
1474 }
1475 } 1473 }
1476 } 1474 }
1477 1475
@@ -2738,25 +2736,31 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
2738 } 2736 }
2739} 2737}
2740 2738
2741static int bond_arp_rcv(struct sk_buff *skb, struct bonding *bond, 2739static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
2742 struct slave *slave) 2740 struct slave *slave)
2743{ 2741{
2744 struct arphdr *arp; 2742 struct arphdr *arp = (struct arphdr *)skb->data;
2745 unsigned char *arp_ptr; 2743 unsigned char *arp_ptr;
2746 __be32 sip, tip; 2744 __be32 sip, tip;
2745 int alen;
2747 2746
2748 if (skb->protocol != __cpu_to_be16(ETH_P_ARP)) 2747 if (skb->protocol != __cpu_to_be16(ETH_P_ARP))
2749 return RX_HANDLER_ANOTHER; 2748 return RX_HANDLER_ANOTHER;
2750 2749
2751 read_lock(&bond->lock); 2750 read_lock(&bond->lock);
2751 alen = arp_hdr_len(bond->dev);
2752 2752
2753 pr_debug("bond_arp_rcv: bond %s skb->dev %s\n", 2753 pr_debug("bond_arp_rcv: bond %s skb->dev %s\n",
2754 bond->dev->name, skb->dev->name); 2754 bond->dev->name, skb->dev->name);
2755 2755
2756 if (!pskb_may_pull(skb, arp_hdr_len(bond->dev))) 2756 if (alen > skb_headlen(skb)) {
2757 goto out_unlock; 2757 arp = kmalloc(alen, GFP_ATOMIC);
2758 if (!arp)
2759 goto out_unlock;
2760 if (skb_copy_bits(skb, 0, arp, alen) < 0)
2761 goto out_unlock;
2762 }
2758 2763
2759 arp = arp_hdr(skb);
2760 if (arp->ar_hln != bond->dev->addr_len || 2764 if (arp->ar_hln != bond->dev->addr_len ||
2761 skb->pkt_type == PACKET_OTHERHOST || 2765 skb->pkt_type == PACKET_OTHERHOST ||
2762 skb->pkt_type == PACKET_LOOPBACK || 2766 skb->pkt_type == PACKET_LOOPBACK ||
@@ -2791,6 +2795,8 @@ static int bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
2791 2795
2792out_unlock: 2796out_unlock:
2793 read_unlock(&bond->lock); 2797 read_unlock(&bond->lock);
2798 if (arp != (struct arphdr *)skb->data)
2799 kfree(arp);
2794 return RX_HANDLER_ANOTHER; 2800 return RX_HANDLER_ANOTHER;
2795} 2801}
2796 2802
@@ -3993,7 +3999,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
3993out: 3999out:
3994 if (res) { 4000 if (res) {
3995 /* no suitable interface, frame not sent */ 4001 /* no suitable interface, frame not sent */
3996 dev_kfree_skb(skb); 4002 kfree_skb(skb);
3997 } 4003 }
3998 4004
3999 return NETDEV_TX_OK; 4005 return NETDEV_TX_OK;
@@ -4015,11 +4021,11 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
4015 res = bond_dev_queue_xmit(bond, skb, 4021 res = bond_dev_queue_xmit(bond, skb,
4016 bond->curr_active_slave->dev); 4022 bond->curr_active_slave->dev);
4017 4023
4024 read_unlock(&bond->curr_slave_lock);
4025
4018 if (res) 4026 if (res)
4019 /* no suitable interface, frame not sent */ 4027 /* no suitable interface, frame not sent */
4020 dev_kfree_skb(skb); 4028 kfree_skb(skb);
4021
4022 read_unlock(&bond->curr_slave_lock);
4023 4029
4024 return NETDEV_TX_OK; 4030 return NETDEV_TX_OK;
4025} 4031}
@@ -4058,7 +4064,7 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
4058 4064
4059 if (res) { 4065 if (res) {
4060 /* no suitable interface, frame not sent */ 4066 /* no suitable interface, frame not sent */
4061 dev_kfree_skb(skb); 4067 kfree_skb(skb);
4062 } 4068 }
4063 4069
4064 return NETDEV_TX_OK; 4070 return NETDEV_TX_OK;
@@ -4096,7 +4102,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
4096 4102
4097 res = bond_dev_queue_xmit(bond, skb2, tx_dev); 4103 res = bond_dev_queue_xmit(bond, skb2, tx_dev);
4098 if (res) { 4104 if (res) {
4099 dev_kfree_skb(skb2); 4105 kfree_skb(skb2);
4100 continue; 4106 continue;
4101 } 4107 }
4102 } 4108 }
@@ -4110,7 +4116,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
4110out: 4116out:
4111 if (res) 4117 if (res)
4112 /* no suitable interface, frame not sent */ 4118 /* no suitable interface, frame not sent */
4113 dev_kfree_skb(skb); 4119 kfree_skb(skb);
4114 4120
4115 /* frame sent to all suitable interfaces */ 4121 /* frame sent to all suitable interfaces */
4116 return NETDEV_TX_OK; 4122 return NETDEV_TX_OK;
@@ -4216,7 +4222,7 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
4216 pr_err("%s: Error: Unknown bonding mode %d\n", 4222 pr_err("%s: Error: Unknown bonding mode %d\n",
4217 dev->name, bond->params.mode); 4223 dev->name, bond->params.mode);
4218 WARN_ON_ONCE(1); 4224 WARN_ON_ONCE(1);
4219 dev_kfree_skb(skb); 4225 kfree_skb(skb);
4220 return NETDEV_TX_OK; 4226 return NETDEV_TX_OK;
4221 } 4227 }
4222} 4228}
@@ -4238,7 +4244,7 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
4238 if (bond->slave_cnt) 4244 if (bond->slave_cnt)
4239 ret = __bond_start_xmit(skb, dev); 4245 ret = __bond_start_xmit(skb, dev);
4240 else 4246 else
4241 dev_kfree_skb(skb); 4247 kfree_skb(skb);
4242 4248
4243 read_unlock(&bond->lock); 4249 read_unlock(&bond->lock);
4244 4250
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 4581aa5ccaba..f8af2fcd3d16 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -218,8 +218,8 @@ struct bonding {
218 struct slave *primary_slave; 218 struct slave *primary_slave;
219 bool force_primary; 219 bool force_primary;
220 s32 slave_cnt; /* never change this value outside the attach/detach wrappers */ 220 s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
221 int (*recv_probe)(struct sk_buff *, struct bonding *, 221 int (*recv_probe)(const struct sk_buff *, struct bonding *,
222 struct slave *); 222 struct slave *);
223 rwlock_t lock; 223 rwlock_t lock;
224 rwlock_t curr_slave_lock; 224 rwlock_t curr_slave_lock;
225 u8 send_peer_notif; 225 u8 send_peer_notif;
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 4a27adb7ae67..0def8b3106f4 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -11,7 +11,6 @@
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/device.h> 13#include <linux/device.h>
14#include <linux/platform_device.h>
15#include <linux/netdevice.h> 14#include <linux/netdevice.h>
16#include <linux/string.h> 15#include <linux/string.h>
17#include <linux/list.h> 16#include <linux/list.h>
@@ -20,7 +19,7 @@
20#include <linux/sched.h> 19#include <linux/sched.h>
21#include <linux/if_arp.h> 20#include <linux/if_arp.h>
22#include <linux/timer.h> 21#include <linux/timer.h>
23#include <linux/rtnetlink.h> 22#include <net/rtnetlink.h>
24#include <linux/pkt_sched.h> 23#include <linux/pkt_sched.h>
25#include <net/caif/caif_layer.h> 24#include <net/caif/caif_layer.h>
26#include <net/caif/caif_hsi.h> 25#include <net/caif/caif_hsi.h>
@@ -33,59 +32,46 @@ MODULE_DESCRIPTION("CAIF HSI driver");
33#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\ 32#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
34 (((pow)-((x)&((pow)-1))))) 33 (((pow)-((x)&((pow)-1)))))
35 34
36static int inactivity_timeout = 1000; 35static const struct cfhsi_config hsi_default_config = {
37module_param(inactivity_timeout, int, S_IRUGO | S_IWUSR);
38MODULE_PARM_DESC(inactivity_timeout, "Inactivity timeout on HSI, ms.");
39 36
40static int aggregation_timeout = 1; 37 /* Inactivity timeout on HSI, ms */
41module_param(aggregation_timeout, int, S_IRUGO | S_IWUSR); 38 .inactivity_timeout = HZ,
42MODULE_PARM_DESC(aggregation_timeout, "Aggregation timeout on HSI, ms.");
43 39
44/* 40 /* Aggregation timeout (ms) of zero means no aggregation is done*/
45 * HSI padding options. 41 .aggregation_timeout = 1,
46 * Warning: must be a base of 2 (& operation used) and can not be zero !
47 */
48static int hsi_head_align = 4;
49module_param(hsi_head_align, int, S_IRUGO);
50MODULE_PARM_DESC(hsi_head_align, "HSI head alignment.");
51 42
52static int hsi_tail_align = 4; 43 /*
53module_param(hsi_tail_align, int, S_IRUGO); 44 * HSI link layer flow-control thresholds.
54MODULE_PARM_DESC(hsi_tail_align, "HSI tail alignment."); 45 * Threshold values for the HSI packet queue. Flow-control will be
55 46 * asserted when the number of packets exceeds q_high_mark. It will
56/* 47 * not be de-asserted before the number of packets drops below
57 * HSI link layer flowcontrol thresholds. 48 * q_low_mark.
58 * Warning: A high threshold value migth increase throughput but it will at 49 * Warning: A high threshold value might increase throughput but it
59 * the same time prevent channel prioritization and increase the risk of 50 * will at the same time prevent channel prioritization and increase
60 * flooding the modem. The high threshold should be above the low. 51 * the risk of flooding the modem. The high threshold should be above
61 */ 52 * the low.
62static int hsi_high_threshold = 100; 53 */
63module_param(hsi_high_threshold, int, S_IRUGO); 54 .q_high_mark = 100,
64MODULE_PARM_DESC(hsi_high_threshold, "HSI high threshold (FLOW OFF)."); 55 .q_low_mark = 50,
65 56
66static int hsi_low_threshold = 50; 57 /*
67module_param(hsi_low_threshold, int, S_IRUGO); 58 * HSI padding options.
68MODULE_PARM_DESC(hsi_low_threshold, "HSI high threshold (FLOW ON)."); 59 * Warning: must be a base of 2 (& operation used) and can not be zero !
60 */
61 .head_align = 4,
62 .tail_align = 4,
63};
69 64
70#define ON 1 65#define ON 1
71#define OFF 0 66#define OFF 0
72 67
73/*
74 * Threshold values for the HSI packet queue. Flowcontrol will be asserted
75 * when the number of packets exceeds HIGH_WATER_MARK. It will not be
76 * de-asserted before the number of packets drops below LOW_WATER_MARK.
77 */
78#define LOW_WATER_MARK hsi_low_threshold
79#define HIGH_WATER_MARK hsi_high_threshold
80
81static LIST_HEAD(cfhsi_list); 68static LIST_HEAD(cfhsi_list);
82static spinlock_t cfhsi_list_lock;
83 69
84static void cfhsi_inactivity_tout(unsigned long arg) 70static void cfhsi_inactivity_tout(unsigned long arg)
85{ 71{
86 struct cfhsi *cfhsi = (struct cfhsi *)arg; 72 struct cfhsi *cfhsi = (struct cfhsi *)arg;
87 73
88 dev_dbg(&cfhsi->ndev->dev, "%s.\n", 74 netdev_dbg(cfhsi->ndev, "%s.\n",
89 __func__); 75 __func__);
90 76
91 /* Schedule power down work queue. */ 77 /* Schedule power down work queue. */
@@ -101,8 +87,8 @@ static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi,
101 int hpad, tpad, len; 87 int hpad, tpad, len;
102 88
103 info = (struct caif_payload_info *)&skb->cb; 89 info = (struct caif_payload_info *)&skb->cb;
104 hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align); 90 hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
105 tpad = PAD_POW2((skb->len + hpad), hsi_tail_align); 91 tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
106 len = skb->len + hpad + tpad; 92 len = skb->len + hpad + tpad;
107 93
108 if (direction > 0) 94 if (direction > 0)
@@ -115,7 +101,7 @@ static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi)
115{ 101{
116 int i; 102 int i;
117 103
118 if (cfhsi->aggregation_timeout < 0) 104 if (cfhsi->cfg.aggregation_timeout == 0)
119 return true; 105 return true;
120 106
121 for (i = 0; i < CFHSI_PRIO_BEBK; ++i) { 107 for (i = 0; i < CFHSI_PRIO_BEBK; ++i) {
@@ -171,7 +157,7 @@ static void cfhsi_abort_tx(struct cfhsi *cfhsi)
171 cfhsi->tx_state = CFHSI_TX_STATE_IDLE; 157 cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
172 if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) 158 if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
173 mod_timer(&cfhsi->inactivity_timer, 159 mod_timer(&cfhsi->inactivity_timer,
174 jiffies + cfhsi->inactivity_timeout); 160 jiffies + cfhsi->cfg.inactivity_timeout);
175 spin_unlock_bh(&cfhsi->lock); 161 spin_unlock_bh(&cfhsi->lock);
176} 162}
177 163
@@ -181,14 +167,14 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
181 size_t fifo_occupancy; 167 size_t fifo_occupancy;
182 int ret; 168 int ret;
183 169
184 dev_dbg(&cfhsi->ndev->dev, "%s.\n", 170 netdev_dbg(cfhsi->ndev, "%s.\n",
185 __func__); 171 __func__);
186 172
187 do { 173 do {
188 ret = cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev, 174 ret = cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
189 &fifo_occupancy); 175 &fifo_occupancy);
190 if (ret) { 176 if (ret) {
191 dev_warn(&cfhsi->ndev->dev, 177 netdev_warn(cfhsi->ndev,
192 "%s: can't get FIFO occupancy: %d.\n", 178 "%s: can't get FIFO occupancy: %d.\n",
193 __func__, ret); 179 __func__, ret);
194 break; 180 break;
@@ -198,11 +184,11 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
198 184
199 fifo_occupancy = min(sizeof(buffer), fifo_occupancy); 185 fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
200 set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits); 186 set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
201 ret = cfhsi->dev->cfhsi_rx(buffer, fifo_occupancy, 187 ret = cfhsi->ops->cfhsi_rx(buffer, fifo_occupancy,
202 cfhsi->dev); 188 cfhsi->ops);
203 if (ret) { 189 if (ret) {
204 clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits); 190 clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
205 dev_warn(&cfhsi->ndev->dev, 191 netdev_warn(cfhsi->ndev,
206 "%s: can't read data: %d.\n", 192 "%s: can't read data: %d.\n",
207 __func__, ret); 193 __func__, ret);
208 break; 194 break;
@@ -213,13 +199,13 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
213 !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret); 199 !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);
214 200
215 if (ret < 0) { 201 if (ret < 0) {
216 dev_warn(&cfhsi->ndev->dev, 202 netdev_warn(cfhsi->ndev,
217 "%s: can't wait for flush complete: %d.\n", 203 "%s: can't wait for flush complete: %d.\n",
218 __func__, ret); 204 __func__, ret);
219 break; 205 break;
220 } else if (!ret) { 206 } else if (!ret) {
221 ret = -ETIMEDOUT; 207 ret = -ETIMEDOUT;
222 dev_warn(&cfhsi->ndev->dev, 208 netdev_warn(cfhsi->ndev,
223 "%s: timeout waiting for flush complete.\n", 209 "%s: timeout waiting for flush complete.\n",
224 __func__); 210 __func__);
225 break; 211 break;
@@ -246,14 +232,14 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
246 /* Check if we can embed a CAIF frame. */ 232 /* Check if we can embed a CAIF frame. */
247 if (skb->len < CFHSI_MAX_EMB_FRM_SZ) { 233 if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
248 struct caif_payload_info *info; 234 struct caif_payload_info *info;
249 int hpad = 0; 235 int hpad;
250 int tpad = 0; 236 int tpad;
251 237
252 /* Calculate needed head alignment and tail alignment. */ 238 /* Calculate needed head alignment and tail alignment. */
253 info = (struct caif_payload_info *)&skb->cb; 239 info = (struct caif_payload_info *)&skb->cb;
254 240
255 hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align); 241 hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
256 tpad = PAD_POW2((skb->len + hpad), hsi_tail_align); 242 tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
257 243
258 /* Check if frame still fits with added alignment. */ 244 /* Check if frame still fits with added alignment. */
259 if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) { 245 if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
@@ -282,8 +268,8 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
282 pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ; 268 pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
283 while (nfrms < CFHSI_MAX_PKTS) { 269 while (nfrms < CFHSI_MAX_PKTS) {
284 struct caif_payload_info *info; 270 struct caif_payload_info *info;
285 int hpad = 0; 271 int hpad;
286 int tpad = 0; 272 int tpad;
287 273
288 if (!skb) 274 if (!skb)
289 skb = cfhsi_dequeue(cfhsi); 275 skb = cfhsi_dequeue(cfhsi);
@@ -294,8 +280,8 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
294 /* Calculate needed head alignment and tail alignment. */ 280 /* Calculate needed head alignment and tail alignment. */
295 info = (struct caif_payload_info *)&skb->cb; 281 info = (struct caif_payload_info *)&skb->cb;
296 282
297 hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align); 283 hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
298 tpad = PAD_POW2((skb->len + hpad), hsi_tail_align); 284 tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
299 285
300 /* Fill in CAIF frame length in descriptor. */ 286 /* Fill in CAIF frame length in descriptor. */
301 desc->cffrm_len[nfrms] = hpad + skb->len + tpad; 287 desc->cffrm_len[nfrms] = hpad + skb->len + tpad;
@@ -348,7 +334,7 @@ static void cfhsi_start_tx(struct cfhsi *cfhsi)
348 struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf; 334 struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
349 int len, res; 335 int len, res;
350 336
351 dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__); 337 netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
352 338
353 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) 339 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
354 return; 340 return;
@@ -366,22 +352,22 @@ static void cfhsi_start_tx(struct cfhsi *cfhsi)
366 cfhsi->tx_state = CFHSI_TX_STATE_IDLE; 352 cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
367 /* Start inactivity timer. */ 353 /* Start inactivity timer. */
368 mod_timer(&cfhsi->inactivity_timer, 354 mod_timer(&cfhsi->inactivity_timer,
369 jiffies + cfhsi->inactivity_timeout); 355 jiffies + cfhsi->cfg.inactivity_timeout);
370 spin_unlock_bh(&cfhsi->lock); 356 spin_unlock_bh(&cfhsi->lock);
371 break; 357 break;
372 } 358 }
373 359
374 /* Set up new transfer. */ 360 /* Set up new transfer. */
375 res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev); 361 res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
376 if (WARN_ON(res < 0)) 362 if (WARN_ON(res < 0))
377 dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n", 363 netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
378 __func__, res); 364 __func__, res);
379 } while (res < 0); 365 } while (res < 0);
380} 366}
381 367
382static void cfhsi_tx_done(struct cfhsi *cfhsi) 368static void cfhsi_tx_done(struct cfhsi *cfhsi)
383{ 369{
384 dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__); 370 netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
385 371
386 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) 372 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
387 return; 373 return;
@@ -392,7 +378,7 @@ static void cfhsi_tx_done(struct cfhsi *cfhsi)
392 */ 378 */
393 spin_lock_bh(&cfhsi->lock); 379 spin_lock_bh(&cfhsi->lock);
394 if (cfhsi->flow_off_sent && 380 if (cfhsi->flow_off_sent &&
395 cfhsi_tx_queue_len(cfhsi) <= cfhsi->q_low_mark && 381 cfhsi_tx_queue_len(cfhsi) <= cfhsi->cfg.q_low_mark &&
396 cfhsi->cfdev.flowctrl) { 382 cfhsi->cfdev.flowctrl) {
397 383
398 cfhsi->flow_off_sent = 0; 384 cfhsi->flow_off_sent = 0;
@@ -404,19 +390,19 @@ static void cfhsi_tx_done(struct cfhsi *cfhsi)
404 cfhsi_start_tx(cfhsi); 390 cfhsi_start_tx(cfhsi);
405 } else { 391 } else {
406 mod_timer(&cfhsi->aggregation_timer, 392 mod_timer(&cfhsi->aggregation_timer,
407 jiffies + cfhsi->aggregation_timeout); 393 jiffies + cfhsi->cfg.aggregation_timeout);
408 spin_unlock_bh(&cfhsi->lock); 394 spin_unlock_bh(&cfhsi->lock);
409 } 395 }
410 396
411 return; 397 return;
412} 398}
413 399
414static void cfhsi_tx_done_cb(struct cfhsi_drv *drv) 400static void cfhsi_tx_done_cb(struct cfhsi_cb_ops *cb_ops)
415{ 401{
416 struct cfhsi *cfhsi; 402 struct cfhsi *cfhsi;
417 403
418 cfhsi = container_of(drv, struct cfhsi, drv); 404 cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
419 dev_dbg(&cfhsi->ndev->dev, "%s.\n", 405 netdev_dbg(cfhsi->ndev, "%s.\n",
420 __func__); 406 __func__);
421 407
422 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) 408 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
@@ -433,7 +419,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
433 419
434 if ((desc->header & ~CFHSI_PIGGY_DESC) || 420 if ((desc->header & ~CFHSI_PIGGY_DESC) ||
435 (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) { 421 (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
436 dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n", 422 netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
437 __func__); 423 __func__);
438 return -EPROTO; 424 return -EPROTO;
439 } 425 }
@@ -455,7 +441,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
455 441
456 /* Sanity check length of CAIF frame. */ 442 /* Sanity check length of CAIF frame. */
457 if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) { 443 if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
458 dev_err(&cfhsi->ndev->dev, "%s: Invalid length.\n", 444 netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
459 __func__); 445 __func__);
460 return -EPROTO; 446 return -EPROTO;
461 } 447 }
@@ -463,7 +449,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
463 /* Allocate SKB (OK even in IRQ context). */ 449 /* Allocate SKB (OK even in IRQ context). */
464 skb = alloc_skb(len + 1, GFP_ATOMIC); 450 skb = alloc_skb(len + 1, GFP_ATOMIC);
465 if (!skb) { 451 if (!skb) {
466 dev_err(&cfhsi->ndev->dev, "%s: Out of memory !\n", 452 netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
467 __func__); 453 __func__);
468 return -ENOMEM; 454 return -ENOMEM;
469 } 455 }
@@ -477,8 +463,8 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
477 skb->dev = cfhsi->ndev; 463 skb->dev = cfhsi->ndev;
478 464
479 /* 465 /*
480 * We are called from a arch specific platform device. 466 * We are in a callback handler and
481 * Unfortunately we don't know what context we're 467 * unfortunately we don't know what context we're
482 * running in. 468 * running in.
483 */ 469 */
484 if (in_interrupt()) 470 if (in_interrupt())
@@ -504,7 +490,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
504 xfer_sz += CFHSI_DESC_SZ; 490 xfer_sz += CFHSI_DESC_SZ;
505 491
506 if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) { 492 if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) {
507 dev_err(&cfhsi->ndev->dev, 493 netdev_err(cfhsi->ndev,
508 "%s: Invalid payload len: %d, ignored.\n", 494 "%s: Invalid payload len: %d, ignored.\n",
509 __func__, xfer_sz); 495 __func__, xfer_sz);
510 return -EPROTO; 496 return -EPROTO;
@@ -551,7 +537,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
551 /* Sanity check header and offset. */ 537 /* Sanity check header and offset. */
552 if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) || 538 if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
553 (desc->offset > CFHSI_MAX_EMB_FRM_SZ))) { 539 (desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
554 dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n", 540 netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
555 __func__); 541 __func__);
556 return -EPROTO; 542 return -EPROTO;
557 } 543 }
@@ -573,7 +559,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
573 struct sk_buff *skb; 559 struct sk_buff *skb;
574 u8 *dst = NULL; 560 u8 *dst = NULL;
575 u8 *pcffrm = NULL; 561 u8 *pcffrm = NULL;
576 int len = 0; 562 int len;
577 563
578 /* CAIF frame starts after head padding. */ 564 /* CAIF frame starts after head padding. */
579 pcffrm = pfrm + *pfrm + 1; 565 pcffrm = pfrm + *pfrm + 1;
@@ -585,7 +571,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
585 571
586 /* Sanity check length of CAIF frames. */ 572 /* Sanity check length of CAIF frames. */
587 if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) { 573 if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
588 dev_err(&cfhsi->ndev->dev, "%s: Invalid length.\n", 574 netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
589 __func__); 575 __func__);
590 return -EPROTO; 576 return -EPROTO;
591 } 577 }
@@ -593,7 +579,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
593 /* Allocate SKB (OK even in IRQ context). */ 579 /* Allocate SKB (OK even in IRQ context). */
594 skb = alloc_skb(len + 1, GFP_ATOMIC); 580 skb = alloc_skb(len + 1, GFP_ATOMIC);
595 if (!skb) { 581 if (!skb) {
596 dev_err(&cfhsi->ndev->dev, "%s: Out of memory !\n", 582 netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
597 __func__); 583 __func__);
598 cfhsi->rx_state.nfrms = nfrms; 584 cfhsi->rx_state.nfrms = nfrms;
599 return -ENOMEM; 585 return -ENOMEM;
@@ -608,7 +594,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
608 skb->dev = cfhsi->ndev; 594 skb->dev = cfhsi->ndev;
609 595
610 /* 596 /*
611 * We're called from a platform device, 597 * We're called in callback from HSI
612 * and don't know the context we're running in. 598 * and don't know the context we're running in.
613 */ 599 */
614 if (in_interrupt()) 600 if (in_interrupt())
@@ -639,7 +625,7 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
639 625
640 desc = (struct cfhsi_desc *)cfhsi->rx_buf; 626 desc = (struct cfhsi_desc *)cfhsi->rx_buf;
641 627
642 dev_dbg(&cfhsi->ndev->dev, "%s\n", __func__); 628 netdev_dbg(cfhsi->ndev, "%s\n", __func__);
643 629
644 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) 630 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
645 return; 631 return;
@@ -647,7 +633,7 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
647 /* Update inactivity timer if pending. */ 633 /* Update inactivity timer if pending. */
648 spin_lock_bh(&cfhsi->lock); 634 spin_lock_bh(&cfhsi->lock);
649 mod_timer_pending(&cfhsi->inactivity_timer, 635 mod_timer_pending(&cfhsi->inactivity_timer,
650 jiffies + cfhsi->inactivity_timeout); 636 jiffies + cfhsi->cfg.inactivity_timeout);
651 spin_unlock_bh(&cfhsi->lock); 637 spin_unlock_bh(&cfhsi->lock);
652 638
653 if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) { 639 if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
@@ -680,12 +666,11 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
680 if (desc_pld_len < 0) 666 if (desc_pld_len < 0)
681 goto out_of_sync; 667 goto out_of_sync;
682 668
683 if (desc_pld_len > 0) 669 if (desc_pld_len > 0) {
684 rx_len = desc_pld_len; 670 rx_len = desc_pld_len;
685 671 if (piggy_desc->header & CFHSI_PIGGY_DESC)
686 if (desc_pld_len > 0 && 672 rx_len += CFHSI_DESC_SZ;
687 (piggy_desc->header & CFHSI_PIGGY_DESC)) 673 }
688 rx_len += CFHSI_DESC_SZ;
689 674
690 /* 675 /*
691 * Copy needed information from the piggy-backed 676 * Copy needed information from the piggy-backed
@@ -693,8 +678,6 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
693 */ 678 */
694 memcpy(rx_buf, (u8 *)piggy_desc, 679 memcpy(rx_buf, (u8 *)piggy_desc,
695 CFHSI_DESC_SHORT_SZ); 680 CFHSI_DESC_SHORT_SZ);
696 if (desc_pld_len == -EPROTO)
697 goto out_of_sync;
698 } 681 }
699 } 682 }
700 683
@@ -710,13 +693,13 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
710 /* Initiate next read */ 693 /* Initiate next read */
711 if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) { 694 if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
712 /* Set up new transfer. */ 695 /* Set up new transfer. */
713 dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n", 696 netdev_dbg(cfhsi->ndev, "%s: Start RX.\n",
714 __func__); 697 __func__);
715 698
716 res = cfhsi->dev->cfhsi_rx(rx_ptr, rx_len, 699 res = cfhsi->ops->cfhsi_rx(rx_ptr, rx_len,
717 cfhsi->dev); 700 cfhsi->ops);
718 if (WARN_ON(res < 0)) { 701 if (WARN_ON(res < 0)) {
719 dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n", 702 netdev_err(cfhsi->ndev, "%s: RX error %d.\n",
720 __func__, res); 703 __func__, res);
721 cfhsi->ndev->stats.rx_errors++; 704 cfhsi->ndev->stats.rx_errors++;
722 cfhsi->ndev->stats.rx_dropped++; 705 cfhsi->ndev->stats.rx_dropped++;
@@ -753,7 +736,7 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
753 return; 736 return;
754 737
755out_of_sync: 738out_of_sync:
756 dev_err(&cfhsi->ndev->dev, "%s: Out of sync.\n", __func__); 739 netdev_err(cfhsi->ndev, "%s: Out of sync.\n", __func__);
757 print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE, 740 print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
758 cfhsi->rx_buf, CFHSI_DESC_SZ); 741 cfhsi->rx_buf, CFHSI_DESC_SZ);
759 schedule_work(&cfhsi->out_of_sync_work); 742 schedule_work(&cfhsi->out_of_sync_work);
@@ -763,18 +746,18 @@ static void cfhsi_rx_slowpath(unsigned long arg)
763{ 746{
764 struct cfhsi *cfhsi = (struct cfhsi *)arg; 747 struct cfhsi *cfhsi = (struct cfhsi *)arg;
765 748
766 dev_dbg(&cfhsi->ndev->dev, "%s.\n", 749 netdev_dbg(cfhsi->ndev, "%s.\n",
767 __func__); 750 __func__);
768 751
769 cfhsi_rx_done(cfhsi); 752 cfhsi_rx_done(cfhsi);
770} 753}
771 754
772static void cfhsi_rx_done_cb(struct cfhsi_drv *drv) 755static void cfhsi_rx_done_cb(struct cfhsi_cb_ops *cb_ops)
773{ 756{
774 struct cfhsi *cfhsi; 757 struct cfhsi *cfhsi;
775 758
776 cfhsi = container_of(drv, struct cfhsi, drv); 759 cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
777 dev_dbg(&cfhsi->ndev->dev, "%s.\n", 760 netdev_dbg(cfhsi->ndev, "%s.\n",
778 __func__); 761 __func__);
779 762
780 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) 763 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
@@ -807,9 +790,9 @@ static void cfhsi_wake_up(struct work_struct *work)
807 } 790 }
808 791
809 /* Activate wake line. */ 792 /* Activate wake line. */
810 cfhsi->dev->cfhsi_wake_up(cfhsi->dev); 793 cfhsi->ops->cfhsi_wake_up(cfhsi->ops);
811 794
812 dev_dbg(&cfhsi->ndev->dev, "%s: Start waiting.\n", 795 netdev_dbg(cfhsi->ndev, "%s: Start waiting.\n",
813 __func__); 796 __func__);
814 797
815 /* Wait for acknowledge. */ 798 /* Wait for acknowledge. */
@@ -819,33 +802,33 @@ static void cfhsi_wake_up(struct work_struct *work)
819 &cfhsi->bits), ret); 802 &cfhsi->bits), ret);
820 if (unlikely(ret < 0)) { 803 if (unlikely(ret < 0)) {
821 /* Interrupted by signal. */ 804 /* Interrupted by signal. */
822 dev_err(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n", 805 netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
823 __func__, ret); 806 __func__, ret);
824 807
825 clear_bit(CFHSI_WAKE_UP, &cfhsi->bits); 808 clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
826 cfhsi->dev->cfhsi_wake_down(cfhsi->dev); 809 cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
827 return; 810 return;
828 } else if (!ret) { 811 } else if (!ret) {
829 bool ca_wake = false; 812 bool ca_wake = false;
830 size_t fifo_occupancy = 0; 813 size_t fifo_occupancy = 0;
831 814
832 /* Wakeup timeout */ 815 /* Wakeup timeout */
833 dev_dbg(&cfhsi->ndev->dev, "%s: Timeout.\n", 816 netdev_dbg(cfhsi->ndev, "%s: Timeout.\n",
834 __func__); 817 __func__);
835 818
836 /* Check FIFO to check if modem has sent something. */ 819 /* Check FIFO to check if modem has sent something. */
837 WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev, 820 WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
838 &fifo_occupancy)); 821 &fifo_occupancy));
839 822
840 dev_dbg(&cfhsi->ndev->dev, "%s: Bytes in FIFO: %u.\n", 823 netdev_dbg(cfhsi->ndev, "%s: Bytes in FIFO: %u.\n",
841 __func__, (unsigned) fifo_occupancy); 824 __func__, (unsigned) fifo_occupancy);
842 825
843 /* Check if we misssed the interrupt. */ 826 /* Check if we misssed the interrupt. */
844 WARN_ON(cfhsi->dev->cfhsi_get_peer_wake(cfhsi->dev, 827 WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
845 &ca_wake)); 828 &ca_wake));
846 829
847 if (ca_wake) { 830 if (ca_wake) {
848 dev_err(&cfhsi->ndev->dev, "%s: CA Wake missed !.\n", 831 netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
849 __func__); 832 __func__);
850 833
851 /* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */ 834 /* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */
@@ -856,11 +839,11 @@ static void cfhsi_wake_up(struct work_struct *work)
856 } 839 }
857 840
858 clear_bit(CFHSI_WAKE_UP, &cfhsi->bits); 841 clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
859 cfhsi->dev->cfhsi_wake_down(cfhsi->dev); 842 cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
860 return; 843 return;
861 } 844 }
862wake_ack: 845wake_ack:
863 dev_dbg(&cfhsi->ndev->dev, "%s: Woken.\n", 846 netdev_dbg(cfhsi->ndev, "%s: Woken.\n",
864 __func__); 847 __func__);
865 848
866 /* Clear power up bit. */ 849 /* Clear power up bit. */
@@ -868,11 +851,11 @@ wake_ack:
868 clear_bit(CFHSI_WAKE_UP, &cfhsi->bits); 851 clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
869 852
870 /* Resume read operation. */ 853 /* Resume read operation. */
871 dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n", __func__); 854 netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", __func__);
872 res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->dev); 855 res = cfhsi->ops->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->ops);
873 856
874 if (WARN_ON(res < 0)) 857 if (WARN_ON(res < 0))
875 dev_err(&cfhsi->ndev->dev, "%s: RX err %d.\n", __func__, res); 858 netdev_err(cfhsi->ndev, "%s: RX err %d.\n", __func__, res);
876 859
877 /* Clear power up acknowledment. */ 860 /* Clear power up acknowledment. */
878 clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits); 861 clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
@@ -881,16 +864,16 @@ wake_ack:
881 864
882 /* Resume transmit if queues are not empty. */ 865 /* Resume transmit if queues are not empty. */
883 if (!cfhsi_tx_queue_len(cfhsi)) { 866 if (!cfhsi_tx_queue_len(cfhsi)) {
884 dev_dbg(&cfhsi->ndev->dev, "%s: Peer wake, start timer.\n", 867 netdev_dbg(cfhsi->ndev, "%s: Peer wake, start timer.\n",
885 __func__); 868 __func__);
886 /* Start inactivity timer. */ 869 /* Start inactivity timer. */
887 mod_timer(&cfhsi->inactivity_timer, 870 mod_timer(&cfhsi->inactivity_timer,
888 jiffies + cfhsi->inactivity_timeout); 871 jiffies + cfhsi->cfg.inactivity_timeout);
889 spin_unlock_bh(&cfhsi->lock); 872 spin_unlock_bh(&cfhsi->lock);
890 return; 873 return;
891 } 874 }
892 875
893 dev_dbg(&cfhsi->ndev->dev, "%s: Host wake.\n", 876 netdev_dbg(cfhsi->ndev, "%s: Host wake.\n",
894 __func__); 877 __func__);
895 878
896 spin_unlock_bh(&cfhsi->lock); 879 spin_unlock_bh(&cfhsi->lock);
@@ -900,14 +883,14 @@ wake_ack:
900 883
901 if (likely(len > 0)) { 884 if (likely(len > 0)) {
902 /* Set up new transfer. */ 885 /* Set up new transfer. */
903 res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev); 886 res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
904 if (WARN_ON(res < 0)) { 887 if (WARN_ON(res < 0)) {
905 dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n", 888 netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
906 __func__, res); 889 __func__, res);
907 cfhsi_abort_tx(cfhsi); 890 cfhsi_abort_tx(cfhsi);
908 } 891 }
909 } else { 892 } else {
910 dev_err(&cfhsi->ndev->dev, 893 netdev_err(cfhsi->ndev,
911 "%s: Failed to create HSI frame: %d.\n", 894 "%s: Failed to create HSI frame: %d.\n",
912 __func__, len); 895 __func__, len);
913 } 896 }
@@ -921,13 +904,13 @@ static void cfhsi_wake_down(struct work_struct *work)
921 int retry = CFHSI_WAKE_TOUT; 904 int retry = CFHSI_WAKE_TOUT;
922 905
923 cfhsi = container_of(work, struct cfhsi, wake_down_work); 906 cfhsi = container_of(work, struct cfhsi, wake_down_work);
924 dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__); 907 netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
925 908
926 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) 909 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
927 return; 910 return;
928 911
929 /* Deactivate wake line. */ 912 /* Deactivate wake line. */
930 cfhsi->dev->cfhsi_wake_down(cfhsi->dev); 913 cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
931 914
932 /* Wait for acknowledge. */ 915 /* Wait for acknowledge. */
933 ret = CFHSI_WAKE_TOUT; 916 ret = CFHSI_WAKE_TOUT;
@@ -936,26 +919,26 @@ static void cfhsi_wake_down(struct work_struct *work)
936 &cfhsi->bits), ret); 919 &cfhsi->bits), ret);
937 if (ret < 0) { 920 if (ret < 0) {
938 /* Interrupted by signal. */ 921 /* Interrupted by signal. */
939 dev_err(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n", 922 netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
940 __func__, ret); 923 __func__, ret);
941 return; 924 return;
942 } else if (!ret) { 925 } else if (!ret) {
943 bool ca_wake = true; 926 bool ca_wake = true;
944 927
945 /* Timeout */ 928 /* Timeout */
946 dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n", __func__); 929 netdev_err(cfhsi->ndev, "%s: Timeout.\n", __func__);
947 930
948 /* Check if we misssed the interrupt. */ 931 /* Check if we misssed the interrupt. */
949 WARN_ON(cfhsi->dev->cfhsi_get_peer_wake(cfhsi->dev, 932 WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
950 &ca_wake)); 933 &ca_wake));
951 if (!ca_wake) 934 if (!ca_wake)
952 dev_err(&cfhsi->ndev->dev, "%s: CA Wake missed !.\n", 935 netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
953 __func__); 936 __func__);
954 } 937 }
955 938
956 /* Check FIFO occupancy. */ 939 /* Check FIFO occupancy. */
957 while (retry) { 940 while (retry) {
958 WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev, 941 WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
959 &fifo_occupancy)); 942 &fifo_occupancy));
960 943
961 if (!fifo_occupancy) 944 if (!fifo_occupancy)
@@ -967,14 +950,13 @@ static void cfhsi_wake_down(struct work_struct *work)
967 } 950 }
968 951
969 if (!retry) 952 if (!retry)
970 dev_err(&cfhsi->ndev->dev, "%s: FIFO Timeout.\n", __func__); 953 netdev_err(cfhsi->ndev, "%s: FIFO Timeout.\n", __func__);
971 954
972 /* Clear AWAKE condition. */ 955 /* Clear AWAKE condition. */
973 clear_bit(CFHSI_AWAKE, &cfhsi->bits); 956 clear_bit(CFHSI_AWAKE, &cfhsi->bits);
974 957
975 /* Cancel pending RX requests. */ 958 /* Cancel pending RX requests. */
976 cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev); 959 cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
977
978} 960}
979 961
980static void cfhsi_out_of_sync(struct work_struct *work) 962static void cfhsi_out_of_sync(struct work_struct *work)
@@ -988,12 +970,12 @@ static void cfhsi_out_of_sync(struct work_struct *work)
988 rtnl_unlock(); 970 rtnl_unlock();
989} 971}
990 972
991static void cfhsi_wake_up_cb(struct cfhsi_drv *drv) 973static void cfhsi_wake_up_cb(struct cfhsi_cb_ops *cb_ops)
992{ 974{
993 struct cfhsi *cfhsi = NULL; 975 struct cfhsi *cfhsi = NULL;
994 976
995 cfhsi = container_of(drv, struct cfhsi, drv); 977 cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
996 dev_dbg(&cfhsi->ndev->dev, "%s.\n", 978 netdev_dbg(cfhsi->ndev, "%s.\n",
997 __func__); 979 __func__);
998 980
999 set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits); 981 set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
@@ -1007,12 +989,12 @@ static void cfhsi_wake_up_cb(struct cfhsi_drv *drv)
1007 queue_work(cfhsi->wq, &cfhsi->wake_up_work); 989 queue_work(cfhsi->wq, &cfhsi->wake_up_work);
1008} 990}
1009 991
1010static void cfhsi_wake_down_cb(struct cfhsi_drv *drv) 992static void cfhsi_wake_down_cb(struct cfhsi_cb_ops *cb_ops)
1011{ 993{
1012 struct cfhsi *cfhsi = NULL; 994 struct cfhsi *cfhsi = NULL;
1013 995
1014 cfhsi = container_of(drv, struct cfhsi, drv); 996 cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
1015 dev_dbg(&cfhsi->ndev->dev, "%s.\n", 997 netdev_dbg(cfhsi->ndev, "%s.\n",
1016 __func__); 998 __func__);
1017 999
1018 /* Initiating low power is only permitted by the host (us). */ 1000 /* Initiating low power is only permitted by the host (us). */
@@ -1024,7 +1006,7 @@ static void cfhsi_aggregation_tout(unsigned long arg)
1024{ 1006{
1025 struct cfhsi *cfhsi = (struct cfhsi *)arg; 1007 struct cfhsi *cfhsi = (struct cfhsi *)arg;
1026 1008
1027 dev_dbg(&cfhsi->ndev->dev, "%s.\n", 1009 netdev_dbg(cfhsi->ndev, "%s.\n",
1028 __func__); 1010 __func__);
1029 1011
1030 cfhsi_start_tx(cfhsi); 1012 cfhsi_start_tx(cfhsi);
@@ -1077,7 +1059,7 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
1077 1059
1078 /* Send flow off if number of packets is above high water mark. */ 1060 /* Send flow off if number of packets is above high water mark. */
1079 if (!cfhsi->flow_off_sent && 1061 if (!cfhsi->flow_off_sent &&
1080 cfhsi_tx_queue_len(cfhsi) > cfhsi->q_high_mark && 1062 cfhsi_tx_queue_len(cfhsi) > cfhsi->cfg.q_high_mark &&
1081 cfhsi->cfdev.flowctrl) { 1063 cfhsi->cfdev.flowctrl) {
1082 cfhsi->flow_off_sent = 1; 1064 cfhsi->flow_off_sent = 1;
1083 cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF); 1065 cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
@@ -1114,9 +1096,9 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
1114 WARN_ON(!len); 1096 WARN_ON(!len);
1115 1097
1116 /* Set up new transfer. */ 1098 /* Set up new transfer. */
1117 res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev); 1099 res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
1118 if (WARN_ON(res < 0)) { 1100 if (WARN_ON(res < 0)) {
1119 dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n", 1101 netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
1120 __func__, res); 1102 __func__, res);
1121 cfhsi_abort_tx(cfhsi); 1103 cfhsi_abort_tx(cfhsi);
1122 } 1104 }
@@ -1129,19 +1111,19 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
1129 return 0; 1111 return 0;
1130} 1112}
1131 1113
1132static const struct net_device_ops cfhsi_ops; 1114static const struct net_device_ops cfhsi_netdevops;
1133 1115
1134static void cfhsi_setup(struct net_device *dev) 1116static void cfhsi_setup(struct net_device *dev)
1135{ 1117{
1136 int i; 1118 int i;
1137 struct cfhsi *cfhsi = netdev_priv(dev); 1119 struct cfhsi *cfhsi = netdev_priv(dev);
1138 dev->features = 0; 1120 dev->features = 0;
1139 dev->netdev_ops = &cfhsi_ops;
1140 dev->type = ARPHRD_CAIF; 1121 dev->type = ARPHRD_CAIF;
1141 dev->flags = IFF_POINTOPOINT | IFF_NOARP; 1122 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
1142 dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ; 1123 dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
1143 dev->tx_queue_len = 0; 1124 dev->tx_queue_len = 0;
1144 dev->destructor = free_netdev; 1125 dev->destructor = free_netdev;
1126 dev->netdev_ops = &cfhsi_netdevops;
1145 for (i = 0; i < CFHSI_PRIO_LAST; ++i) 1127 for (i = 0; i < CFHSI_PRIO_LAST; ++i)
1146 skb_queue_head_init(&cfhsi->qhead[i]); 1128 skb_queue_head_init(&cfhsi->qhead[i]);
1147 cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW; 1129 cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
@@ -1149,43 +1131,7 @@ static void cfhsi_setup(struct net_device *dev)
1149 cfhsi->cfdev.use_stx = false; 1131 cfhsi->cfdev.use_stx = false;
1150 cfhsi->cfdev.use_fcs = false; 1132 cfhsi->cfdev.use_fcs = false;
1151 cfhsi->ndev = dev; 1133 cfhsi->ndev = dev;
1152} 1134 cfhsi->cfg = hsi_default_config;
1153
1154int cfhsi_probe(struct platform_device *pdev)
1155{
1156 struct cfhsi *cfhsi = NULL;
1157 struct net_device *ndev;
1158
1159 int res;
1160
1161 ndev = alloc_netdev(sizeof(struct cfhsi), "cfhsi%d", cfhsi_setup);
1162 if (!ndev)
1163 return -ENODEV;
1164
1165 cfhsi = netdev_priv(ndev);
1166 cfhsi->ndev = ndev;
1167 cfhsi->pdev = pdev;
1168
1169 /* Assign the HSI device. */
1170 cfhsi->dev = pdev->dev.platform_data;
1171
1172 /* Assign the driver to this HSI device. */
1173 cfhsi->dev->drv = &cfhsi->drv;
1174
1175 /* Register network device. */
1176 res = register_netdev(ndev);
1177 if (res) {
1178 dev_err(&ndev->dev, "%s: Registration error: %d.\n",
1179 __func__, res);
1180 free_netdev(ndev);
1181 return -ENODEV;
1182 }
1183 /* Add CAIF HSI device to list. */
1184 spin_lock(&cfhsi_list_lock);
1185 list_add_tail(&cfhsi->list, &cfhsi_list);
1186 spin_unlock(&cfhsi_list_lock);
1187
1188 return res;
1189} 1135}
1190 1136
1191static int cfhsi_open(struct net_device *ndev) 1137static int cfhsi_open(struct net_device *ndev)
@@ -1201,9 +1147,6 @@ static int cfhsi_open(struct net_device *ndev)
1201 1147
1202 /* Set flow info */ 1148 /* Set flow info */
1203 cfhsi->flow_off_sent = 0; 1149 cfhsi->flow_off_sent = 0;
1204 cfhsi->q_low_mark = LOW_WATER_MARK;
1205 cfhsi->q_high_mark = HIGH_WATER_MARK;
1206
1207 1150
1208 /* 1151 /*
1209 * Allocate a TX buffer with the size of a HSI packet descriptors 1152 * Allocate a TX buffer with the size of a HSI packet descriptors
@@ -1231,20 +1174,8 @@ static int cfhsi_open(struct net_device *ndev)
1231 goto err_alloc_rx_flip; 1174 goto err_alloc_rx_flip;
1232 } 1175 }
1233 1176
1234 /* Pre-calculate inactivity timeout. */
1235 if (inactivity_timeout != -1) {
1236 cfhsi->inactivity_timeout =
1237 inactivity_timeout * HZ / 1000;
1238 if (!cfhsi->inactivity_timeout)
1239 cfhsi->inactivity_timeout = 1;
1240 else if (cfhsi->inactivity_timeout > NEXT_TIMER_MAX_DELTA)
1241 cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
1242 } else {
1243 cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
1244 }
1245
1246 /* Initialize aggregation timeout */ 1177 /* Initialize aggregation timeout */
1247 cfhsi->aggregation_timeout = aggregation_timeout; 1178 cfhsi->cfg.aggregation_timeout = hsi_default_config.aggregation_timeout;
1248 1179
1249 /* Initialize recieve vaiables. */ 1180 /* Initialize recieve vaiables. */
1250 cfhsi->rx_ptr = cfhsi->rx_buf; 1181 cfhsi->rx_ptr = cfhsi->rx_buf;
@@ -1254,10 +1185,10 @@ static int cfhsi_open(struct net_device *ndev)
1254 spin_lock_init(&cfhsi->lock); 1185 spin_lock_init(&cfhsi->lock);
1255 1186
1256 /* Set up the driver. */ 1187 /* Set up the driver. */
1257 cfhsi->drv.tx_done_cb = cfhsi_tx_done_cb; 1188 cfhsi->cb_ops.tx_done_cb = cfhsi_tx_done_cb;
1258 cfhsi->drv.rx_done_cb = cfhsi_rx_done_cb; 1189 cfhsi->cb_ops.rx_done_cb = cfhsi_rx_done_cb;
1259 cfhsi->drv.wake_up_cb = cfhsi_wake_up_cb; 1190 cfhsi->cb_ops.wake_up_cb = cfhsi_wake_up_cb;
1260 cfhsi->drv.wake_down_cb = cfhsi_wake_down_cb; 1191 cfhsi->cb_ops.wake_down_cb = cfhsi_wake_down_cb;
1261 1192
1262 /* Initialize the work queues. */ 1193 /* Initialize the work queues. */
1263 INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up); 1194 INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
@@ -1271,9 +1202,9 @@ static int cfhsi_open(struct net_device *ndev)
1271 clear_bit(CFHSI_AWAKE, &cfhsi->bits); 1202 clear_bit(CFHSI_AWAKE, &cfhsi->bits);
1272 1203
1273 /* Create work thread. */ 1204 /* Create work thread. */
1274 cfhsi->wq = create_singlethread_workqueue(cfhsi->pdev->name); 1205 cfhsi->wq = create_singlethread_workqueue(cfhsi->ndev->name);
1275 if (!cfhsi->wq) { 1206 if (!cfhsi->wq) {
1276 dev_err(&cfhsi->ndev->dev, "%s: Failed to create work queue.\n", 1207 netdev_err(cfhsi->ndev, "%s: Failed to create work queue.\n",
1277 __func__); 1208 __func__);
1278 res = -ENODEV; 1209 res = -ENODEV;
1279 goto err_create_wq; 1210 goto err_create_wq;
@@ -1298,9 +1229,9 @@ static int cfhsi_open(struct net_device *ndev)
1298 cfhsi->aggregation_timer.function = cfhsi_aggregation_tout; 1229 cfhsi->aggregation_timer.function = cfhsi_aggregation_tout;
1299 1230
1300 /* Activate HSI interface. */ 1231 /* Activate HSI interface. */
1301 res = cfhsi->dev->cfhsi_up(cfhsi->dev); 1232 res = cfhsi->ops->cfhsi_up(cfhsi->ops);
1302 if (res) { 1233 if (res) {
1303 dev_err(&cfhsi->ndev->dev, 1234 netdev_err(cfhsi->ndev,
1304 "%s: can't activate HSI interface: %d.\n", 1235 "%s: can't activate HSI interface: %d.\n",
1305 __func__, res); 1236 __func__, res);
1306 goto err_activate; 1237 goto err_activate;
@@ -1309,14 +1240,14 @@ static int cfhsi_open(struct net_device *ndev)
1309 /* Flush FIFO */ 1240 /* Flush FIFO */
1310 res = cfhsi_flush_fifo(cfhsi); 1241 res = cfhsi_flush_fifo(cfhsi);
1311 if (res) { 1242 if (res) {
1312 dev_err(&cfhsi->ndev->dev, "%s: Can't flush FIFO: %d.\n", 1243 netdev_err(cfhsi->ndev, "%s: Can't flush FIFO: %d.\n",
1313 __func__, res); 1244 __func__, res);
1314 goto err_net_reg; 1245 goto err_net_reg;
1315 } 1246 }
1316 return res; 1247 return res;
1317 1248
1318 err_net_reg: 1249 err_net_reg:
1319 cfhsi->dev->cfhsi_down(cfhsi->dev); 1250 cfhsi->ops->cfhsi_down(cfhsi->ops);
1320 err_activate: 1251 err_activate:
1321 destroy_workqueue(cfhsi->wq); 1252 destroy_workqueue(cfhsi->wq);
1322 err_create_wq: 1253 err_create_wq:
@@ -1346,7 +1277,7 @@ static int cfhsi_close(struct net_device *ndev)
1346 del_timer_sync(&cfhsi->aggregation_timer); 1277 del_timer_sync(&cfhsi->aggregation_timer);
1347 1278
1348 /* Cancel pending RX request (if any) */ 1279 /* Cancel pending RX request (if any) */
1349 cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev); 1280 cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
1350 1281
1351 /* Destroy workqueue */ 1282 /* Destroy workqueue */
1352 destroy_workqueue(cfhsi->wq); 1283 destroy_workqueue(cfhsi->wq);
@@ -1359,7 +1290,7 @@ static int cfhsi_close(struct net_device *ndev)
1359 cfhsi_abort_tx(cfhsi); 1290 cfhsi_abort_tx(cfhsi);
1360 1291
1361 /* Deactivate interface */ 1292 /* Deactivate interface */
1362 cfhsi->dev->cfhsi_down(cfhsi->dev); 1293 cfhsi->ops->cfhsi_down(cfhsi->ops);
1363 1294
1364 /* Free buffers. */ 1295 /* Free buffers. */
1365 kfree(tx_buf); 1296 kfree(tx_buf);
@@ -1368,85 +1299,184 @@ static int cfhsi_close(struct net_device *ndev)
1368 return 0; 1299 return 0;
1369} 1300}
1370 1301
1371static const struct net_device_ops cfhsi_ops = { 1302static void cfhsi_uninit(struct net_device *dev)
1303{
1304 struct cfhsi *cfhsi = netdev_priv(dev);
1305 ASSERT_RTNL();
1306 symbol_put(cfhsi_get_device);
1307 list_del(&cfhsi->list);
1308}
1309
1310static const struct net_device_ops cfhsi_netdevops = {
1311 .ndo_uninit = cfhsi_uninit,
1372 .ndo_open = cfhsi_open, 1312 .ndo_open = cfhsi_open,
1373 .ndo_stop = cfhsi_close, 1313 .ndo_stop = cfhsi_close,
1374 .ndo_start_xmit = cfhsi_xmit 1314 .ndo_start_xmit = cfhsi_xmit
1375}; 1315};
1376 1316
1377int cfhsi_remove(struct platform_device *pdev) 1317static void cfhsi_netlink_parms(struct nlattr *data[], struct cfhsi *cfhsi)
1378{ 1318{
1379 struct list_head *list_node; 1319 int i;
1380 struct list_head *n;
1381 struct cfhsi *cfhsi = NULL;
1382 struct cfhsi_dev *dev;
1383 1320
1384 dev = (struct cfhsi_dev *)pdev->dev.platform_data; 1321 if (!data) {
1385 spin_lock(&cfhsi_list_lock); 1322 pr_debug("no params data found\n");
1386 list_for_each_safe(list_node, n, &cfhsi_list) { 1323 return;
1387 cfhsi = list_entry(list_node, struct cfhsi, list);
1388 /* Find the corresponding device. */
1389 if (cfhsi->dev == dev) {
1390 /* Remove from list. */
1391 list_del(list_node);
1392 spin_unlock(&cfhsi_list_lock);
1393 return 0;
1394 }
1395 } 1324 }
1396 spin_unlock(&cfhsi_list_lock); 1325
1397 return -ENODEV; 1326 i = __IFLA_CAIF_HSI_INACTIVITY_TOUT;
1327 /*
1328 * Inactivity timeout in millisecs. Lowest possible value is 1,
1329 * and highest possible is NEXT_TIMER_MAX_DELTA.
1330 */
1331 if (data[i]) {
1332 u32 inactivity_timeout = nla_get_u32(data[i]);
1333 /* Pre-calculate inactivity timeout. */
1334 cfhsi->cfg.inactivity_timeout = inactivity_timeout * HZ / 1000;
1335 if (cfhsi->cfg.inactivity_timeout == 0)
1336 cfhsi->cfg.inactivity_timeout = 1;
1337 else if (cfhsi->cfg.inactivity_timeout > NEXT_TIMER_MAX_DELTA)
1338 cfhsi->cfg.inactivity_timeout = NEXT_TIMER_MAX_DELTA;
1339 }
1340
1341 i = __IFLA_CAIF_HSI_AGGREGATION_TOUT;
1342 if (data[i])
1343 cfhsi->cfg.aggregation_timeout = nla_get_u32(data[i]);
1344
1345 i = __IFLA_CAIF_HSI_HEAD_ALIGN;
1346 if (data[i])
1347 cfhsi->cfg.head_align = nla_get_u32(data[i]);
1348
1349 i = __IFLA_CAIF_HSI_TAIL_ALIGN;
1350 if (data[i])
1351 cfhsi->cfg.tail_align = nla_get_u32(data[i]);
1352
1353 i = __IFLA_CAIF_HSI_QHIGH_WATERMARK;
1354 if (data[i])
1355 cfhsi->cfg.q_high_mark = nla_get_u32(data[i]);
1356
1357 i = __IFLA_CAIF_HSI_QLOW_WATERMARK;
1358 if (data[i])
1359 cfhsi->cfg.q_low_mark = nla_get_u32(data[i]);
1360}
1361
1362static int caif_hsi_changelink(struct net_device *dev, struct nlattr *tb[],
1363 struct nlattr *data[])
1364{
1365 cfhsi_netlink_parms(data, netdev_priv(dev));
1366 netdev_state_change(dev);
1367 return 0;
1398} 1368}
1399 1369
1400struct platform_driver cfhsi_plat_drv = { 1370static const struct nla_policy caif_hsi_policy[__IFLA_CAIF_HSI_MAX + 1] = {
1401 .probe = cfhsi_probe, 1371 [__IFLA_CAIF_HSI_INACTIVITY_TOUT] = { .type = NLA_U32, .len = 4 },
1402 .remove = cfhsi_remove, 1372 [__IFLA_CAIF_HSI_AGGREGATION_TOUT] = { .type = NLA_U32, .len = 4 },
1403 .driver = { 1373 [__IFLA_CAIF_HSI_HEAD_ALIGN] = { .type = NLA_U32, .len = 4 },
1404 .name = "cfhsi", 1374 [__IFLA_CAIF_HSI_TAIL_ALIGN] = { .type = NLA_U32, .len = 4 },
1405 .owner = THIS_MODULE, 1375 [__IFLA_CAIF_HSI_QHIGH_WATERMARK] = { .type = NLA_U32, .len = 4 },
1406 }, 1376 [__IFLA_CAIF_HSI_QLOW_WATERMARK] = { .type = NLA_U32, .len = 4 },
1407}; 1377};
1408 1378
1409static void __exit cfhsi_exit_module(void) 1379static size_t caif_hsi_get_size(const struct net_device *dev)
1380{
1381 int i;
1382 size_t s = 0;
1383 for (i = __IFLA_CAIF_HSI_UNSPEC + 1; i < __IFLA_CAIF_HSI_MAX; i++)
1384 s += nla_total_size(caif_hsi_policy[i].len);
1385 return s;
1386}
1387
1388static int caif_hsi_fill_info(struct sk_buff *skb, const struct net_device *dev)
1389{
1390 struct cfhsi *cfhsi = netdev_priv(dev);
1391
1392 if (nla_put_u32(skb, __IFLA_CAIF_HSI_INACTIVITY_TOUT,
1393 cfhsi->cfg.inactivity_timeout) ||
1394 nla_put_u32(skb, __IFLA_CAIF_HSI_AGGREGATION_TOUT,
1395 cfhsi->cfg.aggregation_timeout) ||
1396 nla_put_u32(skb, __IFLA_CAIF_HSI_HEAD_ALIGN,
1397 cfhsi->cfg.head_align) ||
1398 nla_put_u32(skb, __IFLA_CAIF_HSI_TAIL_ALIGN,
1399 cfhsi->cfg.tail_align) ||
1400 nla_put_u32(skb, __IFLA_CAIF_HSI_QHIGH_WATERMARK,
1401 cfhsi->cfg.q_high_mark) ||
1402 nla_put_u32(skb, __IFLA_CAIF_HSI_QLOW_WATERMARK,
1403 cfhsi->cfg.q_low_mark))
1404 return -EMSGSIZE;
1405
1406 return 0;
1407}
1408
1409static int caif_hsi_newlink(struct net *src_net, struct net_device *dev,
1410 struct nlattr *tb[], struct nlattr *data[])
1410{ 1411{
1411 struct list_head *list_node;
1412 struct list_head *n;
1413 struct cfhsi *cfhsi = NULL; 1412 struct cfhsi *cfhsi = NULL;
1413 struct cfhsi_ops *(*get_ops)(void);
1414 1414
1415 spin_lock(&cfhsi_list_lock); 1415 ASSERT_RTNL();
1416 list_for_each_safe(list_node, n, &cfhsi_list) {
1417 cfhsi = list_entry(list_node, struct cfhsi, list);
1418 1416
1419 /* Remove from list. */ 1417 cfhsi = netdev_priv(dev);
1420 list_del(list_node); 1418 cfhsi_netlink_parms(data, cfhsi);
1421 spin_unlock(&cfhsi_list_lock); 1419 dev_net_set(cfhsi->ndev, src_net);
1420
1421 get_ops = symbol_get(cfhsi_get_ops);
1422 if (!get_ops) {
1423 pr_err("%s: failed to get the cfhsi_ops\n", __func__);
1424 return -ENODEV;
1425 }
1422 1426
1423 unregister_netdevice(cfhsi->ndev); 1427 /* Assign the HSI device. */
1428 cfhsi->ops = (*get_ops)();
1429 if (!cfhsi->ops) {
1430 pr_err("%s: failed to get the cfhsi_ops\n", __func__);
1431 goto err;
1432 }
1424 1433
1425 spin_lock(&cfhsi_list_lock); 1434 /* Assign the driver to this HSI device. */
1435 cfhsi->ops->cb_ops = &cfhsi->cb_ops;
1436 if (register_netdevice(dev)) {
1437 pr_warn("%s: caif_hsi device registration failed\n", __func__);
1438 goto err;
1426 } 1439 }
1427 spin_unlock(&cfhsi_list_lock); 1440 /* Add CAIF HSI device to list. */
1441 list_add_tail(&cfhsi->list, &cfhsi_list);
1428 1442
1429 /* Unregister platform driver. */ 1443 return 0;
1430 platform_driver_unregister(&cfhsi_plat_drv); 1444err:
1445 symbol_put(cfhsi_get_ops);
1446 return -ENODEV;
1431} 1447}
1432 1448
1433static int __init cfhsi_init_module(void) 1449static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
1450 .kind = "cfhsi",
1451 .priv_size = sizeof(struct cfhsi),
1452 .setup = cfhsi_setup,
1453 .maxtype = __IFLA_CAIF_HSI_MAX,
1454 .policy = caif_hsi_policy,
1455 .newlink = caif_hsi_newlink,
1456 .changelink = caif_hsi_changelink,
1457 .get_size = caif_hsi_get_size,
1458 .fill_info = caif_hsi_fill_info,
1459};
1460
1461static void __exit cfhsi_exit_module(void)
1434{ 1462{
1435 int result; 1463 struct list_head *list_node;
1464 struct list_head *n;
1465 struct cfhsi *cfhsi;
1436 1466
1437 /* Initialize spin lock. */ 1467 rtnl_link_unregister(&caif_hsi_link_ops);
1438 spin_lock_init(&cfhsi_list_lock);
1439 1468
1440 /* Register platform driver. */ 1469 rtnl_lock();
1441 result = platform_driver_register(&cfhsi_plat_drv); 1470 list_for_each_safe(list_node, n, &cfhsi_list) {
1442 if (result) { 1471 cfhsi = list_entry(list_node, struct cfhsi, list);
1443 printk(KERN_ERR "Could not register platform HSI driver: %d.\n", 1472 unregister_netdev(cfhsi->ndev);
1444 result);
1445 goto err_dev_register;
1446 } 1473 }
1474 rtnl_unlock();
1475}
1447 1476
1448 err_dev_register: 1477static int __init cfhsi_init_module(void)
1449 return result; 1478{
1479 return rtnl_link_register(&caif_hsi_link_ops);
1450} 1480}
1451 1481
1452module_init(cfhsi_init_module); 1482module_init(cfhsi_init_module);
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 3f88473423e9..ea3143895e6d 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -597,7 +597,7 @@ static int __devinit bfin_can_probe(struct platform_device *pdev)
597 dev_info(&pdev->dev, 597 dev_info(&pdev->dev,
598 "%s device registered" 598 "%s device registered"
599 "(&reg_base=%p, rx_irq=%d, tx_irq=%d, err_irq=%d, sclk=%d)\n", 599 "(&reg_base=%p, rx_irq=%d, tx_irq=%d, err_irq=%d, sclk=%d)\n",
600 DRV_NAME, (void *)priv->membase, priv->rx_irq, 600 DRV_NAME, priv->membase, priv->rx_irq,
601 priv->tx_irq, priv->err_irq, priv->can.clock.freq); 601 priv->tx_irq, priv->err_irq, priv->can.clock.freq);
602 return 0; 602 return 0;
603 603
diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig
index ffb9773d102d..3b83bafcd947 100644
--- a/drivers/net/can/c_can/Kconfig
+++ b/drivers/net/can/c_can/Kconfig
@@ -1,15 +1,23 @@
1menuconfig CAN_C_CAN 1menuconfig CAN_C_CAN
2 tristate "Bosch C_CAN devices" 2 tristate "Bosch C_CAN/D_CAN devices"
3 depends on CAN_DEV && HAS_IOMEM 3 depends on CAN_DEV && HAS_IOMEM
4 4
5if CAN_C_CAN 5if CAN_C_CAN
6 6
7config CAN_C_CAN_PLATFORM 7config CAN_C_CAN_PLATFORM
8 tristate "Generic Platform Bus based C_CAN driver" 8 tristate "Generic Platform Bus based C_CAN/D_CAN driver"
9 ---help--- 9 ---help---
10 This driver adds support for the C_CAN chips connected to 10 This driver adds support for the C_CAN/D_CAN chips connected
11 the "platform bus" (Linux abstraction for directly to the 11 to the "platform bus" (Linux abstraction for directly to the
12 processor attached devices) which can be found on various 12 processor attached devices) which can be found on various
13 boards from ST Microelectronics (http://www.st.com) 13 boards from ST Microelectronics (http://www.st.com) like the
14 like the SPEAr1310 and SPEAr320 evaluation boards. 14 SPEAr1310 and SPEAr320 evaluation boards & TI (www.ti.com)
15 boards like am335x, dm814x, dm813x and dm811x.
16
17config CAN_C_CAN_PCI
18 tristate "Generic PCI Bus based C_CAN/D_CAN driver"
19 depends on PCI
20 ---help---
21 This driver adds support for the C_CAN/D_CAN chips connected
22 to the PCI bus.
15endif 23endif
diff --git a/drivers/net/can/c_can/Makefile b/drivers/net/can/c_can/Makefile
index 9273f6d5c4b7..ad1cc842170a 100644
--- a/drivers/net/can/c_can/Makefile
+++ b/drivers/net/can/c_can/Makefile
@@ -4,5 +4,6 @@
4 4
5obj-$(CONFIG_CAN_C_CAN) += c_can.o 5obj-$(CONFIG_CAN_C_CAN) += c_can.o
6obj-$(CONFIG_CAN_C_CAN_PLATFORM) += c_can_platform.o 6obj-$(CONFIG_CAN_C_CAN_PLATFORM) += c_can_platform.o
7obj-$(CONFIG_CAN_C_CAN_PCI) += c_can_pci.o
7 8
8ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG 9ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 86cd532c78f9..eea660800a09 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -41,6 +41,10 @@
41 41
42#include "c_can.h" 42#include "c_can.h"
43 43
44/* Number of interface registers */
45#define IF_ENUM_REG_LEN 11
46#define C_CAN_IFACE(reg, iface) (C_CAN_IF1_##reg + (iface) * IF_ENUM_REG_LEN)
47
44/* control register */ 48/* control register */
45#define CONTROL_TEST BIT(7) 49#define CONTROL_TEST BIT(7)
46#define CONTROL_CCE BIT(6) 50#define CONTROL_CCE BIT(6)
@@ -209,10 +213,10 @@ static inline int get_tx_echo_msg_obj(const struct c_can_priv *priv)
209 C_CAN_MSG_OBJ_TX_FIRST; 213 C_CAN_MSG_OBJ_TX_FIRST;
210} 214}
211 215
212static u32 c_can_read_reg32(struct c_can_priv *priv, void *reg) 216static u32 c_can_read_reg32(struct c_can_priv *priv, enum reg index)
213{ 217{
214 u32 val = priv->read_reg(priv, reg); 218 u32 val = priv->read_reg(priv, index);
215 val |= ((u32) priv->read_reg(priv, reg + 2)) << 16; 219 val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
216 return val; 220 return val;
217} 221}
218 222
@@ -220,14 +224,14 @@ static void c_can_enable_all_interrupts(struct c_can_priv *priv,
220 int enable) 224 int enable)
221{ 225{
222 unsigned int cntrl_save = priv->read_reg(priv, 226 unsigned int cntrl_save = priv->read_reg(priv,
223 &priv->regs->control); 227 C_CAN_CTRL_REG);
224 228
225 if (enable) 229 if (enable)
226 cntrl_save |= (CONTROL_SIE | CONTROL_EIE | CONTROL_IE); 230 cntrl_save |= (CONTROL_SIE | CONTROL_EIE | CONTROL_IE);
227 else 231 else
228 cntrl_save &= ~(CONTROL_EIE | CONTROL_IE | CONTROL_SIE); 232 cntrl_save &= ~(CONTROL_EIE | CONTROL_IE | CONTROL_SIE);
229 233
230 priv->write_reg(priv, &priv->regs->control, cntrl_save); 234 priv->write_reg(priv, C_CAN_CTRL_REG, cntrl_save);
231} 235}
232 236
233static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface) 237static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface)
@@ -235,7 +239,7 @@ static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface)
235 int count = MIN_TIMEOUT_VALUE; 239 int count = MIN_TIMEOUT_VALUE;
236 240
237 while (count && priv->read_reg(priv, 241 while (count && priv->read_reg(priv,
238 &priv->regs->ifregs[iface].com_req) & 242 C_CAN_IFACE(COMREQ_REG, iface)) &
239 IF_COMR_BUSY) { 243 IF_COMR_BUSY) {
240 count--; 244 count--;
241 udelay(1); 245 udelay(1);
@@ -258,9 +262,9 @@ static inline void c_can_object_get(struct net_device *dev,
258 * register and message RAM must be complete in 6 CAN-CLK 262 * register and message RAM must be complete in 6 CAN-CLK
259 * period. 263 * period.
260 */ 264 */
261 priv->write_reg(priv, &priv->regs->ifregs[iface].com_mask, 265 priv->write_reg(priv, C_CAN_IFACE(COMMSK_REG, iface),
262 IFX_WRITE_LOW_16BIT(mask)); 266 IFX_WRITE_LOW_16BIT(mask));
263 priv->write_reg(priv, &priv->regs->ifregs[iface].com_req, 267 priv->write_reg(priv, C_CAN_IFACE(COMREQ_REG, iface),
264 IFX_WRITE_LOW_16BIT(objno)); 268 IFX_WRITE_LOW_16BIT(objno));
265 269
266 if (c_can_msg_obj_is_busy(priv, iface)) 270 if (c_can_msg_obj_is_busy(priv, iface))
@@ -278,9 +282,9 @@ static inline void c_can_object_put(struct net_device *dev,
278 * register and message RAM must be complete in 6 CAN-CLK 282 * register and message RAM must be complete in 6 CAN-CLK
279 * period. 283 * period.
280 */ 284 */
281 priv->write_reg(priv, &priv->regs->ifregs[iface].com_mask, 285 priv->write_reg(priv, C_CAN_IFACE(COMMSK_REG, iface),
282 (IF_COMM_WR | IFX_WRITE_LOW_16BIT(mask))); 286 (IF_COMM_WR | IFX_WRITE_LOW_16BIT(mask)));
283 priv->write_reg(priv, &priv->regs->ifregs[iface].com_req, 287 priv->write_reg(priv, C_CAN_IFACE(COMREQ_REG, iface),
284 IFX_WRITE_LOW_16BIT(objno)); 288 IFX_WRITE_LOW_16BIT(objno));
285 289
286 if (c_can_msg_obj_is_busy(priv, iface)) 290 if (c_can_msg_obj_is_busy(priv, iface))
@@ -306,18 +310,18 @@ static void c_can_write_msg_object(struct net_device *dev,
306 310
307 flags |= IF_ARB_MSGVAL; 311 flags |= IF_ARB_MSGVAL;
308 312
309 priv->write_reg(priv, &priv->regs->ifregs[iface].arb1, 313 priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
310 IFX_WRITE_LOW_16BIT(id)); 314 IFX_WRITE_LOW_16BIT(id));
311 priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, flags | 315 priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), flags |
312 IFX_WRITE_HIGH_16BIT(id)); 316 IFX_WRITE_HIGH_16BIT(id));
313 317
314 for (i = 0; i < frame->can_dlc; i += 2) { 318 for (i = 0; i < frame->can_dlc; i += 2) {
315 priv->write_reg(priv, &priv->regs->ifregs[iface].data[i / 2], 319 priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
316 frame->data[i] | (frame->data[i + 1] << 8)); 320 frame->data[i] | (frame->data[i + 1] << 8));
317 } 321 }
318 322
319 /* enable interrupt for this message object */ 323 /* enable interrupt for this message object */
320 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, 324 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
321 IF_MCONT_TXIE | IF_MCONT_TXRQST | IF_MCONT_EOB | 325 IF_MCONT_TXIE | IF_MCONT_TXRQST | IF_MCONT_EOB |
322 frame->can_dlc); 326 frame->can_dlc);
323 c_can_object_put(dev, iface, objno, IF_COMM_ALL); 327 c_can_object_put(dev, iface, objno, IF_COMM_ALL);
@@ -329,7 +333,7 @@ static inline void c_can_mark_rx_msg_obj(struct net_device *dev,
329{ 333{
330 struct c_can_priv *priv = netdev_priv(dev); 334 struct c_can_priv *priv = netdev_priv(dev);
331 335
332 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, 336 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
333 ctrl_mask & ~(IF_MCONT_MSGLST | IF_MCONT_INTPND)); 337 ctrl_mask & ~(IF_MCONT_MSGLST | IF_MCONT_INTPND));
334 c_can_object_put(dev, iface, obj, IF_COMM_CONTROL); 338 c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
335 339
@@ -343,7 +347,7 @@ static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
343 struct c_can_priv *priv = netdev_priv(dev); 347 struct c_can_priv *priv = netdev_priv(dev);
344 348
345 for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++) { 349 for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++) {
346 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, 350 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
347 ctrl_mask & ~(IF_MCONT_MSGLST | 351 ctrl_mask & ~(IF_MCONT_MSGLST |
348 IF_MCONT_INTPND | IF_MCONT_NEWDAT)); 352 IF_MCONT_INTPND | IF_MCONT_NEWDAT));
349 c_can_object_put(dev, iface, i, IF_COMM_CONTROL); 353 c_can_object_put(dev, iface, i, IF_COMM_CONTROL);
@@ -356,7 +360,7 @@ static inline void c_can_activate_rx_msg_obj(struct net_device *dev,
356{ 360{
357 struct c_can_priv *priv = netdev_priv(dev); 361 struct c_can_priv *priv = netdev_priv(dev);
358 362
359 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, 363 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
360 ctrl_mask & ~(IF_MCONT_MSGLST | 364 ctrl_mask & ~(IF_MCONT_MSGLST |
361 IF_MCONT_INTPND | IF_MCONT_NEWDAT)); 365 IF_MCONT_INTPND | IF_MCONT_NEWDAT));
362 c_can_object_put(dev, iface, obj, IF_COMM_CONTROL); 366 c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
@@ -374,7 +378,7 @@ static void c_can_handle_lost_msg_obj(struct net_device *dev,
374 378
375 c_can_object_get(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST); 379 c_can_object_get(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
376 380
377 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, 381 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
378 IF_MCONT_CLR_MSGLST); 382 IF_MCONT_CLR_MSGLST);
379 383
380 c_can_object_put(dev, 0, objno, IF_COMM_CONTROL); 384 c_can_object_put(dev, 0, objno, IF_COMM_CONTROL);
@@ -410,8 +414,8 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
410 414
411 frame->can_dlc = get_can_dlc(ctrl & 0x0F); 415 frame->can_dlc = get_can_dlc(ctrl & 0x0F);
412 416
413 flags = priv->read_reg(priv, &priv->regs->ifregs[iface].arb2); 417 flags = priv->read_reg(priv, C_CAN_IFACE(ARB2_REG, iface));
414 val = priv->read_reg(priv, &priv->regs->ifregs[iface].arb1) | 418 val = priv->read_reg(priv, C_CAN_IFACE(ARB1_REG, iface)) |
415 (flags << 16); 419 (flags << 16);
416 420
417 if (flags & IF_ARB_MSGXTD) 421 if (flags & IF_ARB_MSGXTD)
@@ -424,7 +428,7 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
424 else { 428 else {
425 for (i = 0; i < frame->can_dlc; i += 2) { 429 for (i = 0; i < frame->can_dlc; i += 2) {
426 data = priv->read_reg(priv, 430 data = priv->read_reg(priv,
427 &priv->regs->ifregs[iface].data[i / 2]); 431 C_CAN_IFACE(DATA1_REG, iface) + i / 2);
428 frame->data[i] = data; 432 frame->data[i] = data;
429 frame->data[i + 1] = data >> 8; 433 frame->data[i + 1] = data >> 8;
430 } 434 }
@@ -444,40 +448,40 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
444{ 448{
445 struct c_can_priv *priv = netdev_priv(dev); 449 struct c_can_priv *priv = netdev_priv(dev);
446 450
447 priv->write_reg(priv, &priv->regs->ifregs[iface].mask1, 451 priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface),
448 IFX_WRITE_LOW_16BIT(mask)); 452 IFX_WRITE_LOW_16BIT(mask));
449 priv->write_reg(priv, &priv->regs->ifregs[iface].mask2, 453 priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface),
450 IFX_WRITE_HIGH_16BIT(mask)); 454 IFX_WRITE_HIGH_16BIT(mask));
451 455
452 priv->write_reg(priv, &priv->regs->ifregs[iface].arb1, 456 priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
453 IFX_WRITE_LOW_16BIT(id)); 457 IFX_WRITE_LOW_16BIT(id));
454 priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, 458 priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface),
455 (IF_ARB_MSGVAL | IFX_WRITE_HIGH_16BIT(id))); 459 (IF_ARB_MSGVAL | IFX_WRITE_HIGH_16BIT(id)));
456 460
457 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, mcont); 461 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), mcont);
458 c_can_object_put(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST); 462 c_can_object_put(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
459 463
460 netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno, 464 netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
461 c_can_read_reg32(priv, &priv->regs->msgval1)); 465 c_can_read_reg32(priv, C_CAN_MSGVAL1_REG));
462} 466}
463 467
464static void c_can_inval_msg_object(struct net_device *dev, int iface, int objno) 468static void c_can_inval_msg_object(struct net_device *dev, int iface, int objno)
465{ 469{
466 struct c_can_priv *priv = netdev_priv(dev); 470 struct c_can_priv *priv = netdev_priv(dev);
467 471
468 priv->write_reg(priv, &priv->regs->ifregs[iface].arb1, 0); 472 priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 0);
469 priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, 0); 473 priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 0);
470 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, 0); 474 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 0);
471 475
472 c_can_object_put(dev, iface, objno, IF_COMM_ARB | IF_COMM_CONTROL); 476 c_can_object_put(dev, iface, objno, IF_COMM_ARB | IF_COMM_CONTROL);
473 477
474 netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno, 478 netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
475 c_can_read_reg32(priv, &priv->regs->msgval1)); 479 c_can_read_reg32(priv, C_CAN_MSGVAL1_REG));
476} 480}
477 481
478static inline int c_can_is_next_tx_obj_busy(struct c_can_priv *priv, int objno) 482static inline int c_can_is_next_tx_obj_busy(struct c_can_priv *priv, int objno)
479{ 483{
480 int val = c_can_read_reg32(priv, &priv->regs->txrqst1); 484 int val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);
481 485
482 /* 486 /*
483 * as transmission request register's bit n-1 corresponds to 487 * as transmission request register's bit n-1 corresponds to
@@ -540,12 +544,12 @@ static int c_can_set_bittiming(struct net_device *dev)
540 netdev_info(dev, 544 netdev_info(dev,
541 "setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe); 545 "setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe);
542 546
543 ctrl_save = priv->read_reg(priv, &priv->regs->control); 547 ctrl_save = priv->read_reg(priv, C_CAN_CTRL_REG);
544 priv->write_reg(priv, &priv->regs->control, 548 priv->write_reg(priv, C_CAN_CTRL_REG,
545 ctrl_save | CONTROL_CCE | CONTROL_INIT); 549 ctrl_save | CONTROL_CCE | CONTROL_INIT);
546 priv->write_reg(priv, &priv->regs->btr, reg_btr); 550 priv->write_reg(priv, C_CAN_BTR_REG, reg_btr);
547 priv->write_reg(priv, &priv->regs->brp_ext, reg_brpe); 551 priv->write_reg(priv, C_CAN_BRPEXT_REG, reg_brpe);
548 priv->write_reg(priv, &priv->regs->control, ctrl_save); 552 priv->write_reg(priv, C_CAN_CTRL_REG, ctrl_save);
549 553
550 return 0; 554 return 0;
551} 555}
@@ -587,36 +591,36 @@ static void c_can_chip_config(struct net_device *dev)
587 struct c_can_priv *priv = netdev_priv(dev); 591 struct c_can_priv *priv = netdev_priv(dev);
588 592
589 /* enable automatic retransmission */ 593 /* enable automatic retransmission */
590 priv->write_reg(priv, &priv->regs->control, 594 priv->write_reg(priv, C_CAN_CTRL_REG,
591 CONTROL_ENABLE_AR); 595 CONTROL_ENABLE_AR);
592 596
593 if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) && 597 if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
594 (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) { 598 (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
595 /* loopback + silent mode : useful for hot self-test */ 599 /* loopback + silent mode : useful for hot self-test */
596 priv->write_reg(priv, &priv->regs->control, CONTROL_EIE | 600 priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE |
597 CONTROL_SIE | CONTROL_IE | CONTROL_TEST); 601 CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
598 priv->write_reg(priv, &priv->regs->test, 602 priv->write_reg(priv, C_CAN_TEST_REG,
599 TEST_LBACK | TEST_SILENT); 603 TEST_LBACK | TEST_SILENT);
600 } else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { 604 } else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
601 /* loopback mode : useful for self-test function */ 605 /* loopback mode : useful for self-test function */
602 priv->write_reg(priv, &priv->regs->control, CONTROL_EIE | 606 priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE |
603 CONTROL_SIE | CONTROL_IE | CONTROL_TEST); 607 CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
604 priv->write_reg(priv, &priv->regs->test, TEST_LBACK); 608 priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK);
605 } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) { 609 } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
606 /* silent mode : bus-monitoring mode */ 610 /* silent mode : bus-monitoring mode */
607 priv->write_reg(priv, &priv->regs->control, CONTROL_EIE | 611 priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE |
608 CONTROL_SIE | CONTROL_IE | CONTROL_TEST); 612 CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
609 priv->write_reg(priv, &priv->regs->test, TEST_SILENT); 613 priv->write_reg(priv, C_CAN_TEST_REG, TEST_SILENT);
610 } else 614 } else
611 /* normal mode*/ 615 /* normal mode*/
612 priv->write_reg(priv, &priv->regs->control, 616 priv->write_reg(priv, C_CAN_CTRL_REG,
613 CONTROL_EIE | CONTROL_SIE | CONTROL_IE); 617 CONTROL_EIE | CONTROL_SIE | CONTROL_IE);
614 618
615 /* configure message objects */ 619 /* configure message objects */
616 c_can_configure_msg_objects(dev); 620 c_can_configure_msg_objects(dev);
617 621
618 /* set a `lec` value so that we can check for updates later */ 622 /* set a `lec` value so that we can check for updates later */
619 priv->write_reg(priv, &priv->regs->status, LEC_UNUSED); 623 priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
620 624
621 /* set bittiming params */ 625 /* set bittiming params */
622 c_can_set_bittiming(dev); 626 c_can_set_bittiming(dev);
@@ -669,7 +673,7 @@ static int c_can_get_berr_counter(const struct net_device *dev,
669 unsigned int reg_err_counter; 673 unsigned int reg_err_counter;
670 struct c_can_priv *priv = netdev_priv(dev); 674 struct c_can_priv *priv = netdev_priv(dev);
671 675
672 reg_err_counter = priv->read_reg(priv, &priv->regs->err_cnt); 676 reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
673 bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >> 677 bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
674 ERR_CNT_REC_SHIFT; 678 ERR_CNT_REC_SHIFT;
675 bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK; 679 bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;
@@ -697,12 +701,12 @@ static void c_can_do_tx(struct net_device *dev)
697 701
698 for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) { 702 for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
699 msg_obj_no = get_tx_echo_msg_obj(priv); 703 msg_obj_no = get_tx_echo_msg_obj(priv);
700 val = c_can_read_reg32(priv, &priv->regs->txrqst1); 704 val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);
701 if (!(val & (1 << (msg_obj_no - 1)))) { 705 if (!(val & (1 << (msg_obj_no - 1)))) {
702 can_get_echo_skb(dev, 706 can_get_echo_skb(dev,
703 msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST); 707 msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
704 stats->tx_bytes += priv->read_reg(priv, 708 stats->tx_bytes += priv->read_reg(priv,
705 &priv->regs->ifregs[0].msg_cntrl) 709 C_CAN_IFACE(MSGCTRL_REG, 0))
706 & IF_MCONT_DLC_MASK; 710 & IF_MCONT_DLC_MASK;
707 stats->tx_packets++; 711 stats->tx_packets++;
708 c_can_inval_msg_object(dev, 0, msg_obj_no); 712 c_can_inval_msg_object(dev, 0, msg_obj_no);
@@ -744,11 +748,11 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
744 u32 num_rx_pkts = 0; 748 u32 num_rx_pkts = 0;
745 unsigned int msg_obj, msg_ctrl_save; 749 unsigned int msg_obj, msg_ctrl_save;
746 struct c_can_priv *priv = netdev_priv(dev); 750 struct c_can_priv *priv = netdev_priv(dev);
747 u32 val = c_can_read_reg32(priv, &priv->regs->intpnd1); 751 u32 val = c_can_read_reg32(priv, C_CAN_INTPND1_REG);
748 752
749 for (msg_obj = C_CAN_MSG_OBJ_RX_FIRST; 753 for (msg_obj = C_CAN_MSG_OBJ_RX_FIRST;
750 msg_obj <= C_CAN_MSG_OBJ_RX_LAST && quota > 0; 754 msg_obj <= C_CAN_MSG_OBJ_RX_LAST && quota > 0;
751 val = c_can_read_reg32(priv, &priv->regs->intpnd1), 755 val = c_can_read_reg32(priv, C_CAN_INTPND1_REG),
752 msg_obj++) { 756 msg_obj++) {
753 /* 757 /*
754 * as interrupt pending register's bit n-1 corresponds to 758 * as interrupt pending register's bit n-1 corresponds to
@@ -758,7 +762,7 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
758 c_can_object_get(dev, 0, msg_obj, IF_COMM_ALL & 762 c_can_object_get(dev, 0, msg_obj, IF_COMM_ALL &
759 ~IF_COMM_TXRQST); 763 ~IF_COMM_TXRQST);
760 msg_ctrl_save = priv->read_reg(priv, 764 msg_ctrl_save = priv->read_reg(priv,
761 &priv->regs->ifregs[0].msg_cntrl); 765 C_CAN_IFACE(MSGCTRL_REG, 0));
762 766
763 if (msg_ctrl_save & IF_MCONT_EOB) 767 if (msg_ctrl_save & IF_MCONT_EOB)
764 return num_rx_pkts; 768 return num_rx_pkts;
@@ -819,7 +823,7 @@ static int c_can_handle_state_change(struct net_device *dev,
819 return 0; 823 return 0;
820 824
821 c_can_get_berr_counter(dev, &bec); 825 c_can_get_berr_counter(dev, &bec);
822 reg_err_counter = priv->read_reg(priv, &priv->regs->err_cnt); 826 reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
823 rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >> 827 rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >>
824 ERR_CNT_RP_SHIFT; 828 ERR_CNT_RP_SHIFT;
825 829
@@ -935,7 +939,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
935 } 939 }
936 940
937 /* set a `lec` value so that we can check for updates later */ 941 /* set a `lec` value so that we can check for updates later */
938 priv->write_reg(priv, &priv->regs->status, LEC_UNUSED); 942 priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
939 943
940 netif_receive_skb(skb); 944 netif_receive_skb(skb);
941 stats->rx_packets++; 945 stats->rx_packets++;
@@ -959,15 +963,15 @@ static int c_can_poll(struct napi_struct *napi, int quota)
959 /* status events have the highest priority */ 963 /* status events have the highest priority */
960 if (irqstatus == STATUS_INTERRUPT) { 964 if (irqstatus == STATUS_INTERRUPT) {
961 priv->current_status = priv->read_reg(priv, 965 priv->current_status = priv->read_reg(priv,
962 &priv->regs->status); 966 C_CAN_STS_REG);
963 967
964 /* handle Tx/Rx events */ 968 /* handle Tx/Rx events */
965 if (priv->current_status & STATUS_TXOK) 969 if (priv->current_status & STATUS_TXOK)
966 priv->write_reg(priv, &priv->regs->status, 970 priv->write_reg(priv, C_CAN_STS_REG,
967 priv->current_status & ~STATUS_TXOK); 971 priv->current_status & ~STATUS_TXOK);
968 972
969 if (priv->current_status & STATUS_RXOK) 973 if (priv->current_status & STATUS_RXOK)
970 priv->write_reg(priv, &priv->regs->status, 974 priv->write_reg(priv, C_CAN_STS_REG,
971 priv->current_status & ~STATUS_RXOK); 975 priv->current_status & ~STATUS_RXOK);
972 976
973 /* handle state changes */ 977 /* handle state changes */
@@ -1033,7 +1037,7 @@ static irqreturn_t c_can_isr(int irq, void *dev_id)
1033 struct net_device *dev = (struct net_device *)dev_id; 1037 struct net_device *dev = (struct net_device *)dev_id;
1034 struct c_can_priv *priv = netdev_priv(dev); 1038 struct c_can_priv *priv = netdev_priv(dev);
1035 1039
1036 priv->irqstatus = priv->read_reg(priv, &priv->regs->interrupt); 1040 priv->irqstatus = priv->read_reg(priv, C_CAN_INT_REG);
1037 if (!priv->irqstatus) 1041 if (!priv->irqstatus)
1038 return IRQ_NONE; 1042 return IRQ_NONE;
1039 1043
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index 5f32d34af507..01a7049ab990 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -22,43 +22,129 @@
22#ifndef C_CAN_H 22#ifndef C_CAN_H
23#define C_CAN_H 23#define C_CAN_H
24 24
25/* c_can IF registers */ 25enum reg {
26struct c_can_if_regs { 26 C_CAN_CTRL_REG = 0,
27 u16 com_req; 27 C_CAN_STS_REG,
28 u16 com_mask; 28 C_CAN_ERR_CNT_REG,
29 u16 mask1; 29 C_CAN_BTR_REG,
30 u16 mask2; 30 C_CAN_INT_REG,
31 u16 arb1; 31 C_CAN_TEST_REG,
32 u16 arb2; 32 C_CAN_BRPEXT_REG,
33 u16 msg_cntrl; 33 C_CAN_IF1_COMREQ_REG,
34 u16 data[4]; 34 C_CAN_IF1_COMMSK_REG,
35 u16 _reserved[13]; 35 C_CAN_IF1_MASK1_REG,
36 C_CAN_IF1_MASK2_REG,
37 C_CAN_IF1_ARB1_REG,
38 C_CAN_IF1_ARB2_REG,
39 C_CAN_IF1_MSGCTRL_REG,
40 C_CAN_IF1_DATA1_REG,
41 C_CAN_IF1_DATA2_REG,
42 C_CAN_IF1_DATA3_REG,
43 C_CAN_IF1_DATA4_REG,
44 C_CAN_IF2_COMREQ_REG,
45 C_CAN_IF2_COMMSK_REG,
46 C_CAN_IF2_MASK1_REG,
47 C_CAN_IF2_MASK2_REG,
48 C_CAN_IF2_ARB1_REG,
49 C_CAN_IF2_ARB2_REG,
50 C_CAN_IF2_MSGCTRL_REG,
51 C_CAN_IF2_DATA1_REG,
52 C_CAN_IF2_DATA2_REG,
53 C_CAN_IF2_DATA3_REG,
54 C_CAN_IF2_DATA4_REG,
55 C_CAN_TXRQST1_REG,
56 C_CAN_TXRQST2_REG,
57 C_CAN_NEWDAT1_REG,
58 C_CAN_NEWDAT2_REG,
59 C_CAN_INTPND1_REG,
60 C_CAN_INTPND2_REG,
61 C_CAN_MSGVAL1_REG,
62 C_CAN_MSGVAL2_REG,
36}; 63};
37 64
38/* c_can hardware registers */ 65static const u16 reg_map_c_can[] = {
39struct c_can_regs { 66 [C_CAN_CTRL_REG] = 0x00,
40 u16 control; 67 [C_CAN_STS_REG] = 0x02,
41 u16 status; 68 [C_CAN_ERR_CNT_REG] = 0x04,
42 u16 err_cnt; 69 [C_CAN_BTR_REG] = 0x06,
43 u16 btr; 70 [C_CAN_INT_REG] = 0x08,
44 u16 interrupt; 71 [C_CAN_TEST_REG] = 0x0A,
45 u16 test; 72 [C_CAN_BRPEXT_REG] = 0x0C,
46 u16 brp_ext; 73 [C_CAN_IF1_COMREQ_REG] = 0x10,
47 u16 _reserved1; 74 [C_CAN_IF1_COMMSK_REG] = 0x12,
48 struct c_can_if_regs ifregs[2]; /* [0] = IF1 and [1] = IF2 */ 75 [C_CAN_IF1_MASK1_REG] = 0x14,
49 u16 _reserved2[8]; 76 [C_CAN_IF1_MASK2_REG] = 0x16,
50 u16 txrqst1; 77 [C_CAN_IF1_ARB1_REG] = 0x18,
51 u16 txrqst2; 78 [C_CAN_IF1_ARB2_REG] = 0x1A,
52 u16 _reserved3[6]; 79 [C_CAN_IF1_MSGCTRL_REG] = 0x1C,
53 u16 newdat1; 80 [C_CAN_IF1_DATA1_REG] = 0x1E,
54 u16 newdat2; 81 [C_CAN_IF1_DATA2_REG] = 0x20,
55 u16 _reserved4[6]; 82 [C_CAN_IF1_DATA3_REG] = 0x22,
56 u16 intpnd1; 83 [C_CAN_IF1_DATA4_REG] = 0x24,
57 u16 intpnd2; 84 [C_CAN_IF2_COMREQ_REG] = 0x40,
58 u16 _reserved5[6]; 85 [C_CAN_IF2_COMMSK_REG] = 0x42,
59 u16 msgval1; 86 [C_CAN_IF2_MASK1_REG] = 0x44,
60 u16 msgval2; 87 [C_CAN_IF2_MASK2_REG] = 0x46,
61 u16 _reserved6[6]; 88 [C_CAN_IF2_ARB1_REG] = 0x48,
89 [C_CAN_IF2_ARB2_REG] = 0x4A,
90 [C_CAN_IF2_MSGCTRL_REG] = 0x4C,
91 [C_CAN_IF2_DATA1_REG] = 0x4E,
92 [C_CAN_IF2_DATA2_REG] = 0x50,
93 [C_CAN_IF2_DATA3_REG] = 0x52,
94 [C_CAN_IF2_DATA4_REG] = 0x54,
95 [C_CAN_TXRQST1_REG] = 0x80,
96 [C_CAN_TXRQST2_REG] = 0x82,
97 [C_CAN_NEWDAT1_REG] = 0x90,
98 [C_CAN_NEWDAT2_REG] = 0x92,
99 [C_CAN_INTPND1_REG] = 0xA0,
100 [C_CAN_INTPND2_REG] = 0xA2,
101 [C_CAN_MSGVAL1_REG] = 0xB0,
102 [C_CAN_MSGVAL2_REG] = 0xB2,
103};
104
105static const u16 reg_map_d_can[] = {
106 [C_CAN_CTRL_REG] = 0x00,
107 [C_CAN_STS_REG] = 0x04,
108 [C_CAN_ERR_CNT_REG] = 0x08,
109 [C_CAN_BTR_REG] = 0x0C,
110 [C_CAN_BRPEXT_REG] = 0x0E,
111 [C_CAN_INT_REG] = 0x10,
112 [C_CAN_TEST_REG] = 0x14,
113 [C_CAN_TXRQST1_REG] = 0x88,
114 [C_CAN_TXRQST2_REG] = 0x8A,
115 [C_CAN_NEWDAT1_REG] = 0x9C,
116 [C_CAN_NEWDAT2_REG] = 0x9E,
117 [C_CAN_INTPND1_REG] = 0xB0,
118 [C_CAN_INTPND2_REG] = 0xB2,
119 [C_CAN_MSGVAL1_REG] = 0xC4,
120 [C_CAN_MSGVAL2_REG] = 0xC6,
121 [C_CAN_IF1_COMREQ_REG] = 0x100,
122 [C_CAN_IF1_COMMSK_REG] = 0x102,
123 [C_CAN_IF1_MASK1_REG] = 0x104,
124 [C_CAN_IF1_MASK2_REG] = 0x106,
125 [C_CAN_IF1_ARB1_REG] = 0x108,
126 [C_CAN_IF1_ARB2_REG] = 0x10A,
127 [C_CAN_IF1_MSGCTRL_REG] = 0x10C,
128 [C_CAN_IF1_DATA1_REG] = 0x110,
129 [C_CAN_IF1_DATA2_REG] = 0x112,
130 [C_CAN_IF1_DATA3_REG] = 0x114,
131 [C_CAN_IF1_DATA4_REG] = 0x116,
132 [C_CAN_IF2_COMREQ_REG] = 0x120,
133 [C_CAN_IF2_COMMSK_REG] = 0x122,
134 [C_CAN_IF2_MASK1_REG] = 0x124,
135 [C_CAN_IF2_MASK2_REG] = 0x126,
136 [C_CAN_IF2_ARB1_REG] = 0x128,
137 [C_CAN_IF2_ARB2_REG] = 0x12A,
138 [C_CAN_IF2_MSGCTRL_REG] = 0x12C,
139 [C_CAN_IF2_DATA1_REG] = 0x130,
140 [C_CAN_IF2_DATA2_REG] = 0x132,
141 [C_CAN_IF2_DATA3_REG] = 0x134,
142 [C_CAN_IF2_DATA4_REG] = 0x136,
143};
144
145enum c_can_dev_id {
146 C_CAN_DEVTYPE,
147 D_CAN_DEVTYPE,
62}; 148};
63 149
64/* c_can private data structure */ 150/* c_can private data structure */
@@ -69,9 +155,10 @@ struct c_can_priv {
69 int tx_object; 155 int tx_object;
70 int current_status; 156 int current_status;
71 int last_status; 157 int last_status;
72 u16 (*read_reg) (struct c_can_priv *priv, void *reg); 158 u16 (*read_reg) (struct c_can_priv *priv, enum reg index);
73 void (*write_reg) (struct c_can_priv *priv, void *reg, u16 val); 159 void (*write_reg) (struct c_can_priv *priv, enum reg index, u16 val);
74 struct c_can_regs __iomem *regs; 160 void __iomem *base;
161 const u16 *regs;
75 unsigned long irq_flags; /* for request_irq() */ 162 unsigned long irq_flags; /* for request_irq() */
76 unsigned int tx_next; 163 unsigned int tx_next;
77 unsigned int tx_echo; 164 unsigned int tx_echo;
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
new file mode 100644
index 000000000000..1011146ea513
--- /dev/null
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -0,0 +1,221 @@
1/*
2 * PCI bus driver for Bosch C_CAN/D_CAN controller
3 *
4 * Copyright (C) 2012 Federico Vaga <federico.vaga@gmail.com>
5 *
6 * Borrowed from c_can_platform.c
7 *
8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any
10 * warranty of any kind, whether express or implied.
11 */
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/netdevice.h>
16#include <linux/pci.h>
17
18#include <linux/can/dev.h>
19
20#include "c_can.h"
21
22enum c_can_pci_reg_align {
23 C_CAN_REG_ALIGN_16,
24 C_CAN_REG_ALIGN_32,
25};
26
27struct c_can_pci_data {
28 /* Specify if is C_CAN or D_CAN */
29 enum c_can_dev_id type;
30 /* Set the register alignment in the memory */
31 enum c_can_pci_reg_align reg_align;
32 /* Set the frequency */
33 unsigned int freq;
34};
35
36/*
37 * 16-bit c_can registers can be arranged differently in the memory
38 * architecture of different implementations. For example: 16-bit
39 * registers can be aligned to a 16-bit boundary or 32-bit boundary etc.
40 * Handle the same by providing a common read/write interface.
41 */
42static u16 c_can_pci_read_reg_aligned_to_16bit(struct c_can_priv *priv,
43 enum reg index)
44{
45 return readw(priv->base + priv->regs[index]);
46}
47
48static void c_can_pci_write_reg_aligned_to_16bit(struct c_can_priv *priv,
49 enum reg index, u16 val)
50{
51 writew(val, priv->base + priv->regs[index]);
52}
53
54static u16 c_can_pci_read_reg_aligned_to_32bit(struct c_can_priv *priv,
55 enum reg index)
56{
57 return readw(priv->base + 2 * priv->regs[index]);
58}
59
60static void c_can_pci_write_reg_aligned_to_32bit(struct c_can_priv *priv,
61 enum reg index, u16 val)
62{
63 writew(val, priv->base + 2 * priv->regs[index]);
64}
65
66static int __devinit c_can_pci_probe(struct pci_dev *pdev,
67 const struct pci_device_id *ent)
68{
69 struct c_can_pci_data *c_can_pci_data = (void *)ent->driver_data;
70 struct c_can_priv *priv;
71 struct net_device *dev;
72 void __iomem *addr;
73 int ret;
74
75 ret = pci_enable_device(pdev);
76 if (ret) {
77 dev_err(&pdev->dev, "pci_enable_device FAILED\n");
78 goto out;
79 }
80
81 ret = pci_request_regions(pdev, KBUILD_MODNAME);
82 if (ret) {
83 dev_err(&pdev->dev, "pci_request_regions FAILED\n");
84 goto out_disable_device;
85 }
86
87 pci_set_master(pdev);
88 pci_enable_msi(pdev);
89
90 addr = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
91 if (!addr) {
92 dev_err(&pdev->dev,
93 "device has no PCI memory resources, "
94 "failing adapter\n");
95 ret = -ENOMEM;
96 goto out_release_regions;
97 }
98
99 /* allocate the c_can device */
100 dev = alloc_c_can_dev();
101 if (!dev) {
102 ret = -ENOMEM;
103 goto out_iounmap;
104 }
105
106 priv = netdev_priv(dev);
107 pci_set_drvdata(pdev, dev);
108 SET_NETDEV_DEV(dev, &pdev->dev);
109
110 dev->irq = pdev->irq;
111 priv->base = addr;
112
113 if (!c_can_pci_data->freq) {
114 dev_err(&pdev->dev, "no clock frequency defined\n");
115 ret = -ENODEV;
116 goto out_free_c_can;
117 } else {
118 priv->can.clock.freq = c_can_pci_data->freq;
119 }
120
121 /* Configure CAN type */
122 switch (c_can_pci_data->type) {
123 case C_CAN_DEVTYPE:
124 priv->regs = reg_map_c_can;
125 break;
126 case D_CAN_DEVTYPE:
127 priv->regs = reg_map_d_can;
128 priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
129 break;
130 default:
131 ret = -EINVAL;
132 goto out_free_c_can;
133 }
134
135 /* Configure access to registers */
136 switch (c_can_pci_data->reg_align) {
137 case C_CAN_REG_ALIGN_32:
138 priv->read_reg = c_can_pci_read_reg_aligned_to_32bit;
139 priv->write_reg = c_can_pci_write_reg_aligned_to_32bit;
140 break;
141 case C_CAN_REG_ALIGN_16:
142 priv->read_reg = c_can_pci_read_reg_aligned_to_16bit;
143 priv->write_reg = c_can_pci_write_reg_aligned_to_16bit;
144 break;
145 default:
146 ret = -EINVAL;
147 goto out_free_c_can;
148 }
149
150 ret = register_c_can_dev(dev);
151 if (ret) {
152 dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
153 KBUILD_MODNAME, ret);
154 goto out_free_c_can;
155 }
156
157 dev_dbg(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
158 KBUILD_MODNAME, priv->regs, dev->irq);
159
160 return 0;
161
162out_free_c_can:
163 pci_set_drvdata(pdev, NULL);
164 free_c_can_dev(dev);
165out_iounmap:
166 pci_iounmap(pdev, addr);
167out_release_regions:
168 pci_disable_msi(pdev);
169 pci_clear_master(pdev);
170 pci_release_regions(pdev);
171out_disable_device:
172 pci_disable_device(pdev);
173out:
174 return ret;
175}
176
177static void __devexit c_can_pci_remove(struct pci_dev *pdev)
178{
179 struct net_device *dev = pci_get_drvdata(pdev);
180 struct c_can_priv *priv = netdev_priv(dev);
181
182 unregister_c_can_dev(dev);
183
184 pci_set_drvdata(pdev, NULL);
185 free_c_can_dev(dev);
186
187 pci_iounmap(pdev, priv->base);
188 pci_disable_msi(pdev);
189 pci_clear_master(pdev);
190 pci_release_regions(pdev);
191 pci_disable_device(pdev);
192}
193
194static struct c_can_pci_data c_can_sta2x11= {
195 .type = C_CAN_DEVTYPE,
196 .reg_align = C_CAN_REG_ALIGN_32,
197 .freq = 52000000, /* 52 Mhz */
198};
199
200#define C_CAN_ID(_vend, _dev, _driverdata) { \
201 PCI_DEVICE(_vend, _dev), \
202 .driver_data = (unsigned long)&_driverdata, \
203}
204static DEFINE_PCI_DEVICE_TABLE(c_can_pci_tbl) = {
205 C_CAN_ID(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_CAN,
206 c_can_sta2x11),
207 {},
208};
209static struct pci_driver c_can_pci_driver = {
210 .name = KBUILD_MODNAME,
211 .id_table = c_can_pci_tbl,
212 .probe = c_can_pci_probe,
213 .remove = __devexit_p(c_can_pci_remove),
214};
215
216module_pci_driver(c_can_pci_driver);
217
218MODULE_AUTHOR("Federico Vaga <federico.vaga@gmail.com>");
219MODULE_LICENSE("GPL v2");
220MODULE_DESCRIPTION("PCI CAN bus driver for Bosch C_CAN/D_CAN controller");
221MODULE_DEVICE_TABLE(pci, c_can_pci_tbl);
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 5e1a5ff6476e..f0921d16f0a9 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -42,27 +42,27 @@
42 * Handle the same by providing a common read/write interface. 42 * Handle the same by providing a common read/write interface.
43 */ 43 */
44static u16 c_can_plat_read_reg_aligned_to_16bit(struct c_can_priv *priv, 44static u16 c_can_plat_read_reg_aligned_to_16bit(struct c_can_priv *priv,
45 void *reg) 45 enum reg index)
46{ 46{
47 return readw(reg); 47 return readw(priv->base + priv->regs[index]);
48} 48}
49 49
50static void c_can_plat_write_reg_aligned_to_16bit(struct c_can_priv *priv, 50static void c_can_plat_write_reg_aligned_to_16bit(struct c_can_priv *priv,
51 void *reg, u16 val) 51 enum reg index, u16 val)
52{ 52{
53 writew(val, reg); 53 writew(val, priv->base + priv->regs[index]);
54} 54}
55 55
56static u16 c_can_plat_read_reg_aligned_to_32bit(struct c_can_priv *priv, 56static u16 c_can_plat_read_reg_aligned_to_32bit(struct c_can_priv *priv,
57 void *reg) 57 enum reg index)
58{ 58{
59 return readw(reg + (long)reg - (long)priv->regs); 59 return readw(priv->base + 2 * priv->regs[index]);
60} 60}
61 61
62static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv, 62static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv,
63 void *reg, u16 val) 63 enum reg index, u16 val)
64{ 64{
65 writew(val, reg + (long)reg - (long)priv->regs); 65 writew(val, priv->base + 2 * priv->regs[index]);
66} 66}
67 67
68static int __devinit c_can_plat_probe(struct platform_device *pdev) 68static int __devinit c_can_plat_probe(struct platform_device *pdev)
@@ -71,6 +71,7 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
71 void __iomem *addr; 71 void __iomem *addr;
72 struct net_device *dev; 72 struct net_device *dev;
73 struct c_can_priv *priv; 73 struct c_can_priv *priv;
74 const struct platform_device_id *id;
74 struct resource *mem; 75 struct resource *mem;
75 int irq; 76 int irq;
76#ifdef CONFIG_HAVE_CLK 77#ifdef CONFIG_HAVE_CLK
@@ -115,26 +116,40 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
115 } 116 }
116 117
117 priv = netdev_priv(dev); 118 priv = netdev_priv(dev);
119 id = platform_get_device_id(pdev);
120 switch (id->driver_data) {
121 case C_CAN_DEVTYPE:
122 priv->regs = reg_map_c_can;
123 switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) {
124 case IORESOURCE_MEM_32BIT:
125 priv->read_reg = c_can_plat_read_reg_aligned_to_32bit;
126 priv->write_reg = c_can_plat_write_reg_aligned_to_32bit;
127 break;
128 case IORESOURCE_MEM_16BIT:
129 default:
130 priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
131 priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
132 break;
133 }
134 break;
135 case D_CAN_DEVTYPE:
136 priv->regs = reg_map_d_can;
137 priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
138 priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
139 priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
140 break;
141 default:
142 ret = -EINVAL;
143 goto exit_free_device;
144 }
118 145
119 dev->irq = irq; 146 dev->irq = irq;
120 priv->regs = addr; 147 priv->base = addr;
121#ifdef CONFIG_HAVE_CLK 148#ifdef CONFIG_HAVE_CLK
122 priv->can.clock.freq = clk_get_rate(clk); 149 priv->can.clock.freq = clk_get_rate(clk);
123 priv->priv = clk; 150 priv->priv = clk;
124#endif 151#endif
125 152
126 switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) {
127 case IORESOURCE_MEM_32BIT:
128 priv->read_reg = c_can_plat_read_reg_aligned_to_32bit;
129 priv->write_reg = c_can_plat_write_reg_aligned_to_32bit;
130 break;
131 case IORESOURCE_MEM_16BIT:
132 default:
133 priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
134 priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
135 break;
136 }
137
138 platform_set_drvdata(pdev, dev); 153 platform_set_drvdata(pdev, dev);
139 SET_NETDEV_DEV(dev, &pdev->dev); 154 SET_NETDEV_DEV(dev, &pdev->dev);
140 155
@@ -146,7 +161,7 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
146 } 161 }
147 162
148 dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n", 163 dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
149 KBUILD_MODNAME, priv->regs, dev->irq); 164 KBUILD_MODNAME, priv->base, dev->irq);
150 return 0; 165 return 0;
151 166
152exit_free_device: 167exit_free_device:
@@ -176,7 +191,7 @@ static int __devexit c_can_plat_remove(struct platform_device *pdev)
176 platform_set_drvdata(pdev, NULL); 191 platform_set_drvdata(pdev, NULL);
177 192
178 free_c_can_dev(dev); 193 free_c_can_dev(dev);
179 iounmap(priv->regs); 194 iounmap(priv->base);
180 195
181 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 196 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
182 release_mem_region(mem->start, resource_size(mem)); 197 release_mem_region(mem->start, resource_size(mem));
@@ -188,6 +203,20 @@ static int __devexit c_can_plat_remove(struct platform_device *pdev)
188 return 0; 203 return 0;
189} 204}
190 205
206static const struct platform_device_id c_can_id_table[] = {
207 {
208 .name = KBUILD_MODNAME,
209 .driver_data = C_CAN_DEVTYPE,
210 }, {
211 .name = "c_can",
212 .driver_data = C_CAN_DEVTYPE,
213 }, {
214 .name = "d_can",
215 .driver_data = D_CAN_DEVTYPE,
216 }, {
217 }
218};
219
191static struct platform_driver c_can_plat_driver = { 220static struct platform_driver c_can_plat_driver = {
192 .driver = { 221 .driver = {
193 .name = KBUILD_MODNAME, 222 .name = KBUILD_MODNAME,
@@ -195,6 +224,7 @@ static struct platform_driver c_can_plat_driver = {
195 }, 224 },
196 .probe = c_can_plat_probe, 225 .probe = c_can_plat_probe,
197 .remove = __devexit_p(c_can_plat_remove), 226 .remove = __devexit_p(c_can_plat_remove),
227 .id_table = c_can_id_table,
198}; 228};
199 229
200module_platform_driver(c_can_plat_driver); 230module_platform_driver(c_can_plat_driver);
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index d42a6a7396f2..a138db11cbf0 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -695,7 +695,7 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
695 netif_wake_queue(dev); 695 netif_wake_queue(dev);
696} 696}
697 697
698irqreturn_t cc770_interrupt(int irq, void *dev_id) 698static irqreturn_t cc770_interrupt(int irq, void *dev_id)
699{ 699{
700 struct net_device *dev = (struct net_device *)dev_id; 700 struct net_device *dev = (struct net_device *)dev_id;
701 struct cc770_priv *priv = netdev_priv(dev); 701 struct cc770_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index f03d7a481a80..963e2ccd10db 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -33,6 +33,39 @@ MODULE_DESCRIPTION(MOD_DESC);
33MODULE_LICENSE("GPL v2"); 33MODULE_LICENSE("GPL v2");
34MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>"); 34MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
35 35
36/* CAN DLC to real data length conversion helpers */
37
38static const u8 dlc2len[] = {0, 1, 2, 3, 4, 5, 6, 7,
39 8, 12, 16, 20, 24, 32, 48, 64};
40
41/* get data length from can_dlc with sanitized can_dlc */
42u8 can_dlc2len(u8 can_dlc)
43{
44 return dlc2len[can_dlc & 0x0F];
45}
46EXPORT_SYMBOL_GPL(can_dlc2len);
47
48static const u8 len2dlc[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, /* 0 - 8 */
49 9, 9, 9, 9, /* 9 - 12 */
50 10, 10, 10, 10, /* 13 - 16 */
51 11, 11, 11, 11, /* 17 - 20 */
52 12, 12, 12, 12, /* 21 - 24 */
53 13, 13, 13, 13, 13, 13, 13, 13, /* 25 - 32 */
54 14, 14, 14, 14, 14, 14, 14, 14, /* 33 - 40 */
55 14, 14, 14, 14, 14, 14, 14, 14, /* 41 - 48 */
56 15, 15, 15, 15, 15, 15, 15, 15, /* 49 - 56 */
57 15, 15, 15, 15, 15, 15, 15, 15}; /* 57 - 64 */
58
59/* map the sanitized data length to an appropriate data length code */
60u8 can_len2dlc(u8 len)
61{
62 if (unlikely(len > 64))
63 return 0xF;
64
65 return len2dlc[len];
66}
67EXPORT_SYMBOL_GPL(can_len2dlc);
68
36#ifdef CONFIG_CAN_CALC_BITTIMING 69#ifdef CONFIG_CAN_CALC_BITTIMING
37#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */ 70#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
38 71
@@ -368,7 +401,7 @@ EXPORT_SYMBOL_GPL(can_free_echo_skb);
368/* 401/*
369 * CAN device restart for bus-off recovery 402 * CAN device restart for bus-off recovery
370 */ 403 */
371void can_restart(unsigned long data) 404static void can_restart(unsigned long data)
372{ 405{
373 struct net_device *dev = (struct net_device *)data; 406 struct net_device *dev = (struct net_device *)data;
374 struct can_priv *priv = netdev_priv(dev); 407 struct can_priv *priv = netdev_priv(dev);
@@ -454,7 +487,7 @@ EXPORT_SYMBOL_GPL(can_bus_off);
454static void can_setup(struct net_device *dev) 487static void can_setup(struct net_device *dev)
455{ 488{
456 dev->type = ARPHRD_CAN; 489 dev->type = ARPHRD_CAN;
457 dev->mtu = sizeof(struct can_frame); 490 dev->mtu = CAN_MTU;
458 dev->hard_header_len = 0; 491 dev->hard_header_len = 0;
459 dev->addr_len = 0; 492 dev->addr_len = 0;
460 dev->tx_queue_len = 10; 493 dev->tx_queue_len = 10;
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 81d474102378..1b6f5621ce89 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -34,6 +34,7 @@
34#include <linux/list.h> 34#include <linux/list.h>
35#include <linux/module.h> 35#include <linux/module.h>
36#include <linux/of.h> 36#include <linux/of.h>
37#include <linux/of_device.h>
37#include <linux/platform_device.h> 38#include <linux/platform_device.h>
38#include <linux/pinctrl/consumer.h> 39#include <linux/pinctrl/consumer.h>
39 40
@@ -165,10 +166,21 @@ struct flexcan_regs {
165 u32 imask1; /* 0x28 */ 166 u32 imask1; /* 0x28 */
166 u32 iflag2; /* 0x2c */ 167 u32 iflag2; /* 0x2c */
167 u32 iflag1; /* 0x30 */ 168 u32 iflag1; /* 0x30 */
168 u32 _reserved2[19]; 169 u32 crl2; /* 0x34 */
170 u32 esr2; /* 0x38 */
171 u32 imeur; /* 0x3c */
172 u32 lrfr; /* 0x40 */
173 u32 crcr; /* 0x44 */
174 u32 rxfgmask; /* 0x48 */
175 u32 rxfir; /* 0x4c */
176 u32 _reserved3[12];
169 struct flexcan_mb cantxfg[64]; 177 struct flexcan_mb cantxfg[64];
170}; 178};
171 179
180struct flexcan_devtype_data {
181 u32 hw_ver; /* hardware controller version */
182};
183
172struct flexcan_priv { 184struct flexcan_priv {
173 struct can_priv can; 185 struct can_priv can;
174 struct net_device *dev; 186 struct net_device *dev;
@@ -180,6 +192,15 @@ struct flexcan_priv {
180 192
181 struct clk *clk; 193 struct clk *clk;
182 struct flexcan_platform_data *pdata; 194 struct flexcan_platform_data *pdata;
195 const struct flexcan_devtype_data *devtype_data;
196};
197
198static struct flexcan_devtype_data fsl_p1010_devtype_data = {
199 .hw_ver = 3,
200};
201
202static struct flexcan_devtype_data fsl_imx6q_devtype_data = {
203 .hw_ver = 10,
183}; 204};
184 205
185static struct can_bittiming_const flexcan_bittiming_const = { 206static struct can_bittiming_const flexcan_bittiming_const = {
@@ -750,6 +771,9 @@ static int flexcan_chip_start(struct net_device *dev)
750 flexcan_write(0x0, &regs->rx14mask); 771 flexcan_write(0x0, &regs->rx14mask);
751 flexcan_write(0x0, &regs->rx15mask); 772 flexcan_write(0x0, &regs->rx15mask);
752 773
774 if (priv->devtype_data->hw_ver >= 10)
775 flexcan_write(0x0, &regs->rxfgmask);
776
753 flexcan_transceiver_switch(priv, 1); 777 flexcan_transceiver_switch(priv, 1);
754 778
755 /* synchronize with the can bus */ 779 /* synchronize with the can bus */
@@ -922,8 +946,21 @@ static void __devexit unregister_flexcandev(struct net_device *dev)
922 unregister_candev(dev); 946 unregister_candev(dev);
923} 947}
924 948
949static const struct of_device_id flexcan_of_match[] = {
950 { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
951 { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
952 { /* sentinel */ },
953};
954
955static const struct platform_device_id flexcan_id_table[] = {
956 { .name = "flexcan", .driver_data = (kernel_ulong_t)&fsl_p1010_devtype_data, },
957 { /* sentinel */ },
958};
959
925static int __devinit flexcan_probe(struct platform_device *pdev) 960static int __devinit flexcan_probe(struct platform_device *pdev)
926{ 961{
962 const struct of_device_id *of_id;
963 const struct flexcan_devtype_data *devtype_data;
927 struct net_device *dev; 964 struct net_device *dev;
928 struct flexcan_priv *priv; 965 struct flexcan_priv *priv;
929 struct resource *mem; 966 struct resource *mem;
@@ -938,14 +975,9 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
938 if (IS_ERR(pinctrl)) 975 if (IS_ERR(pinctrl))
939 return PTR_ERR(pinctrl); 976 return PTR_ERR(pinctrl);
940 977
941 if (pdev->dev.of_node) { 978 if (pdev->dev.of_node)
942 const __be32 *clock_freq_p; 979 of_property_read_u32(pdev->dev.of_node,
943 980 "clock-frequency", &clock_freq);
944 clock_freq_p = of_get_property(pdev->dev.of_node,
945 "clock-frequency", NULL);
946 if (clock_freq_p)
947 clock_freq = be32_to_cpup(clock_freq_p);
948 }
949 981
950 if (!clock_freq) { 982 if (!clock_freq) {
951 clk = clk_get(&pdev->dev, NULL); 983 clk = clk_get(&pdev->dev, NULL);
@@ -982,6 +1014,17 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
982 goto failed_alloc; 1014 goto failed_alloc;
983 } 1015 }
984 1016
1017 of_id = of_match_device(flexcan_of_match, &pdev->dev);
1018 if (of_id) {
1019 devtype_data = of_id->data;
1020 } else if (pdev->id_entry->driver_data) {
1021 devtype_data = (struct flexcan_devtype_data *)
1022 pdev->id_entry->driver_data;
1023 } else {
1024 err = -ENODEV;
1025 goto failed_devtype;
1026 }
1027
985 dev->netdev_ops = &flexcan_netdev_ops; 1028 dev->netdev_ops = &flexcan_netdev_ops;
986 dev->irq = irq; 1029 dev->irq = irq;
987 dev->flags |= IFF_ECHO; 1030 dev->flags |= IFF_ECHO;
@@ -998,6 +1041,7 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
998 priv->dev = dev; 1041 priv->dev = dev;
999 priv->clk = clk; 1042 priv->clk = clk;
1000 priv->pdata = pdev->dev.platform_data; 1043 priv->pdata = pdev->dev.platform_data;
1044 priv->devtype_data = devtype_data;
1001 1045
1002 netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT); 1046 netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT);
1003 1047
@@ -1016,6 +1060,7 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
1016 return 0; 1060 return 0;
1017 1061
1018 failed_register: 1062 failed_register:
1063 failed_devtype:
1019 free_candev(dev); 1064 free_candev(dev);
1020 failed_alloc: 1065 failed_alloc:
1021 iounmap(base); 1066 iounmap(base);
@@ -1049,12 +1094,41 @@ static int __devexit flexcan_remove(struct platform_device *pdev)
1049 return 0; 1094 return 0;
1050} 1095}
1051 1096
1052static struct of_device_id flexcan_of_match[] = { 1097#ifdef CONFIG_PM
1053 { 1098static int flexcan_suspend(struct platform_device *pdev, pm_message_t state)
1054 .compatible = "fsl,p1010-flexcan", 1099{
1055 }, 1100 struct net_device *dev = platform_get_drvdata(pdev);
1056 {}, 1101 struct flexcan_priv *priv = netdev_priv(dev);
1057}; 1102
1103 flexcan_chip_disable(priv);
1104
1105 if (netif_running(dev)) {
1106 netif_stop_queue(dev);
1107 netif_device_detach(dev);
1108 }
1109 priv->can.state = CAN_STATE_SLEEPING;
1110
1111 return 0;
1112}
1113
1114static int flexcan_resume(struct platform_device *pdev)
1115{
1116 struct net_device *dev = platform_get_drvdata(pdev);
1117 struct flexcan_priv *priv = netdev_priv(dev);
1118
1119 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1120 if (netif_running(dev)) {
1121 netif_device_attach(dev);
1122 netif_start_queue(dev);
1123 }
1124 flexcan_chip_enable(priv);
1125
1126 return 0;
1127}
1128#else
1129#define flexcan_suspend NULL
1130#define flexcan_resume NULL
1131#endif
1058 1132
1059static struct platform_driver flexcan_driver = { 1133static struct platform_driver flexcan_driver = {
1060 .driver = { 1134 .driver = {
@@ -1064,6 +1138,9 @@ static struct platform_driver flexcan_driver = {
1064 }, 1138 },
1065 .probe = flexcan_probe, 1139 .probe = flexcan_probe,
1066 .remove = __devexit_p(flexcan_remove), 1140 .remove = __devexit_p(flexcan_remove),
1141 .suspend = flexcan_suspend,
1142 .resume = flexcan_resume,
1143 .id_table = flexcan_id_table,
1067}; 1144};
1068 1145
1069module_platform_driver(flexcan_driver); 1146module_platform_driver(flexcan_driver);
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 346785c56a25..9120a36ec702 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -1020,8 +1020,7 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
1020 GFP_DMA); 1020 GFP_DMA);
1021 1021
1022 if (priv->spi_tx_buf) { 1022 if (priv->spi_tx_buf) {
1023 priv->spi_rx_buf = (u8 *)(priv->spi_tx_buf + 1023 priv->spi_rx_buf = (priv->spi_tx_buf + (PAGE_SIZE / 2));
1024 (PAGE_SIZE / 2));
1025 priv->spi_rx_dma = (dma_addr_t)(priv->spi_tx_dma + 1024 priv->spi_rx_dma = (dma_addr_t)(priv->spi_tx_dma +
1026 (PAGE_SIZE / 2)); 1025 (PAGE_SIZE / 2));
1027 } else { 1026 } else {
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 5caa572d71e3..06adf881ea24 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -251,7 +251,7 @@ static struct of_device_id mpc5xxx_can_table[];
251static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev) 251static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev)
252{ 252{
253 const struct of_device_id *match; 253 const struct of_device_id *match;
254 struct mpc5xxx_can_data *data; 254 const struct mpc5xxx_can_data *data;
255 struct device_node *np = ofdev->dev.of_node; 255 struct device_node *np = ofdev->dev.of_node;
256 struct net_device *dev; 256 struct net_device *dev;
257 struct mscan_priv *priv; 257 struct mscan_priv *priv;
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index a7c77c744ee9..f2a221e7b968 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -826,12 +826,12 @@ static __devinit int softing_pdev_probe(struct platform_device *pdev)
826 goto sysfs_failed; 826 goto sysfs_failed;
827 } 827 }
828 828
829 ret = -ENOMEM;
830 for (j = 0; j < ARRAY_SIZE(card->net); ++j) { 829 for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
831 card->net[j] = netdev = 830 card->net[j] = netdev =
832 softing_netdev_create(card, card->id.chip[j]); 831 softing_netdev_create(card, card->id.chip[j]);
833 if (!netdev) { 832 if (!netdev) {
834 dev_alert(&pdev->dev, "failed to make can[%i]", j); 833 dev_alert(&pdev->dev, "failed to make can[%i]", j);
834 ret = -ENOMEM;
835 goto netdev_failed; 835 goto netdev_failed;
836 } 836 }
837 priv = netdev_priv(card->net[j]); 837 priv = netdev_priv(card->net[j]);
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index ea2d94285936..4f93c0be0053 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -70,13 +70,12 @@ MODULE_PARM_DESC(echo, "Echo sent frames (for testing). Default: 0 (Off)");
70 70
71static void vcan_rx(struct sk_buff *skb, struct net_device *dev) 71static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
72{ 72{
73 struct can_frame *cf = (struct can_frame *)skb->data; 73 struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
74 struct net_device_stats *stats = &dev->stats; 74 struct net_device_stats *stats = &dev->stats;
75 75
76 stats->rx_packets++; 76 stats->rx_packets++;
77 stats->rx_bytes += cf->can_dlc; 77 stats->rx_bytes += cfd->len;
78 78
79 skb->protocol = htons(ETH_P_CAN);
80 skb->pkt_type = PACKET_BROADCAST; 79 skb->pkt_type = PACKET_BROADCAST;
81 skb->dev = dev; 80 skb->dev = dev;
82 skb->ip_summed = CHECKSUM_UNNECESSARY; 81 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -86,7 +85,7 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
86 85
87static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev) 86static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
88{ 87{
89 struct can_frame *cf = (struct can_frame *)skb->data; 88 struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
90 struct net_device_stats *stats = &dev->stats; 89 struct net_device_stats *stats = &dev->stats;
91 int loop; 90 int loop;
92 91
@@ -94,7 +93,7 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
94 return NETDEV_TX_OK; 93 return NETDEV_TX_OK;
95 94
96 stats->tx_packets++; 95 stats->tx_packets++;
97 stats->tx_bytes += cf->can_dlc; 96 stats->tx_bytes += cfd->len;
98 97
99 /* set flag whether this packet has to be looped back */ 98 /* set flag whether this packet has to be looped back */
100 loop = skb->pkt_type == PACKET_LOOPBACK; 99 loop = skb->pkt_type == PACKET_LOOPBACK;
@@ -108,7 +107,7 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
108 * CAN core already did the echo for us 107 * CAN core already did the echo for us
109 */ 108 */
110 stats->rx_packets++; 109 stats->rx_packets++;
111 stats->rx_bytes += cf->can_dlc; 110 stats->rx_bytes += cfd->len;
112 } 111 }
113 kfree_skb(skb); 112 kfree_skb(skb);
114 return NETDEV_TX_OK; 113 return NETDEV_TX_OK;
@@ -133,14 +132,28 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
133 return NETDEV_TX_OK; 132 return NETDEV_TX_OK;
134} 133}
135 134
135static int vcan_change_mtu(struct net_device *dev, int new_mtu)
136{
137 /* Do not allow changing the MTU while running */
138 if (dev->flags & IFF_UP)
139 return -EBUSY;
140
141 if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU)
142 return -EINVAL;
143
144 dev->mtu = new_mtu;
145 return 0;
146}
147
136static const struct net_device_ops vcan_netdev_ops = { 148static const struct net_device_ops vcan_netdev_ops = {
137 .ndo_start_xmit = vcan_tx, 149 .ndo_start_xmit = vcan_tx,
150 .ndo_change_mtu = vcan_change_mtu,
138}; 151};
139 152
140static void vcan_setup(struct net_device *dev) 153static void vcan_setup(struct net_device *dev)
141{ 154{
142 dev->type = ARPHRD_CAN; 155 dev->type = ARPHRD_CAN;
143 dev->mtu = sizeof(struct can_frame); 156 dev->mtu = CAN_MTU;
144 dev->hard_header_len = 0; 157 dev->hard_header_len = 0;
145 dev->addr_len = 0; 158 dev->addr_len = 0;
146 dev->tx_queue_len = 0; 159 dev->tx_queue_len = 0;
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 9c755db6b16d..f0c8bd54ce29 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1008,7 +1008,7 @@ e100_send_mdio_bit(unsigned char bit)
1008} 1008}
1009 1009
1010static unsigned char 1010static unsigned char
1011e100_receive_mdio_bit() 1011e100_receive_mdio_bit(void)
1012{ 1012{
1013 unsigned char bit; 1013 unsigned char bit;
1014 *R_NETWORK_MGM_CTRL = 0; 1014 *R_NETWORK_MGM_CTRL = 0;
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index bab0158f1cc3..9d6a0677466b 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -40,18 +40,6 @@
40 40
41static int numdummies = 1; 41static int numdummies = 1;
42 42
43static int dummy_set_address(struct net_device *dev, void *p)
44{
45 struct sockaddr *sa = p;
46
47 if (!is_valid_ether_addr(sa->sa_data))
48 return -EADDRNOTAVAIL;
49
50 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
51 memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
52 return 0;
53}
54
55/* fake multicast ability */ 43/* fake multicast ability */
56static void set_multicast_list(struct net_device *dev) 44static void set_multicast_list(struct net_device *dev)
57{ 45{
@@ -118,7 +106,7 @@ static const struct net_device_ops dummy_netdev_ops = {
118 .ndo_start_xmit = dummy_xmit, 106 .ndo_start_xmit = dummy_xmit,
119 .ndo_validate_addr = eth_validate_addr, 107 .ndo_validate_addr = eth_validate_addr,
120 .ndo_set_rx_mode = set_multicast_list, 108 .ndo_set_rx_mode = set_multicast_list,
121 .ndo_set_mac_address = dummy_set_address, 109 .ndo_set_mac_address = eth_mac_addr,
122 .ndo_get_stats64 = dummy_get_stats64, 110 .ndo_get_stats64 = dummy_get_stats64,
123}; 111};
124 112
@@ -134,6 +122,7 @@ static void dummy_setup(struct net_device *dev)
134 dev->tx_queue_len = 0; 122 dev->tx_queue_len = 0;
135 dev->flags |= IFF_NOARP; 123 dev->flags |= IFF_NOARP;
136 dev->flags &= ~IFF_MULTICAST; 124 dev->flags &= ~IFF_MULTICAST;
125 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
137 dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO; 126 dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO;
138 dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX; 127 dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
139 eth_hw_addr_random(dev); 128 eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/3com/3c501.c b/drivers/net/ethernet/3com/3c501.c
index bf73e1a02293..2038eaabaea4 100644
--- a/drivers/net/ethernet/3com/3c501.c
+++ b/drivers/net/ethernet/3com/3c501.c
@@ -143,7 +143,7 @@ static int irq = 5;
143static int mem_start; 143static int mem_start;
144 144
145/** 145/**
146 * el1_probe: - probe for a 3c501 146 * el1_probe - probe for a 3c501
147 * @dev: The device structure passed in to probe. 147 * @dev: The device structure passed in to probe.
148 * 148 *
149 * This can be called from two places. The network layer will probe using 149 * This can be called from two places. The network layer will probe using
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index 2e538676924d..e1219e037c04 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -162,6 +162,20 @@ config MAC8390
162 and read the Ethernet-HOWTO, available from 162 and read the Ethernet-HOWTO, available from
163 <http://www.tldp.org/docs.html#howto>. 163 <http://www.tldp.org/docs.html#howto>.
164 164
165config MCF8390
166 tristate "ColdFire NS8390 based Ethernet support"
167 depends on COLDFIRE
168 select CRC32
169 ---help---
170 This driver is for Ethernet devices using an NS8390-compatible
171 chipset on many common ColdFire CPU based boards. Many of the older
172 Freescale dev boards use this, and some other common boards like
173 some SnapGear routers do as well.
174
175 If you have one of these boards and want to use the network interface
176 on them then choose Y. To compile this driver as a module, choose M
177 here, the module will be called mcf8390.
178
165config NE2000 179config NE2000
166 tristate "NE2000/NE1000 support" 180 tristate "NE2000/NE1000 support"
167 depends on (ISA || (Q40 && m) || M32R || MACH_TX49XX) 181 depends on (ISA || (Q40 && m) || M32R || MACH_TX49XX)
diff --git a/drivers/net/ethernet/8390/Makefile b/drivers/net/ethernet/8390/Makefile
index d13790b7fd27..f43038babf86 100644
--- a/drivers/net/ethernet/8390/Makefile
+++ b/drivers/net/ethernet/8390/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_HPLAN_PLUS) += hp-plus.o 8390p.o
14obj-$(CONFIG_HPLAN) += hp.o 8390p.o 14obj-$(CONFIG_HPLAN) += hp.o 8390p.o
15obj-$(CONFIG_HYDRA) += hydra.o 8390.o 15obj-$(CONFIG_HYDRA) += hydra.o 8390.o
16obj-$(CONFIG_LNE390) += lne390.o 8390.o 16obj-$(CONFIG_LNE390) += lne390.o 8390.o
17obj-$(CONFIG_MCF8390) += mcf8390.o 8390.o
17obj-$(CONFIG_NE2000) += ne.o 8390p.o 18obj-$(CONFIG_NE2000) += ne.o 8390p.o
18obj-$(CONFIG_NE2_MCA) += ne2.o 8390p.o 19obj-$(CONFIG_NE2_MCA) += ne2.o 8390p.o
19obj-$(CONFIG_NE2K_PCI) += ne2k-pci.o 8390.o 20obj-$(CONFIG_NE2K_PCI) += ne2k-pci.o 8390.o
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c
index 923959275a82..912ed7a5f33a 100644
--- a/drivers/net/ethernet/8390/apne.c
+++ b/drivers/net/ethernet/8390/apne.c
@@ -454,7 +454,7 @@ apne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int rin
454 buf[count-1] = inb(NE_BASE + NE_DATAPORT); 454 buf[count-1] = inb(NE_BASE + NE_DATAPORT);
455 } 455 }
456 } else { 456 } else {
457 ptrc = (char*)buf; 457 ptrc = buf;
458 for (cnt = 0; cnt < count; cnt++) 458 for (cnt = 0; cnt < count; cnt++)
459 *ptrc++ = inb(NE_BASE + NE_DATAPORT); 459 *ptrc++ = inb(NE_BASE + NE_DATAPORT);
460 } 460 }
diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c
new file mode 100644
index 000000000000..230efd6fa5d5
--- /dev/null
+++ b/drivers/net/ethernet/8390/mcf8390.c
@@ -0,0 +1,480 @@
1/*
2 * Support for ColdFire CPU based boards using a NS8390 Ethernet device.
3 *
4 * Derived from the many other 8390 drivers.
5 *
6 * (C) Copyright 2012, Greg Ungerer <gerg@uclinux.org>
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file COPYING in the main directory of the Linux
10 * distribution for more details.
11 */
12
13#include <linux/module.h>
14#include <linux/kernel.h>
15#include <linux/errno.h>
16#include <linux/init.h>
17#include <linux/platform_device.h>
18#include <linux/netdevice.h>
19#include <linux/etherdevice.h>
20#include <linux/jiffies.h>
21#include <linux/io.h>
22#include <asm/mcf8390.h>
23
24static const char version[] =
25 "mcf8390.c: (15-06-2012) Greg Ungerer <gerg@uclinux.org>";
26
27#define NE_CMD 0x00
28#define NE_DATAPORT 0x10 /* NatSemi-defined port window offset */
29#define NE_RESET 0x1f /* Issue a read to reset ,a write to clear */
30#define NE_EN0_ISR 0x07
31#define NE_EN0_DCFG 0x0e
32#define NE_EN0_RSARLO 0x08
33#define NE_EN0_RSARHI 0x09
34#define NE_EN0_RCNTLO 0x0a
35#define NE_EN0_RXCR 0x0c
36#define NE_EN0_TXCR 0x0d
37#define NE_EN0_RCNTHI 0x0b
38#define NE_EN0_IMR 0x0f
39
40#define NESM_START_PG 0x40 /* First page of TX buffer */
41#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
42
43#ifdef NE2000_ODDOFFSET
44/*
45 * A lot of the ColdFire boards use a separate address region for odd offset
46 * register addresses. The following functions convert and map as required.
47 * Note that the data port accesses are treated a little differently, and
48 * always accessed via the insX/outsX functions.
49 */
50static inline u32 NE_PTR(u32 addr)
51{
52 if (addr & 1)
53 return addr - 1 + NE2000_ODDOFFSET;
54 return addr;
55}
56
57static inline u32 NE_DATA_PTR(u32 addr)
58{
59 return addr;
60}
61
62void ei_outb(u32 val, u32 addr)
63{
64 NE2000_BYTE *rp;
65
66 rp = (NE2000_BYTE *) NE_PTR(addr);
67 *rp = RSWAP(val);
68}
69
70#define ei_inb ei_inb
71u8 ei_inb(u32 addr)
72{
73 NE2000_BYTE *rp, val;
74
75 rp = (NE2000_BYTE *) NE_PTR(addr);
76 val = *rp;
77 return (u8) (RSWAP(val) & 0xff);
78}
79
80void ei_insb(u32 addr, void *vbuf, int len)
81{
82 NE2000_BYTE *rp, val;
83 u8 *buf;
84
85 buf = (u8 *) vbuf;
86 rp = (NE2000_BYTE *) NE_DATA_PTR(addr);
87 for (; (len > 0); len--) {
88 val = *rp;
89 *buf++ = RSWAP(val);
90 }
91}
92
93void ei_insw(u32 addr, void *vbuf, int len)
94{
95 volatile u16 *rp;
96 u16 w, *buf;
97
98 buf = (u16 *) vbuf;
99 rp = (volatile u16 *) NE_DATA_PTR(addr);
100 for (; (len > 0); len--) {
101 w = *rp;
102 *buf++ = BSWAP(w);
103 }
104}
105
106void ei_outsb(u32 addr, const void *vbuf, int len)
107{
108 NE2000_BYTE *rp, val;
109 u8 *buf;
110
111 buf = (u8 *) vbuf;
112 rp = (NE2000_BYTE *) NE_DATA_PTR(addr);
113 for (; (len > 0); len--) {
114 val = *buf++;
115 *rp = RSWAP(val);
116 }
117}
118
119void ei_outsw(u32 addr, const void *vbuf, int len)
120{
121 volatile u16 *rp;
122 u16 w, *buf;
123
124 buf = (u16 *) vbuf;
125 rp = (volatile u16 *) NE_DATA_PTR(addr);
126 for (; (len > 0); len--) {
127 w = *buf++;
128 *rp = BSWAP(w);
129 }
130}
131
132#else /* !NE2000_ODDOFFSET */
133
134#define ei_inb inb
135#define ei_outb outb
136#define ei_insb insb
137#define ei_insw insw
138#define ei_outsb outsb
139#define ei_outsw outsw
140
141#endif /* !NE2000_ODDOFFSET */
142
143#define ei_inb_p ei_inb
144#define ei_outb_p ei_outb
145
146#include "lib8390.c"
147
148/*
149 * Hard reset the card. This used to pause for the same period that a
150 * 8390 reset command required, but that shouldn't be necessary.
151 */
152static void mcf8390_reset_8390(struct net_device *dev)
153{
154 unsigned long reset_start_time = jiffies;
155 u32 addr = dev->base_addr;
156
157 if (ei_debug > 1)
158 netdev_dbg(dev, "resetting the 8390 t=%ld...\n", jiffies);
159
160 ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET);
161
162 ei_status.txing = 0;
163 ei_status.dmaing = 0;
164
165 /* This check _should_not_ be necessary, omit eventually. */
166 while ((ei_inb(addr + NE_EN0_ISR) & ENISR_RESET) == 0) {
167 if (time_after(jiffies, reset_start_time + 2 * HZ / 100)) {
168 netdev_warn(dev, "%s: did not complete\n", __func__);
169 break;
170 }
171 }
172
173 ei_outb(ENISR_RESET, addr + NE_EN0_ISR);
174}
175
176/*
177 * This *shouldn't* happen.
178 * If it does, it's the last thing you'll see
179 */
180static void mcf8390_dmaing_err(const char *func, struct net_device *dev,
181 struct ei_device *ei_local)
182{
183 netdev_err(dev, "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n",
184 func, ei_local->dmaing, ei_local->irqlock);
185}
186
187/*
188 * Grab the 8390 specific header. Similar to the block_input routine, but
189 * we don't need to be concerned with ring wrap as the header will be at
190 * the start of a page, so we optimize accordingly.
191 */
192static void mcf8390_get_8390_hdr(struct net_device *dev,
193 struct e8390_pkt_hdr *hdr, int ring_page)
194{
195 struct ei_device *ei_local = netdev_priv(dev);
196 u32 addr = dev->base_addr;
197
198 if (ei_local->dmaing) {
199 mcf8390_dmaing_err(__func__, dev, ei_local);
200 return;
201 }
202
203 ei_local->dmaing |= 0x01;
204 ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, addr + NE_CMD);
205 ei_outb(ENISR_RDC, addr + NE_EN0_ISR);
206 ei_outb(sizeof(struct e8390_pkt_hdr), addr + NE_EN0_RCNTLO);
207 ei_outb(0, addr + NE_EN0_RCNTHI);
208 ei_outb(0, addr + NE_EN0_RSARLO); /* On page boundary */
209 ei_outb(ring_page, addr + NE_EN0_RSARHI);
210 ei_outb(E8390_RREAD + E8390_START, addr + NE_CMD);
211
212 ei_insw(addr + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr) >> 1);
213
214 outb(ENISR_RDC, addr + NE_EN0_ISR); /* Ack intr */
215 ei_local->dmaing &= ~0x01;
216
217 hdr->count = cpu_to_le16(hdr->count);
218}
219
220/*
221 * Block input and output, similar to the Crynwr packet driver.
222 * If you are porting to a new ethercard, look at the packet driver source
223 * for hints. The NEx000 doesn't share the on-board packet memory --
224 * you have to put the packet out through the "remote DMA" dataport
225 * using z_writeb.
226 */
227static void mcf8390_block_input(struct net_device *dev, int count,
228 struct sk_buff *skb, int ring_offset)
229{
230 struct ei_device *ei_local = netdev_priv(dev);
231 u32 addr = dev->base_addr;
232 char *buf = skb->data;
233
234 if (ei_local->dmaing) {
235 mcf8390_dmaing_err(__func__, dev, ei_local);
236 return;
237 }
238
239 ei_local->dmaing |= 0x01;
240 ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, addr + NE_CMD);
241 ei_outb(ENISR_RDC, addr + NE_EN0_ISR);
242 ei_outb(count & 0xff, addr + NE_EN0_RCNTLO);
243 ei_outb(count >> 8, addr + NE_EN0_RCNTHI);
244 ei_outb(ring_offset & 0xff, addr + NE_EN0_RSARLO);
245 ei_outb(ring_offset >> 8, addr + NE_EN0_RSARHI);
246 ei_outb(E8390_RREAD + E8390_START, addr + NE_CMD);
247
248 ei_insw(addr + NE_DATAPORT, buf, count >> 1);
249 if (count & 1)
250 buf[count - 1] = ei_inb(addr + NE_DATAPORT);
251
252 ei_outb(ENISR_RDC, addr + NE_EN0_ISR); /* Ack intr */
253 ei_local->dmaing &= ~0x01;
254}
255
256static void mcf8390_block_output(struct net_device *dev, int count,
257 const unsigned char *buf,
258 const int start_page)
259{
260 struct ei_device *ei_local = netdev_priv(dev);
261 u32 addr = dev->base_addr;
262 unsigned long dma_start;
263
264 /* Make sure we transfer all bytes if 16bit IO writes */
265 if (count & 0x1)
266 count++;
267
268 if (ei_local->dmaing) {
269 mcf8390_dmaing_err(__func__, dev, ei_local);
270 return;
271 }
272
273 ei_local->dmaing |= 0x01;
274 /* We should already be in page 0, but to be safe... */
275 ei_outb(E8390_PAGE0 + E8390_START + E8390_NODMA, addr + NE_CMD);
276
277 ei_outb(ENISR_RDC, addr + NE_EN0_ISR);
278
279 /* Now the normal output. */
280 ei_outb(count & 0xff, addr + NE_EN0_RCNTLO);
281 ei_outb(count >> 8, addr + NE_EN0_RCNTHI);
282 ei_outb(0x00, addr + NE_EN0_RSARLO);
283 ei_outb(start_page, addr + NE_EN0_RSARHI);
284 ei_outb(E8390_RWRITE + E8390_START, addr + NE_CMD);
285
286 ei_outsw(addr + NE_DATAPORT, buf, count >> 1);
287
288 dma_start = jiffies;
289 while ((ei_inb(addr + NE_EN0_ISR) & ENISR_RDC) == 0) {
290 if (time_after(jiffies, dma_start + 2 * HZ / 100)) { /* 20ms */
291 netdev_err(dev, "timeout waiting for Tx RDC\n");
292 mcf8390_reset_8390(dev);
293 __NS8390_init(dev, 1);
294 break;
295 }
296 }
297
298 ei_outb(ENISR_RDC, addr + NE_EN0_ISR); /* Ack intr */
299 ei_local->dmaing &= ~0x01;
300}
301
302static const struct net_device_ops mcf8390_netdev_ops = {
303 .ndo_open = __ei_open,
304 .ndo_stop = __ei_close,
305 .ndo_start_xmit = __ei_start_xmit,
306 .ndo_tx_timeout = __ei_tx_timeout,
307 .ndo_get_stats = __ei_get_stats,
308 .ndo_set_rx_mode = __ei_set_multicast_list,
309 .ndo_validate_addr = eth_validate_addr,
310 .ndo_set_mac_address = eth_mac_addr,
311 .ndo_change_mtu = eth_change_mtu,
312#ifdef CONFIG_NET_POLL_CONTROLLER
313 .ndo_poll_controller = __ei_poll,
314#endif
315};
316
317static int mcf8390_init(struct net_device *dev)
318{
319 static u32 offsets[] = {
320 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
321 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
322 };
323 struct ei_device *ei_local = netdev_priv(dev);
324 unsigned char SA_prom[32];
325 u32 addr = dev->base_addr;
326 int start_page, stop_page;
327 int i, ret;
328
329 mcf8390_reset_8390(dev);
330
331 /*
332 * Read the 16 bytes of station address PROM.
333 * We must first initialize registers,
334 * similar to NS8390_init(eifdev, 0).
335 * We can't reliably read the SAPROM address without this.
336 * (I learned the hard way!).
337 */
338 {
339 static const struct {
340 u32 value;
341 u32 offset;
342 } program_seq[] = {
343 {E8390_NODMA + E8390_PAGE0 + E8390_STOP, NE_CMD},
344 /* Select page 0 */
345 {0x48, NE_EN0_DCFG}, /* 0x48: Set byte-wide access */
346 {0x00, NE_EN0_RCNTLO}, /* Clear the count regs */
347 {0x00, NE_EN0_RCNTHI},
348 {0x00, NE_EN0_IMR}, /* Mask completion irq */
349 {0xFF, NE_EN0_ISR},
350 {E8390_RXOFF, NE_EN0_RXCR}, /* 0x20 Set to monitor */
351 {E8390_TXOFF, NE_EN0_TXCR}, /* 0x02 and loopback mode */
352 {32, NE_EN0_RCNTLO},
353 {0x00, NE_EN0_RCNTHI},
354 {0x00, NE_EN0_RSARLO}, /* DMA starting at 0x0000 */
355 {0x00, NE_EN0_RSARHI},
356 {E8390_RREAD + E8390_START, NE_CMD},
357 };
358 for (i = 0; i < ARRAY_SIZE(program_seq); i++) {
359 ei_outb(program_seq[i].value,
360 addr + program_seq[i].offset);
361 }
362 }
363
364 for (i = 0; i < 16; i++) {
365 SA_prom[i] = ei_inb(addr + NE_DATAPORT);
366 ei_inb(addr + NE_DATAPORT);
367 }
368
369 /* We must set the 8390 for word mode. */
370 ei_outb(0x49, addr + NE_EN0_DCFG);
371 start_page = NESM_START_PG;
372 stop_page = NESM_STOP_PG;
373
374 /* Install the Interrupt handler */
375 ret = request_irq(dev->irq, __ei_interrupt, 0, dev->name, dev);
376 if (ret)
377 return ret;
378
379 for (i = 0; i < ETH_ALEN; i++)
380 dev->dev_addr[i] = SA_prom[i];
381
382 netdev_dbg(dev, "Found ethernet address: %pM\n", dev->dev_addr);
383
384 ei_local->name = "mcf8390";
385 ei_local->tx_start_page = start_page;
386 ei_local->stop_page = stop_page;
387 ei_local->word16 = 1;
388 ei_local->rx_start_page = start_page + TX_PAGES;
389 ei_local->reset_8390 = mcf8390_reset_8390;
390 ei_local->block_input = mcf8390_block_input;
391 ei_local->block_output = mcf8390_block_output;
392 ei_local->get_8390_hdr = mcf8390_get_8390_hdr;
393 ei_local->reg_offset = offsets;
394
395 dev->netdev_ops = &mcf8390_netdev_ops;
396 __NS8390_init(dev, 0);
397 ret = register_netdev(dev);
398 if (ret) {
399 free_irq(dev->irq, dev);
400 return ret;
401 }
402
403 netdev_info(dev, "addr=0x%08x irq=%d, Ethernet Address %pM\n",
404 addr, dev->irq, dev->dev_addr);
405 return 0;
406}
407
408static int mcf8390_probe(struct platform_device *pdev)
409{
410 struct net_device *dev;
411 struct ei_device *ei_local;
412 struct resource *mem, *irq;
413 resource_size_t msize;
414 int ret;
415
416 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
417 if (irq == NULL) {
418 dev_err(&pdev->dev, "no IRQ specified?\n");
419 return -ENXIO;
420 }
421
422 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
423 if (mem == NULL) {
424 dev_err(&pdev->dev, "no memory address specified?\n");
425 return -ENXIO;
426 }
427 msize = resource_size(mem);
428 if (!request_mem_region(mem->start, msize, pdev->name))
429 return -EBUSY;
430
431 dev = ____alloc_ei_netdev(0);
432 if (dev == NULL) {
433 release_mem_region(mem->start, msize);
434 return -ENOMEM;
435 }
436
437 SET_NETDEV_DEV(dev, &pdev->dev);
438 platform_set_drvdata(pdev, dev);
439 ei_local = netdev_priv(dev);
440
441 dev->irq = irq->start;
442 dev->base_addr = mem->start;
443
444 ret = mcf8390_init(dev);
445 if (ret) {
446 release_mem_region(mem->start, msize);
447 free_netdev(dev);
448 return ret;
449 }
450 return 0;
451}
452
453static int mcf8390_remove(struct platform_device *pdev)
454{
455 struct net_device *dev = platform_get_drvdata(pdev);
456 struct resource *mem;
457
458 unregister_netdev(dev);
459 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
460 if (mem)
461 release_mem_region(mem->start, resource_size(mem));
462 free_netdev(dev);
463 return 0;
464}
465
466static struct platform_driver mcf8390_drv = {
467 .driver = {
468 .name = "mcf8390",
469 .owner = THIS_MODULE,
470 },
471 .probe = mcf8390_probe,
472 .remove = mcf8390_remove,
473};
474
475module_platform_driver(mcf8390_drv);
476
477MODULE_DESCRIPTION("MCF8390 ColdFire NS8390 driver");
478MODULE_AUTHOR("Greg Ungerer <gerg@uclinux.org>");
479MODULE_LICENSE("GPL");
480MODULE_ALIAS("platform:mcf8390");
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 348501178089..9c77c736f171 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1014,7 +1014,7 @@ static int greth_set_mac_add(struct net_device *dev, void *p)
1014 struct greth_regs *regs; 1014 struct greth_regs *regs;
1015 1015
1016 greth = netdev_priv(dev); 1016 greth = netdev_priv(dev);
1017 regs = (struct greth_regs *) greth->regs; 1017 regs = greth->regs;
1018 1018
1019 if (!is_valid_ether_addr(addr->sa_data)) 1019 if (!is_valid_ether_addr(addr->sa_data))
1020 return -EADDRNOTAVAIL; 1020 return -EADDRNOTAVAIL;
@@ -1036,7 +1036,7 @@ static void greth_set_hash_filter(struct net_device *dev)
1036{ 1036{
1037 struct netdev_hw_addr *ha; 1037 struct netdev_hw_addr *ha;
1038 struct greth_private *greth = netdev_priv(dev); 1038 struct greth_private *greth = netdev_priv(dev);
1039 struct greth_regs *regs = (struct greth_regs *) greth->regs; 1039 struct greth_regs *regs = greth->regs;
1040 u32 mc_filter[2]; 1040 u32 mc_filter[2];
1041 unsigned int bitnr; 1041 unsigned int bitnr;
1042 1042
@@ -1055,7 +1055,7 @@ static void greth_set_multicast_list(struct net_device *dev)
1055{ 1055{
1056 int cfg; 1056 int cfg;
1057 struct greth_private *greth = netdev_priv(dev); 1057 struct greth_private *greth = netdev_priv(dev);
1058 struct greth_regs *regs = (struct greth_regs *) greth->regs; 1058 struct greth_regs *regs = greth->regs;
1059 1059
1060 cfg = GRETH_REGLOAD(regs->control); 1060 cfg = GRETH_REGLOAD(regs->control);
1061 if (dev->flags & IFF_PROMISC) 1061 if (dev->flags & IFF_PROMISC)
@@ -1414,7 +1414,7 @@ static int __devinit greth_of_probe(struct platform_device *ofdev)
1414 goto error1; 1414 goto error1;
1415 } 1415 }
1416 1416
1417 regs = (struct greth_regs *) greth->regs; 1417 regs = greth->regs;
1418 greth->irq = ofdev->archdata.irqs[0]; 1418 greth->irq = ofdev->archdata.irqs[0];
1419 1419
1420 dev_set_drvdata(greth->dev, dev); 1420 dev_set_drvdata(greth->dev, dev);
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 75299f500ee5..7203b522f234 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -623,7 +623,7 @@ static int lance_rx(struct net_device *dev)
623 skb_put(skb, len); /* make room */ 623 skb_put(skb, len); /* make room */
624 624
625 cp_from_buf(lp->type, skb->data, 625 cp_from_buf(lp->type, skb->data,
626 (char *)lp->rx_buf_ptr_cpu[entry], len); 626 lp->rx_buf_ptr_cpu[entry], len);
627 627
628 skb->protocol = eth_type_trans(skb, dev); 628 skb->protocol = eth_type_trans(skb, dev);
629 netif_rx(skb); 629 netif_rx(skb);
@@ -919,7 +919,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
919 *lib_ptr(ib, btx_ring[entry].length, lp->type) = (-len); 919 *lib_ptr(ib, btx_ring[entry].length, lp->type) = (-len);
920 *lib_ptr(ib, btx_ring[entry].misc, lp->type) = 0; 920 *lib_ptr(ib, btx_ring[entry].misc, lp->type) = 0;
921 921
922 cp_to_buf(lp->type, (char *)lp->tx_buf_ptr_cpu[entry], skb->data, len); 922 cp_to_buf(lp->type, lp->tx_buf_ptr_cpu[entry], skb->data, len);
923 923
924 /* Now, give the packet to the lance */ 924 /* Now, give the packet to the lance */
925 *lib_ptr(ib, btx_ring[entry].tmd1, lp->type) = 925 *lib_ptr(ib, btx_ring[entry].tmd1, lp->type) =
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index a6e2e840884e..5c728436b85e 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -873,10 +873,9 @@ lance_init_ring(struct net_device *dev, gfp_t gfp)
873 873
874 skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp); 874 skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp);
875 lp->rx_skbuff[i] = skb; 875 lp->rx_skbuff[i] = skb;
876 if (skb) { 876 if (skb)
877 skb->dev = dev;
878 rx_buff = skb->data; 877 rx_buff = skb->data;
879 } else 878 else
880 rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp); 879 rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
881 if (rx_buff == NULL) 880 if (rx_buff == NULL)
882 lp->rx_ring[i].base = 0; 881 lp->rx_ring[i].base = 0;
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index ab7ff8645ab1..a92ddee7f665 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -228,7 +228,7 @@ static int __devinit mace_probe(struct platform_device *pdev)
228 * bits are reversed. 228 * bits are reversed.
229 */ 229 */
230 230
231 addr = (void *)MACE_PROM; 231 addr = MACE_PROM;
232 232
233 for (j = 0; j < 6; ++j) { 233 for (j = 0; j < 6; ++j) {
234 u8 v = bitrev8(addr[j<<4]); 234 u8 v = bitrev8(addr[j<<4]);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
index ff9c73859d45..21e261ffbe10 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
@@ -199,7 +199,7 @@ int atl1c_read_mac_addr(struct atl1c_hw *hw)
199 199
200 err = atl1c_get_permanent_address(hw); 200 err = atl1c_get_permanent_address(hw);
201 if (err) 201 if (err)
202 random_ether_addr(hw->perm_mac_addr); 202 eth_random_addr(hw->perm_mac_addr);
203 203
204 memcpy(hw->mac_addr, hw->perm_mac_addr, sizeof(hw->perm_mac_addr)); 204 memcpy(hw->mac_addr, hw->perm_mac_addr, sizeof(hw->perm_mac_addr));
205 return err; 205 return err;
@@ -602,7 +602,7 @@ int atl1c_phy_reset(struct atl1c_hw *hw)
602 602
603int atl1c_phy_init(struct atl1c_hw *hw) 603int atl1c_phy_init(struct atl1c_hw *hw)
604{ 604{
605 struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter; 605 struct atl1c_adapter *adapter = hw->adapter;
606 struct pci_dev *pdev = adapter->pdev; 606 struct pci_dev *pdev = adapter->pdev;
607 int ret_val; 607 int ret_val;
608 u16 mii_bmcr_data = BMCR_RESET; 608 u16 mii_bmcr_data = BMCR_RESET;
@@ -696,7 +696,7 @@ int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex)
696/* select one link mode to get lower power consumption */ 696/* select one link mode to get lower power consumption */
697int atl1c_phy_to_ps_link(struct atl1c_hw *hw) 697int atl1c_phy_to_ps_link(struct atl1c_hw *hw)
698{ 698{
699 struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter; 699 struct atl1c_adapter *adapter = hw->adapter;
700 struct pci_dev *pdev = adapter->pdev; 700 struct pci_dev *pdev = adapter->pdev;
701 int ret = 0; 701 int ret = 0;
702 u16 autoneg_advertised = ADVERTISED_10baseT_Half; 702 u16 autoneg_advertised = ADVERTISED_10baseT_Half;
@@ -768,7 +768,7 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw)
768 768
769int atl1c_power_saving(struct atl1c_hw *hw, u32 wufc) 769int atl1c_power_saving(struct atl1c_hw *hw, u32 wufc)
770{ 770{
771 struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter; 771 struct atl1c_adapter *adapter = hw->adapter;
772 struct pci_dev *pdev = adapter->pdev; 772 struct pci_dev *pdev = adapter->pdev;
773 u32 master_ctrl, mac_ctrl, phy_ctrl; 773 u32 master_ctrl, mac_ctrl, phy_ctrl;
774 u32 wol_ctrl, speed; 774 u32 wol_ctrl, speed;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 1f78b63d5efe..36d3783ebfa0 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -166,7 +166,7 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
166 msleep(5); 166 msleep(5);
167} 167}
168 168
169/* 169/**
170 * atl1c_irq_enable - Enable default interrupt generation settings 170 * atl1c_irq_enable - Enable default interrupt generation settings
171 * @adapter: board private structure 171 * @adapter: board private structure
172 */ 172 */
@@ -179,7 +179,7 @@ static inline void atl1c_irq_enable(struct atl1c_adapter *adapter)
179 } 179 }
180} 180}
181 181
182/* 182/**
183 * atl1c_irq_disable - Mask off interrupt generation on the NIC 183 * atl1c_irq_disable - Mask off interrupt generation on the NIC
184 * @adapter: board private structure 184 * @adapter: board private structure
185 */ 185 */
@@ -192,7 +192,7 @@ static inline void atl1c_irq_disable(struct atl1c_adapter *adapter)
192 synchronize_irq(adapter->pdev->irq); 192 synchronize_irq(adapter->pdev->irq);
193} 193}
194 194
195/* 195/**
196 * atl1c_irq_reset - reset interrupt confiure on the NIC 196 * atl1c_irq_reset - reset interrupt confiure on the NIC
197 * @adapter: board private structure 197 * @adapter: board private structure
198 */ 198 */
@@ -220,7 +220,7 @@ static u32 atl1c_wait_until_idle(struct atl1c_hw *hw, u32 modu_ctrl)
220 return data; 220 return data;
221} 221}
222 222
223/* 223/**
224 * atl1c_phy_config - Timer Call-back 224 * atl1c_phy_config - Timer Call-back
225 * @data: pointer to netdev cast into an unsigned long 225 * @data: pointer to netdev cast into an unsigned long
226 */ 226 */
@@ -360,7 +360,7 @@ static void atl1c_del_timer(struct atl1c_adapter *adapter)
360} 360}
361 361
362 362
363/* 363/**
364 * atl1c_tx_timeout - Respond to a Tx Hang 364 * atl1c_tx_timeout - Respond to a Tx Hang
365 * @netdev: network interface device structure 365 * @netdev: network interface device structure
366 */ 366 */
@@ -373,7 +373,7 @@ static void atl1c_tx_timeout(struct net_device *netdev)
373 schedule_work(&adapter->common_task); 373 schedule_work(&adapter->common_task);
374} 374}
375 375
376/* 376/**
377 * atl1c_set_multi - Multicast and Promiscuous mode set 377 * atl1c_set_multi - Multicast and Promiscuous mode set
378 * @netdev: network interface device structure 378 * @netdev: network interface device structure
379 * 379 *
@@ -452,7 +452,7 @@ static void atl1c_restore_vlan(struct atl1c_adapter *adapter)
452 atl1c_vlan_mode(adapter->netdev, adapter->netdev->features); 452 atl1c_vlan_mode(adapter->netdev, adapter->netdev->features);
453} 453}
454 454
455/* 455/**
456 * atl1c_set_mac - Change the Ethernet Address of the NIC 456 * atl1c_set_mac - Change the Ethernet Address of the NIC
457 * @netdev: network interface device structure 457 * @netdev: network interface device structure
458 * @p: pointer to an address structure 458 * @p: pointer to an address structure
@@ -517,7 +517,7 @@ static int atl1c_set_features(struct net_device *netdev,
517 return 0; 517 return 0;
518} 518}
519 519
520/* 520/**
521 * atl1c_change_mtu - Change the Maximum Transfer Unit 521 * atl1c_change_mtu - Change the Maximum Transfer Unit
522 * @netdev: network interface device structure 522 * @netdev: network interface device structure
523 * @new_mtu: new value for maximum frame size 523 * @new_mtu: new value for maximum frame size
@@ -576,12 +576,6 @@ static void atl1c_mdio_write(struct net_device *netdev, int phy_id,
576 atl1c_write_phy_reg(&adapter->hw, reg_num, val); 576 atl1c_write_phy_reg(&adapter->hw, reg_num, val);
577} 577}
578 578
579/*
580 * atl1c_mii_ioctl -
581 * @netdev:
582 * @ifreq:
583 * @cmd:
584 */
585static int atl1c_mii_ioctl(struct net_device *netdev, 579static int atl1c_mii_ioctl(struct net_device *netdev,
586 struct ifreq *ifr, int cmd) 580 struct ifreq *ifr, int cmd)
587{ 581{
@@ -632,12 +626,6 @@ out:
632 return retval; 626 return retval;
633} 627}
634 628
635/*
636 * atl1c_ioctl -
637 * @netdev:
638 * @ifreq:
639 * @cmd:
640 */
641static int atl1c_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 629static int atl1c_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
642{ 630{
643 switch (cmd) { 631 switch (cmd) {
@@ -650,7 +638,7 @@ static int atl1c_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
650 } 638 }
651} 639}
652 640
653/* 641/**
654 * atl1c_alloc_queues - Allocate memory for all rings 642 * atl1c_alloc_queues - Allocate memory for all rings
655 * @adapter: board private structure to initialize 643 * @adapter: board private structure to initialize
656 * 644 *
@@ -754,7 +742,7 @@ static void __devinit atl1c_patch_assign(struct atl1c_hw *hw)
754 i++; 742 i++;
755 } 743 }
756} 744}
757/* 745/**
758 * atl1c_sw_init - Initialize general software structures (struct atl1c_adapter) 746 * atl1c_sw_init - Initialize general software structures (struct atl1c_adapter)
759 * @adapter: board private structure to initialize 747 * @adapter: board private structure to initialize
760 * 748 *
@@ -852,7 +840,7 @@ static inline void atl1c_clean_buffer(struct pci_dev *pdev,
852 buffer_info->skb = NULL; 840 buffer_info->skb = NULL;
853 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE); 841 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
854} 842}
855/* 843/**
856 * atl1c_clean_tx_ring - Free Tx-skb 844 * atl1c_clean_tx_ring - Free Tx-skb
857 * @adapter: board private structure 845 * @adapter: board private structure
858 */ 846 */
@@ -877,7 +865,7 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
877 tpd_ring->next_to_use = 0; 865 tpd_ring->next_to_use = 0;
878} 866}
879 867
880/* 868/**
881 * atl1c_clean_rx_ring - Free rx-reservation skbs 869 * atl1c_clean_rx_ring - Free rx-reservation skbs
882 * @adapter: board private structure 870 * @adapter: board private structure
883 */ 871 */
@@ -930,7 +918,7 @@ static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
930 } 918 }
931} 919}
932 920
933/* 921/**
934 * atl1c_free_ring_resources - Free Tx / RX descriptor Resources 922 * atl1c_free_ring_resources - Free Tx / RX descriptor Resources
935 * @adapter: board private structure 923 * @adapter: board private structure
936 * 924 *
@@ -953,7 +941,7 @@ static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
953 } 941 }
954} 942}
955 943
956/* 944/**
957 * atl1c_setup_mem_resources - allocate Tx / RX descriptor resources 945 * atl1c_setup_mem_resources - allocate Tx / RX descriptor resources
958 * @adapter: board private structure 946 * @adapter: board private structure
959 * 947 *
@@ -988,12 +976,12 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
988 } 976 }
989 for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) { 977 for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) {
990 tpd_ring[i].buffer_info = 978 tpd_ring[i].buffer_info =
991 (struct atl1c_buffer *) (tpd_ring->buffer_info + count); 979 (tpd_ring->buffer_info + count);
992 count += tpd_ring[i].count; 980 count += tpd_ring[i].count;
993 } 981 }
994 982
995 rfd_ring->buffer_info = 983 rfd_ring->buffer_info =
996 (struct atl1c_buffer *) (tpd_ring->buffer_info + count); 984 (tpd_ring->buffer_info + count);
997 count += rfd_ring->count; 985 count += rfd_ring->count;
998 rx_desc_count += rfd_ring->count; 986 rx_desc_count += rfd_ring->count;
999 987
@@ -1226,7 +1214,7 @@ static void atl1c_start_mac(struct atl1c_adapter *adapter)
1226 */ 1214 */
1227static int atl1c_reset_mac(struct atl1c_hw *hw) 1215static int atl1c_reset_mac(struct atl1c_hw *hw)
1228{ 1216{
1229 struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter; 1217 struct atl1c_adapter *adapter = hw->adapter;
1230 struct pci_dev *pdev = adapter->pdev; 1218 struct pci_dev *pdev = adapter->pdev;
1231 u32 ctrl_data = 0; 1219 u32 ctrl_data = 0;
1232 1220
@@ -1362,7 +1350,7 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed)
1362 return; 1350 return;
1363} 1351}
1364 1352
1365/* 1353/**
1366 * atl1c_configure - Configure Transmit&Receive Unit after Reset 1354 * atl1c_configure - Configure Transmit&Receive Unit after Reset
1367 * @adapter: board private structure 1355 * @adapter: board private structure
1368 * 1356 *
@@ -1476,7 +1464,7 @@ static void atl1c_update_hw_stats(struct atl1c_adapter *adapter)
1476 } 1464 }
1477} 1465}
1478 1466
1479/* 1467/**
1480 * atl1c_get_stats - Get System Network Statistics 1468 * atl1c_get_stats - Get System Network Statistics
1481 * @netdev: network interface device structure 1469 * @netdev: network interface device structure
1482 * 1470 *
@@ -1530,8 +1518,7 @@ static inline void atl1c_clear_phy_int(struct atl1c_adapter *adapter)
1530static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter, 1518static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
1531 enum atl1c_trans_queue type) 1519 enum atl1c_trans_queue type)
1532{ 1520{
1533 struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *) 1521 struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
1534 &adapter->tpd_ring[type];
1535 struct atl1c_buffer *buffer_info; 1522 struct atl1c_buffer *buffer_info;
1536 struct pci_dev *pdev = adapter->pdev; 1523 struct pci_dev *pdev = adapter->pdev;
1537 u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); 1524 u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
@@ -1558,11 +1545,10 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
1558 return true; 1545 return true;
1559} 1546}
1560 1547
1561/* 1548/**
1562 * atl1c_intr - Interrupt Handler 1549 * atl1c_intr - Interrupt Handler
1563 * @irq: interrupt number 1550 * @irq: interrupt number
1564 * @data: pointer to a network interface device structure 1551 * @data: pointer to a network interface device structure
1565 * @pt_regs: CPU registers structure
1566 */ 1552 */
1567static irqreturn_t atl1c_intr(int irq, void *data) 1553static irqreturn_t atl1c_intr(int irq, void *data)
1568{ 1554{
@@ -1813,9 +1799,8 @@ rrs_checked:
1813 atl1c_alloc_rx_buffer(adapter); 1799 atl1c_alloc_rx_buffer(adapter);
1814} 1800}
1815 1801
1816/* 1802/**
1817 * atl1c_clean - NAPI Rx polling callback 1803 * atl1c_clean - NAPI Rx polling callback
1818 * @adapter: board private structure
1819 */ 1804 */
1820static int atl1c_clean(struct napi_struct *napi, int budget) 1805static int atl1c_clean(struct napi_struct *napi, int budget)
1821{ 1806{
@@ -2270,7 +2255,7 @@ static void atl1c_down(struct atl1c_adapter *adapter)
2270 atl1c_reset_dma_ring(adapter); 2255 atl1c_reset_dma_ring(adapter);
2271} 2256}
2272 2257
2273/* 2258/**
2274 * atl1c_open - Called when a network interface is made active 2259 * atl1c_open - Called when a network interface is made active
2275 * @netdev: network interface device structure 2260 * @netdev: network interface device structure
2276 * 2261 *
@@ -2309,7 +2294,7 @@ err_up:
2309 return err; 2294 return err;
2310} 2295}
2311 2296
2312/* 2297/**
2313 * atl1c_close - Disables a network interface 2298 * atl1c_close - Disables a network interface
2314 * @netdev: network interface device structure 2299 * @netdev: network interface device structure
2315 * 2300 *
@@ -2432,7 +2417,7 @@ static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
2432 return 0; 2417 return 0;
2433} 2418}
2434 2419
2435/* 2420/**
2436 * atl1c_probe - Device Initialization Routine 2421 * atl1c_probe - Device Initialization Routine
2437 * @pdev: PCI device information struct 2422 * @pdev: PCI device information struct
2438 * @ent: entry in atl1c_pci_tbl 2423 * @ent: entry in atl1c_pci_tbl
@@ -2579,7 +2564,7 @@ err_dma:
2579 return err; 2564 return err;
2580} 2565}
2581 2566
2582/* 2567/**
2583 * atl1c_remove - Device Removal Routine 2568 * atl1c_remove - Device Removal Routine
2584 * @pdev: PCI device information struct 2569 * @pdev: PCI device information struct
2585 * 2570 *
@@ -2605,7 +2590,7 @@ static void __devexit atl1c_remove(struct pci_dev *pdev)
2605 free_netdev(netdev); 2590 free_netdev(netdev);
2606} 2591}
2607 2592
2608/* 2593/**
2609 * atl1c_io_error_detected - called when PCI error is detected 2594 * atl1c_io_error_detected - called when PCI error is detected
2610 * @pdev: Pointer to PCI device 2595 * @pdev: Pointer to PCI device
2611 * @state: The current pci connection state 2596 * @state: The current pci connection state
@@ -2633,7 +2618,7 @@ static pci_ers_result_t atl1c_io_error_detected(struct pci_dev *pdev,
2633 return PCI_ERS_RESULT_NEED_RESET; 2618 return PCI_ERS_RESULT_NEED_RESET;
2634} 2619}
2635 2620
2636/* 2621/**
2637 * atl1c_io_slot_reset - called after the pci bus has been reset. 2622 * atl1c_io_slot_reset - called after the pci bus has been reset.
2638 * @pdev: Pointer to PCI device 2623 * @pdev: Pointer to PCI device
2639 * 2624 *
@@ -2661,7 +2646,7 @@ static pci_ers_result_t atl1c_io_slot_reset(struct pci_dev *pdev)
2661 return PCI_ERS_RESULT_RECOVERED; 2646 return PCI_ERS_RESULT_RECOVERED;
2662} 2647}
2663 2648
2664/* 2649/**
2665 * atl1c_io_resume - called when traffic can start flowing again. 2650 * atl1c_io_resume - called when traffic can start flowing again.
2666 * @pdev: Pointer to PCI device 2651 * @pdev: Pointer to PCI device
2667 * 2652 *
@@ -2704,7 +2689,7 @@ static struct pci_driver atl1c_driver = {
2704 .driver.pm = &atl1c_pm_ops, 2689 .driver.pm = &atl1c_pm_ops,
2705}; 2690};
2706 2691
2707/* 2692/**
2708 * atl1c_init_module - Driver Registration Routine 2693 * atl1c_init_module - Driver Registration Routine
2709 * 2694 *
2710 * atl1c_init_module is the first routine called when the driver is 2695 * atl1c_init_module is the first routine called when the driver is
@@ -2715,7 +2700,7 @@ static int __init atl1c_init_module(void)
2715 return pci_register_driver(&atl1c_driver); 2700 return pci_register_driver(&atl1c_driver);
2716} 2701}
2717 2702
2718/* 2703/**
2719 * atl1c_exit_module - Driver Exit Cleanup Routine 2704 * atl1c_exit_module - Driver Exit Cleanup Routine
2720 * 2705 *
2721 * atl1c_exit_module is called just before the driver is removed 2706 * atl1c_exit_module is called just before the driver is removed
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
index 6e61f9f9ebb5..82b23861bf55 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
@@ -268,7 +268,7 @@ static int atl1e_set_eeprom(struct net_device *netdev,
268 if (eeprom_buff == NULL) 268 if (eeprom_buff == NULL)
269 return -ENOMEM; 269 return -ENOMEM;
270 270
271 ptr = (u32 *)eeprom_buff; 271 ptr = eeprom_buff;
272 272
273 if (eeprom->offset & 3) { 273 if (eeprom->offset & 3) {
274 /* need read/modify/write of first changed EEPROM word */ 274 /* need read/modify/write of first changed EEPROM word */
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 1220e511ced6..a98acc8a956f 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -89,7 +89,7 @@ static const u16 atl1e_pay_load_size[] = {
89 128, 256, 512, 1024, 2048, 4096, 89 128, 256, 512, 1024, 2048, 4096,
90}; 90};
91 91
92/* 92/**
93 * atl1e_irq_enable - Enable default interrupt generation settings 93 * atl1e_irq_enable - Enable default interrupt generation settings
94 * @adapter: board private structure 94 * @adapter: board private structure
95 */ 95 */
@@ -102,7 +102,7 @@ static inline void atl1e_irq_enable(struct atl1e_adapter *adapter)
102 } 102 }
103} 103}
104 104
105/* 105/**
106 * atl1e_irq_disable - Mask off interrupt generation on the NIC 106 * atl1e_irq_disable - Mask off interrupt generation on the NIC
107 * @adapter: board private structure 107 * @adapter: board private structure
108 */ 108 */
@@ -114,7 +114,7 @@ static inline void atl1e_irq_disable(struct atl1e_adapter *adapter)
114 synchronize_irq(adapter->pdev->irq); 114 synchronize_irq(adapter->pdev->irq);
115} 115}
116 116
117/* 117/**
118 * atl1e_irq_reset - reset interrupt confiure on the NIC 118 * atl1e_irq_reset - reset interrupt confiure on the NIC
119 * @adapter: board private structure 119 * @adapter: board private structure
120 */ 120 */
@@ -126,7 +126,7 @@ static inline void atl1e_irq_reset(struct atl1e_adapter *adapter)
126 AT_WRITE_FLUSH(&adapter->hw); 126 AT_WRITE_FLUSH(&adapter->hw);
127} 127}
128 128
129/* 129/**
130 * atl1e_phy_config - Timer Call-back 130 * atl1e_phy_config - Timer Call-back
131 * @data: pointer to netdev cast into an unsigned long 131 * @data: pointer to netdev cast into an unsigned long
132 */ 132 */
@@ -210,7 +210,7 @@ static int atl1e_check_link(struct atl1e_adapter *adapter)
210 return 0; 210 return 0;
211} 211}
212 212
213/* 213/**
214 * atl1e_link_chg_task - deal with link change event Out of interrupt context 214 * atl1e_link_chg_task - deal with link change event Out of interrupt context
215 * @netdev: network interface device structure 215 * @netdev: network interface device structure
216 */ 216 */
@@ -259,7 +259,7 @@ static void atl1e_cancel_work(struct atl1e_adapter *adapter)
259 cancel_work_sync(&adapter->link_chg_task); 259 cancel_work_sync(&adapter->link_chg_task);
260} 260}
261 261
262/* 262/**
263 * atl1e_tx_timeout - Respond to a Tx Hang 263 * atl1e_tx_timeout - Respond to a Tx Hang
264 * @netdev: network interface device structure 264 * @netdev: network interface device structure
265 */ 265 */
@@ -271,7 +271,7 @@ static void atl1e_tx_timeout(struct net_device *netdev)
271 schedule_work(&adapter->reset_task); 271 schedule_work(&adapter->reset_task);
272} 272}
273 273
274/* 274/**
275 * atl1e_set_multi - Multicast and Promiscuous mode set 275 * atl1e_set_multi - Multicast and Promiscuous mode set
276 * @netdev: network interface device structure 276 * @netdev: network interface device structure
277 * 277 *
@@ -345,7 +345,7 @@ static void atl1e_restore_vlan(struct atl1e_adapter *adapter)
345 atl1e_vlan_mode(adapter->netdev, adapter->netdev->features); 345 atl1e_vlan_mode(adapter->netdev, adapter->netdev->features);
346} 346}
347 347
348/* 348/**
349 * atl1e_set_mac - Change the Ethernet Address of the NIC 349 * atl1e_set_mac - Change the Ethernet Address of the NIC
350 * @netdev: network interface device structure 350 * @netdev: network interface device structure
351 * @p: pointer to an address structure 351 * @p: pointer to an address structure
@@ -397,7 +397,7 @@ static int atl1e_set_features(struct net_device *netdev,
397 return 0; 397 return 0;
398} 398}
399 399
400/* 400/**
401 * atl1e_change_mtu - Change the Maximum Transfer Unit 401 * atl1e_change_mtu - Change the Maximum Transfer Unit
402 * @netdev: network interface device structure 402 * @netdev: network interface device structure
403 * @new_mtu: new value for maximum frame size 403 * @new_mtu: new value for maximum frame size
@@ -449,12 +449,6 @@ static void atl1e_mdio_write(struct net_device *netdev, int phy_id,
449 atl1e_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val); 449 atl1e_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val);
450} 450}
451 451
452/*
453 * atl1e_mii_ioctl -
454 * @netdev:
455 * @ifreq:
456 * @cmd:
457 */
458static int atl1e_mii_ioctl(struct net_device *netdev, 452static int atl1e_mii_ioctl(struct net_device *netdev,
459 struct ifreq *ifr, int cmd) 453 struct ifreq *ifr, int cmd)
460{ 454{
@@ -505,12 +499,6 @@ out:
505 499
506} 500}
507 501
508/*
509 * atl1e_ioctl -
510 * @netdev:
511 * @ifreq:
512 * @cmd:
513 */
514static int atl1e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 502static int atl1e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
515{ 503{
516 switch (cmd) { 504 switch (cmd) {
@@ -541,7 +529,7 @@ static void atl1e_setup_pcicmd(struct pci_dev *pdev)
541 msleep(1); 529 msleep(1);
542} 530}
543 531
544/* 532/**
545 * atl1e_alloc_queues - Allocate memory for all rings 533 * atl1e_alloc_queues - Allocate memory for all rings
546 * @adapter: board private structure to initialize 534 * @adapter: board private structure to initialize
547 * 535 *
@@ -551,7 +539,7 @@ static int __devinit atl1e_alloc_queues(struct atl1e_adapter *adapter)
551 return 0; 539 return 0;
552} 540}
553 541
554/* 542/**
555 * atl1e_sw_init - Initialize general software structures (struct atl1e_adapter) 543 * atl1e_sw_init - Initialize general software structures (struct atl1e_adapter)
556 * @adapter: board private structure to initialize 544 * @adapter: board private structure to initialize
557 * 545 *
@@ -635,14 +623,13 @@ static int __devinit atl1e_sw_init(struct atl1e_adapter *adapter)
635 return 0; 623 return 0;
636} 624}
637 625
638/* 626/**
639 * atl1e_clean_tx_ring - Free Tx-skb 627 * atl1e_clean_tx_ring - Free Tx-skb
640 * @adapter: board private structure 628 * @adapter: board private structure
641 */ 629 */
642static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter) 630static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter)
643{ 631{
644 struct atl1e_tx_ring *tx_ring = (struct atl1e_tx_ring *) 632 struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
645 &adapter->tx_ring;
646 struct atl1e_tx_buffer *tx_buffer = NULL; 633 struct atl1e_tx_buffer *tx_buffer = NULL;
647 struct pci_dev *pdev = adapter->pdev; 634 struct pci_dev *pdev = adapter->pdev;
648 u16 index, ring_count; 635 u16 index, ring_count;
@@ -679,14 +666,14 @@ static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter)
679 ring_count); 666 ring_count);
680} 667}
681 668
682/* 669/**
683 * atl1e_clean_rx_ring - Free rx-reservation skbs 670 * atl1e_clean_rx_ring - Free rx-reservation skbs
684 * @adapter: board private structure 671 * @adapter: board private structure
685 */ 672 */
686static void atl1e_clean_rx_ring(struct atl1e_adapter *adapter) 673static void atl1e_clean_rx_ring(struct atl1e_adapter *adapter)
687{ 674{
688 struct atl1e_rx_ring *rx_ring = 675 struct atl1e_rx_ring *rx_ring =
689 (struct atl1e_rx_ring *)&adapter->rx_ring; 676 &adapter->rx_ring;
690 struct atl1e_rx_page_desc *rx_page_desc = rx_ring->rx_page_desc; 677 struct atl1e_rx_page_desc *rx_page_desc = rx_ring->rx_page_desc;
691 u16 i, j; 678 u16 i, j;
692 679
@@ -762,7 +749,7 @@ static void atl1e_init_ring_ptrs(struct atl1e_adapter *adapter)
762 } 749 }
763} 750}
764 751
765/* 752/**
766 * atl1e_free_ring_resources - Free Tx / RX descriptor Resources 753 * atl1e_free_ring_resources - Free Tx / RX descriptor Resources
767 * @adapter: board private structure 754 * @adapter: board private structure
768 * 755 *
@@ -787,7 +774,7 @@ static void atl1e_free_ring_resources(struct atl1e_adapter *adapter)
787 } 774 }
788} 775}
789 776
790/* 777/**
791 * atl1e_setup_mem_resources - allocate Tx / RX descriptor resources 778 * atl1e_setup_mem_resources - allocate Tx / RX descriptor resources
792 * @adapter: board private structure 779 * @adapter: board private structure
793 * 780 *
@@ -884,14 +871,12 @@ failed:
884 return err; 871 return err;
885} 872}
886 873
887static inline void atl1e_configure_des_ring(const struct atl1e_adapter *adapter) 874static inline void atl1e_configure_des_ring(struct atl1e_adapter *adapter)
888{ 875{
889 876
890 struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw; 877 struct atl1e_hw *hw = &adapter->hw;
891 struct atl1e_rx_ring *rx_ring = 878 struct atl1e_rx_ring *rx_ring = &adapter->rx_ring;
892 (struct atl1e_rx_ring *)&adapter->rx_ring; 879 struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
893 struct atl1e_tx_ring *tx_ring =
894 (struct atl1e_tx_ring *)&adapter->tx_ring;
895 struct atl1e_rx_page_desc *rx_page_desc = NULL; 880 struct atl1e_rx_page_desc *rx_page_desc = NULL;
896 int i, j; 881 int i, j;
897 882
@@ -932,7 +917,7 @@ static inline void atl1e_configure_des_ring(const struct atl1e_adapter *adapter)
932 917
933static inline void atl1e_configure_tx(struct atl1e_adapter *adapter) 918static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
934{ 919{
935 struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw; 920 struct atl1e_hw *hw = &adapter->hw;
936 u32 dev_ctrl_data = 0; 921 u32 dev_ctrl_data = 0;
937 u32 max_pay_load = 0; 922 u32 max_pay_load = 0;
938 u32 jumbo_thresh = 0; 923 u32 jumbo_thresh = 0;
@@ -975,7 +960,7 @@ static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
975 960
976static inline void atl1e_configure_rx(struct atl1e_adapter *adapter) 961static inline void atl1e_configure_rx(struct atl1e_adapter *adapter)
977{ 962{
978 struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw; 963 struct atl1e_hw *hw = &adapter->hw;
979 u32 rxf_len = 0; 964 u32 rxf_len = 0;
980 u32 rxf_low = 0; 965 u32 rxf_low = 0;
981 u32 rxf_high = 0; 966 u32 rxf_high = 0;
@@ -1078,7 +1063,7 @@ static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
1078 AT_WRITE_REG(hw, REG_MAC_CTRL, value); 1063 AT_WRITE_REG(hw, REG_MAC_CTRL, value);
1079} 1064}
1080 1065
1081/* 1066/**
1082 * atl1e_configure - Configure Transmit&Receive Unit after Reset 1067 * atl1e_configure - Configure Transmit&Receive Unit after Reset
1083 * @adapter: board private structure 1068 * @adapter: board private structure
1084 * 1069 *
@@ -1148,7 +1133,7 @@ static int atl1e_configure(struct atl1e_adapter *adapter)
1148 return 0; 1133 return 0;
1149} 1134}
1150 1135
1151/* 1136/**
1152 * atl1e_get_stats - Get System Network Statistics 1137 * atl1e_get_stats - Get System Network Statistics
1153 * @netdev: network interface device structure 1138 * @netdev: network interface device structure
1154 * 1139 *
@@ -1224,8 +1209,7 @@ static inline void atl1e_clear_phy_int(struct atl1e_adapter *adapter)
1224 1209
1225static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter) 1210static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter)
1226{ 1211{
1227 struct atl1e_tx_ring *tx_ring = (struct atl1e_tx_ring *) 1212 struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
1228 &adapter->tx_ring;
1229 struct atl1e_tx_buffer *tx_buffer = NULL; 1213 struct atl1e_tx_buffer *tx_buffer = NULL;
1230 u16 hw_next_to_clean = AT_READ_REGW(&adapter->hw, REG_TPD_CONS_IDX); 1214 u16 hw_next_to_clean = AT_READ_REGW(&adapter->hw, REG_TPD_CONS_IDX);
1231 u16 next_to_clean = atomic_read(&tx_ring->next_to_clean); 1215 u16 next_to_clean = atomic_read(&tx_ring->next_to_clean);
@@ -1261,11 +1245,10 @@ static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter)
1261 return true; 1245 return true;
1262} 1246}
1263 1247
1264/* 1248/**
1265 * atl1e_intr - Interrupt Handler 1249 * atl1e_intr - Interrupt Handler
1266 * @irq: interrupt number 1250 * @irq: interrupt number
1267 * @data: pointer to a network interface device structure 1251 * @data: pointer to a network interface device structure
1268 * @pt_regs: CPU registers structure
1269 */ 1252 */
1270static irqreturn_t atl1e_intr(int irq, void *data) 1253static irqreturn_t atl1e_intr(int irq, void *data)
1271{ 1254{
@@ -1384,15 +1367,14 @@ static struct atl1e_rx_page *atl1e_get_rx_page(struct atl1e_adapter *adapter,
1384 (struct atl1e_rx_page_desc *) adapter->rx_ring.rx_page_desc; 1367 (struct atl1e_rx_page_desc *) adapter->rx_ring.rx_page_desc;
1385 u8 rx_using = rx_page_desc[que].rx_using; 1368 u8 rx_using = rx_page_desc[que].rx_using;
1386 1369
1387 return (struct atl1e_rx_page *)&(rx_page_desc[que].rx_page[rx_using]); 1370 return &(rx_page_desc[que].rx_page[rx_using]);
1388} 1371}
1389 1372
1390static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que, 1373static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
1391 int *work_done, int work_to_do) 1374 int *work_done, int work_to_do)
1392{ 1375{
1393 struct net_device *netdev = adapter->netdev; 1376 struct net_device *netdev = adapter->netdev;
1394 struct atl1e_rx_ring *rx_ring = (struct atl1e_rx_ring *) 1377 struct atl1e_rx_ring *rx_ring = &adapter->rx_ring;
1395 &adapter->rx_ring;
1396 struct atl1e_rx_page_desc *rx_page_desc = 1378 struct atl1e_rx_page_desc *rx_page_desc =
1397 (struct atl1e_rx_page_desc *) rx_ring->rx_page_desc; 1379 (struct atl1e_rx_page_desc *) rx_ring->rx_page_desc;
1398 struct sk_buff *skb = NULL; 1380 struct sk_buff *skb = NULL;
@@ -1494,9 +1476,8 @@ fatal_err:
1494 schedule_work(&adapter->reset_task); 1476 schedule_work(&adapter->reset_task);
1495} 1477}
1496 1478
1497/* 1479/**
1498 * atl1e_clean - NAPI Rx polling callback 1480 * atl1e_clean - NAPI Rx polling callback
1499 * @adapter: board private structure
1500 */ 1481 */
1501static int atl1e_clean(struct napi_struct *napi, int budget) 1482static int atl1e_clean(struct napi_struct *napi, int budget)
1502{ 1483{
@@ -1576,7 +1557,7 @@ static struct atl1e_tpd_desc *atl1e_get_tpd(struct atl1e_adapter *adapter)
1576 tx_ring->next_to_use = 0; 1557 tx_ring->next_to_use = 0;
1577 1558
1578 memset(&tx_ring->desc[next_to_use], 0, sizeof(struct atl1e_tpd_desc)); 1559 memset(&tx_ring->desc[next_to_use], 0, sizeof(struct atl1e_tpd_desc));
1579 return (struct atl1e_tpd_desc *)&tx_ring->desc[next_to_use]; 1560 return &tx_ring->desc[next_to_use];
1580} 1561}
1581 1562
1582static struct atl1e_tx_buffer * 1563static struct atl1e_tx_buffer *
@@ -1961,7 +1942,7 @@ void atl1e_down(struct atl1e_adapter *adapter)
1961 atl1e_clean_rx_ring(adapter); 1942 atl1e_clean_rx_ring(adapter);
1962} 1943}
1963 1944
1964/* 1945/**
1965 * atl1e_open - Called when a network interface is made active 1946 * atl1e_open - Called when a network interface is made active
1966 * @netdev: network interface device structure 1947 * @netdev: network interface device structure
1967 * 1948 *
@@ -2007,7 +1988,7 @@ err_req_irq:
2007 return err; 1988 return err;
2008} 1989}
2009 1990
2010/* 1991/**
2011 * atl1e_close - Disables a network interface 1992 * atl1e_close - Disables a network interface
2012 * @netdev: network interface device structure 1993 * @netdev: network interface device structure
2013 * 1994 *
@@ -2061,8 +2042,8 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
2061 2042
2062 if (wufc) { 2043 if (wufc) {
2063 /* get link status */ 2044 /* get link status */
2064 atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data); 2045 atl1e_read_phy_reg(hw, MII_BMSR, &mii_bmsr_data);
2065 atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data); 2046 atl1e_read_phy_reg(hw, MII_BMSR, &mii_bmsr_data);
2066 2047
2067 mii_advertise_data = ADVERTISE_10HALF; 2048 mii_advertise_data = ADVERTISE_10HALF;
2068 2049
@@ -2086,7 +2067,7 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
2086 for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) { 2067 for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) {
2087 msleep(100); 2068 msleep(100);
2088 atl1e_read_phy_reg(hw, MII_BMSR, 2069 atl1e_read_phy_reg(hw, MII_BMSR,
2089 (u16 *)&mii_bmsr_data); 2070 &mii_bmsr_data);
2090 if (mii_bmsr_data & BMSR_LSTATUS) 2071 if (mii_bmsr_data & BMSR_LSTATUS)
2091 break; 2072 break;
2092 } 2073 }
@@ -2243,7 +2224,7 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
2243 return 0; 2224 return 0;
2244} 2225}
2245 2226
2246/* 2227/**
2247 * atl1e_probe - Device Initialization Routine 2228 * atl1e_probe - Device Initialization Routine
2248 * @pdev: PCI device information struct 2229 * @pdev: PCI device information struct
2249 * @ent: entry in atl1e_pci_tbl 2230 * @ent: entry in atl1e_pci_tbl
@@ -2397,7 +2378,7 @@ err_dma:
2397 return err; 2378 return err;
2398} 2379}
2399 2380
2400/* 2381/**
2401 * atl1e_remove - Device Removal Routine 2382 * atl1e_remove - Device Removal Routine
2402 * @pdev: PCI device information struct 2383 * @pdev: PCI device information struct
2403 * 2384 *
@@ -2429,7 +2410,7 @@ static void __devexit atl1e_remove(struct pci_dev *pdev)
2429 pci_disable_device(pdev); 2410 pci_disable_device(pdev);
2430} 2411}
2431 2412
2432/* 2413/**
2433 * atl1e_io_error_detected - called when PCI error is detected 2414 * atl1e_io_error_detected - called when PCI error is detected
2434 * @pdev: Pointer to PCI device 2415 * @pdev: Pointer to PCI device
2435 * @state: The current pci connection state 2416 * @state: The current pci connection state
@@ -2457,7 +2438,7 @@ atl1e_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
2457 return PCI_ERS_RESULT_NEED_RESET; 2438 return PCI_ERS_RESULT_NEED_RESET;
2458} 2439}
2459 2440
2460/* 2441/**
2461 * atl1e_io_slot_reset - called after the pci bus has been reset. 2442 * atl1e_io_slot_reset - called after the pci bus has been reset.
2462 * @pdev: Pointer to PCI device 2443 * @pdev: Pointer to PCI device
2463 * 2444 *
@@ -2484,7 +2465,7 @@ static pci_ers_result_t atl1e_io_slot_reset(struct pci_dev *pdev)
2484 return PCI_ERS_RESULT_RECOVERED; 2465 return PCI_ERS_RESULT_RECOVERED;
2485} 2466}
2486 2467
2487/* 2468/**
2488 * atl1e_io_resume - called when traffic can start flowing again. 2469 * atl1e_io_resume - called when traffic can start flowing again.
2489 * @pdev: Pointer to PCI device 2470 * @pdev: Pointer to PCI device
2490 * 2471 *
@@ -2528,7 +2509,7 @@ static struct pci_driver atl1e_driver = {
2528 .err_handler = &atl1e_err_handler 2509 .err_handler = &atl1e_err_handler
2529}; 2510};
2530 2511
2531/* 2512/**
2532 * atl1e_init_module - Driver Registration Routine 2513 * atl1e_init_module - Driver Registration Routine
2533 * 2514 *
2534 * atl1e_init_module is the first routine called when the driver is 2515 * atl1e_init_module is the first routine called when the driver is
@@ -2539,7 +2520,7 @@ static int __init atl1e_init_module(void)
2539 return pci_register_driver(&atl1e_driver); 2520 return pci_register_driver(&atl1e_driver);
2540} 2521}
2541 2522
2542/* 2523/**
2543 * atl1e_exit_module - Driver Exit Cleanup Routine 2524 * atl1e_exit_module - Driver Exit Cleanup Routine
2544 * 2525 *
2545 * atl1e_exit_module is called just before the driver is removed 2526 * atl1e_exit_module is called just before the driver is removed
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_param.c b/drivers/net/ethernet/atheros/atl1e/atl1e_param.c
index 0ce60b6e7ef0..b5086f1e637f 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_param.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_param.c
@@ -168,7 +168,7 @@ static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt,
168 return -1; 168 return -1;
169} 169}
170 170
171/* 171/**
172 * atl1e_check_options - Range Checking for Command Line Parameters 172 * atl1e_check_options - Range Checking for Command Line Parameters
173 * @adapter: board private structure 173 * @adapter: board private structure
174 * 174 *
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 5d10884e5080..7bae2ad7a7c0 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -195,7 +195,7 @@ static int __devinit atl1_validate_option(int *value, struct atl1_option *opt,
195 return -1; 195 return -1;
196} 196}
197 197
198/* 198/**
199 * atl1_check_options - Range Checking for Command Line Parameters 199 * atl1_check_options - Range Checking for Command Line Parameters
200 * @adapter: board private structure 200 * @adapter: board private structure
201 * 201 *
@@ -538,7 +538,7 @@ static s32 atl1_read_mac_addr(struct atl1_hw *hw)
538 u16 i; 538 u16 i;
539 539
540 if (atl1_get_permanent_address(hw)) { 540 if (atl1_get_permanent_address(hw)) {
541 random_ether_addr(hw->perm_mac_addr); 541 eth_random_addr(hw->perm_mac_addr);
542 ret = 1; 542 ret = 1;
543 } 543 }
544 544
@@ -937,7 +937,7 @@ static void atl1_set_mac_addr(struct atl1_hw *hw)
937 iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2)); 937 iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2));
938} 938}
939 939
940/* 940/**
941 * atl1_sw_init - Initialize general software structures (struct atl1_adapter) 941 * atl1_sw_init - Initialize general software structures (struct atl1_adapter)
942 * @adapter: board private structure to initialize 942 * @adapter: board private structure to initialize
943 * 943 *
@@ -1014,12 +1014,6 @@ static void mdio_write(struct net_device *netdev, int phy_id, int reg_num,
1014 atl1_write_phy_reg(&adapter->hw, reg_num, val); 1014 atl1_write_phy_reg(&adapter->hw, reg_num, val);
1015} 1015}
1016 1016
1017/*
1018 * atl1_mii_ioctl -
1019 * @netdev:
1020 * @ifreq:
1021 * @cmd:
1022 */
1023static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 1017static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1024{ 1018{
1025 struct atl1_adapter *adapter = netdev_priv(netdev); 1019 struct atl1_adapter *adapter = netdev_priv(netdev);
@@ -1036,7 +1030,7 @@ static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1036 return retval; 1030 return retval;
1037} 1031}
1038 1032
1039/* 1033/**
1040 * atl1_setup_mem_resources - allocate Tx / RX descriptor resources 1034 * atl1_setup_mem_resources - allocate Tx / RX descriptor resources
1041 * @adapter: board private structure 1035 * @adapter: board private structure
1042 * 1036 *
@@ -1061,7 +1055,7 @@ static s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
1061 goto err_nomem; 1055 goto err_nomem;
1062 } 1056 }
1063 rfd_ring->buffer_info = 1057 rfd_ring->buffer_info =
1064 (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count); 1058 (tpd_ring->buffer_info + tpd_ring->count);
1065 1059
1066 /* 1060 /*
1067 * real ring DMA buffer 1061 * real ring DMA buffer
@@ -1147,7 +1141,7 @@ static void atl1_init_ring_ptrs(struct atl1_adapter *adapter)
1147 atomic_set(&rrd_ring->next_to_clean, 0); 1141 atomic_set(&rrd_ring->next_to_clean, 0);
1148} 1142}
1149 1143
1150/* 1144/**
1151 * atl1_clean_rx_ring - Free RFD Buffers 1145 * atl1_clean_rx_ring - Free RFD Buffers
1152 * @adapter: board private structure 1146 * @adapter: board private structure
1153 */ 1147 */
@@ -1187,7 +1181,7 @@ static void atl1_clean_rx_ring(struct atl1_adapter *adapter)
1187 atomic_set(&rrd_ring->next_to_clean, 0); 1181 atomic_set(&rrd_ring->next_to_clean, 0);
1188} 1182}
1189 1183
1190/* 1184/**
1191 * atl1_clean_tx_ring - Free Tx Buffers 1185 * atl1_clean_tx_ring - Free Tx Buffers
1192 * @adapter: board private structure 1186 * @adapter: board private structure
1193 */ 1187 */
@@ -1227,7 +1221,7 @@ static void atl1_clean_tx_ring(struct atl1_adapter *adapter)
1227 atomic_set(&tpd_ring->next_to_clean, 0); 1221 atomic_set(&tpd_ring->next_to_clean, 0);
1228} 1222}
1229 1223
1230/* 1224/**
1231 * atl1_free_ring_resources - Free Tx / RX descriptor Resources 1225 * atl1_free_ring_resources - Free Tx / RX descriptor Resources
1232 * @adapter: board private structure 1226 * @adapter: board private structure
1233 * 1227 *
@@ -1470,7 +1464,7 @@ static void set_flow_ctrl_new(struct atl1_hw *hw)
1470 iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH); 1464 iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
1471} 1465}
1472 1466
1473/* 1467/**
1474 * atl1_configure - Configure Transmit&Receive Unit after Reset 1468 * atl1_configure - Configure Transmit&Receive Unit after Reset
1475 * @adapter: board private structure 1469 * @adapter: board private structure
1476 * 1470 *
@@ -1844,7 +1838,7 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter,
1844 } 1838 }
1845} 1839}
1846 1840
1847/* 1841/**
1848 * atl1_alloc_rx_buffers - Replace used receive buffers 1842 * atl1_alloc_rx_buffers - Replace used receive buffers
1849 * @adapter: address of board private structure 1843 * @adapter: address of board private structure
1850 */ 1844 */
@@ -2489,11 +2483,10 @@ static inline int atl1_sched_rings_clean(struct atl1_adapter* adapter)
2489 return 1; 2483 return 1;
2490} 2484}
2491 2485
2492/* 2486/**
2493 * atl1_intr - Interrupt Handler 2487 * atl1_intr - Interrupt Handler
2494 * @irq: interrupt number 2488 * @irq: interrupt number
2495 * @data: pointer to a network interface device structure 2489 * @data: pointer to a network interface device structure
2496 * @pt_regs: CPU registers structure
2497 */ 2490 */
2498static irqreturn_t atl1_intr(int irq, void *data) 2491static irqreturn_t atl1_intr(int irq, void *data)
2499{ 2492{
@@ -2574,7 +2567,7 @@ static irqreturn_t atl1_intr(int irq, void *data)
2574} 2567}
2575 2568
2576 2569
2577/* 2570/**
2578 * atl1_phy_config - Timer Call-back 2571 * atl1_phy_config - Timer Call-back
2579 * @data: pointer to netdev cast into an unsigned long 2572 * @data: pointer to netdev cast into an unsigned long
2580 */ 2573 */
@@ -2693,7 +2686,7 @@ static void atl1_reset_dev_task(struct work_struct *work)
2693 netif_device_attach(netdev); 2686 netif_device_attach(netdev);
2694} 2687}
2695 2688
2696/* 2689/**
2697 * atl1_change_mtu - Change the Maximum Transfer Unit 2690 * atl1_change_mtu - Change the Maximum Transfer Unit
2698 * @netdev: network interface device structure 2691 * @netdev: network interface device structure
2699 * @new_mtu: new value for maximum frame size 2692 * @new_mtu: new value for maximum frame size
@@ -2727,7 +2720,7 @@ static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
2727 return 0; 2720 return 0;
2728} 2721}
2729 2722
2730/* 2723/**
2731 * atl1_open - Called when a network interface is made active 2724 * atl1_open - Called when a network interface is made active
2732 * @netdev: network interface device structure 2725 * @netdev: network interface device structure
2733 * 2726 *
@@ -2762,7 +2755,7 @@ err_up:
2762 return err; 2755 return err;
2763} 2756}
2764 2757
2765/* 2758/**
2766 * atl1_close - Disables a network interface 2759 * atl1_close - Disables a network interface
2767 * @netdev: network interface device structure 2760 * @netdev: network interface device structure
2768 * 2761 *
@@ -2930,7 +2923,7 @@ static const struct net_device_ops atl1_netdev_ops = {
2930#endif 2923#endif
2931}; 2924};
2932 2925
2933/* 2926/**
2934 * atl1_probe - Device Initialization Routine 2927 * atl1_probe - Device Initialization Routine
2935 * @pdev: PCI device information struct 2928 * @pdev: PCI device information struct
2936 * @ent: entry in atl1_pci_tbl 2929 * @ent: entry in atl1_pci_tbl
@@ -3111,7 +3104,7 @@ err_request_regions:
3111 return err; 3104 return err;
3112} 3105}
3113 3106
3114/* 3107/**
3115 * atl1_remove - Device Removal Routine 3108 * atl1_remove - Device Removal Routine
3116 * @pdev: PCI device information struct 3109 * @pdev: PCI device information struct
3117 * 3110 *
@@ -3158,7 +3151,7 @@ static struct pci_driver atl1_driver = {
3158 .driver.pm = ATL1_PM_OPS, 3151 .driver.pm = ATL1_PM_OPS,
3159}; 3152};
3160 3153
3161/* 3154/**
3162 * atl1_exit_module - Driver Exit Cleanup Routine 3155 * atl1_exit_module - Driver Exit Cleanup Routine
3163 * 3156 *
3164 * atl1_exit_module is called just before the driver is removed 3157 * atl1_exit_module is called just before the driver is removed
@@ -3169,7 +3162,7 @@ static void __exit atl1_exit_module(void)
3169 pci_unregister_driver(&atl1_driver); 3162 pci_unregister_driver(&atl1_driver);
3170} 3163}
3171 3164
3172/* 3165/**
3173 * atl1_init_module - Driver Registration Routine 3166 * atl1_init_module - Driver Registration Routine
3174 * 3167 *
3175 * atl1_init_module is the first routine called when the driver is 3168 * atl1_init_module is the first routine called when the driver is
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 6762dc406b25..57d64b80fd72 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -75,7 +75,7 @@ static void atl2_set_ethtool_ops(struct net_device *netdev);
75 75
76static void atl2_check_options(struct atl2_adapter *adapter); 76static void atl2_check_options(struct atl2_adapter *adapter);
77 77
78/* 78/**
79 * atl2_sw_init - Initialize general software structures (struct atl2_adapter) 79 * atl2_sw_init - Initialize general software structures (struct atl2_adapter)
80 * @adapter: board private structure to initialize 80 * @adapter: board private structure to initialize
81 * 81 *
@@ -123,7 +123,7 @@ static int __devinit atl2_sw_init(struct atl2_adapter *adapter)
123 return 0; 123 return 0;
124} 124}
125 125
126/* 126/**
127 * atl2_set_multi - Multicast and Promiscuous mode set 127 * atl2_set_multi - Multicast and Promiscuous mode set
128 * @netdev: network interface device structure 128 * @netdev: network interface device structure
129 * 129 *
@@ -177,7 +177,7 @@ static void init_ring_ptrs(struct atl2_adapter *adapter)
177 adapter->txs_next_clear = 0; 177 adapter->txs_next_clear = 0;
178} 178}
179 179
180/* 180/**
181 * atl2_configure - Configure Transmit&Receive Unit after Reset 181 * atl2_configure - Configure Transmit&Receive Unit after Reset
182 * @adapter: board private structure 182 * @adapter: board private structure
183 * 183 *
@@ -283,7 +283,7 @@ static int atl2_configure(struct atl2_adapter *adapter)
283 return value; 283 return value;
284} 284}
285 285
286/* 286/**
287 * atl2_setup_ring_resources - allocate Tx / RX descriptor resources 287 * atl2_setup_ring_resources - allocate Tx / RX descriptor resources
288 * @adapter: board private structure 288 * @adapter: board private structure
289 * 289 *
@@ -340,7 +340,7 @@ static s32 atl2_setup_ring_resources(struct atl2_adapter *adapter)
340 return 0; 340 return 0;
341} 341}
342 342
343/* 343/**
344 * atl2_irq_enable - Enable default interrupt generation settings 344 * atl2_irq_enable - Enable default interrupt generation settings
345 * @adapter: board private structure 345 * @adapter: board private structure
346 */ 346 */
@@ -350,7 +350,7 @@ static inline void atl2_irq_enable(struct atl2_adapter *adapter)
350 ATL2_WRITE_FLUSH(&adapter->hw); 350 ATL2_WRITE_FLUSH(&adapter->hw);
351} 351}
352 352
353/* 353/**
354 * atl2_irq_disable - Mask off interrupt generation on the NIC 354 * atl2_irq_disable - Mask off interrupt generation on the NIC
355 * @adapter: board private structure 355 * @adapter: board private structure
356 */ 356 */
@@ -599,11 +599,10 @@ static inline void atl2_clear_phy_int(struct atl2_adapter *adapter)
599 spin_unlock(&adapter->stats_lock); 599 spin_unlock(&adapter->stats_lock);
600} 600}
601 601
602/* 602/**
603 * atl2_intr - Interrupt Handler 603 * atl2_intr - Interrupt Handler
604 * @irq: interrupt number 604 * @irq: interrupt number
605 * @data: pointer to a network interface device structure 605 * @data: pointer to a network interface device structure
606 * @pt_regs: CPU registers structure
607 */ 606 */
608static irqreturn_t atl2_intr(int irq, void *data) 607static irqreturn_t atl2_intr(int irq, void *data)
609{ 608{
@@ -679,7 +678,7 @@ static int atl2_request_irq(struct atl2_adapter *adapter)
679 netdev); 678 netdev);
680} 679}
681 680
682/* 681/**
683 * atl2_free_ring_resources - Free Tx / RX descriptor Resources 682 * atl2_free_ring_resources - Free Tx / RX descriptor Resources
684 * @adapter: board private structure 683 * @adapter: board private structure
685 * 684 *
@@ -692,7 +691,7 @@ static void atl2_free_ring_resources(struct atl2_adapter *adapter)
692 adapter->ring_dma); 691 adapter->ring_dma);
693} 692}
694 693
695/* 694/**
696 * atl2_open - Called when a network interface is made active 695 * atl2_open - Called when a network interface is made active
697 * @netdev: network interface device structure 696 * @netdev: network interface device structure
698 * 697 *
@@ -798,7 +797,7 @@ static void atl2_free_irq(struct atl2_adapter *adapter)
798#endif 797#endif
799} 798}
800 799
801/* 800/**
802 * atl2_close - Disables a network interface 801 * atl2_close - Disables a network interface
803 * @netdev: network interface device structure 802 * @netdev: network interface device structure
804 * 803 *
@@ -918,7 +917,7 @@ static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb,
918 return NETDEV_TX_OK; 917 return NETDEV_TX_OK;
919} 918}
920 919
921/* 920/**
922 * atl2_change_mtu - Change the Maximum Transfer Unit 921 * atl2_change_mtu - Change the Maximum Transfer Unit
923 * @netdev: network interface device structure 922 * @netdev: network interface device structure
924 * @new_mtu: new value for maximum frame size 923 * @new_mtu: new value for maximum frame size
@@ -943,7 +942,7 @@ static int atl2_change_mtu(struct net_device *netdev, int new_mtu)
943 return 0; 942 return 0;
944} 943}
945 944
946/* 945/**
947 * atl2_set_mac - Change the Ethernet Address of the NIC 946 * atl2_set_mac - Change the Ethernet Address of the NIC
948 * @netdev: network interface device structure 947 * @netdev: network interface device structure
949 * @p: pointer to an address structure 948 * @p: pointer to an address structure
@@ -969,12 +968,6 @@ static int atl2_set_mac(struct net_device *netdev, void *p)
969 return 0; 968 return 0;
970} 969}
971 970
972/*
973 * atl2_mii_ioctl -
974 * @netdev:
975 * @ifreq:
976 * @cmd:
977 */
978static int atl2_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 971static int atl2_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
979{ 972{
980 struct atl2_adapter *adapter = netdev_priv(netdev); 973 struct atl2_adapter *adapter = netdev_priv(netdev);
@@ -1011,12 +1004,6 @@ static int atl2_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1011 return 0; 1004 return 0;
1012} 1005}
1013 1006
1014/*
1015 * atl2_ioctl -
1016 * @netdev:
1017 * @ifreq:
1018 * @cmd:
1019 */
1020static int atl2_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 1007static int atl2_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1021{ 1008{
1022 switch (cmd) { 1009 switch (cmd) {
@@ -1033,7 +1020,7 @@ static int atl2_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1033 } 1020 }
1034} 1021}
1035 1022
1036/* 1023/**
1037 * atl2_tx_timeout - Respond to a Tx Hang 1024 * atl2_tx_timeout - Respond to a Tx Hang
1038 * @netdev: network interface device structure 1025 * @netdev: network interface device structure
1039 */ 1026 */
@@ -1045,7 +1032,7 @@ static void atl2_tx_timeout(struct net_device *netdev)
1045 schedule_work(&adapter->reset_task); 1032 schedule_work(&adapter->reset_task);
1046} 1033}
1047 1034
1048/* 1035/**
1049 * atl2_watchdog - Timer Call-back 1036 * atl2_watchdog - Timer Call-back
1050 * @data: pointer to netdev cast into an unsigned long 1037 * @data: pointer to netdev cast into an unsigned long
1051 */ 1038 */
@@ -1070,7 +1057,7 @@ static void atl2_watchdog(unsigned long data)
1070 } 1057 }
1071} 1058}
1072 1059
1073/* 1060/**
1074 * atl2_phy_config - Timer Call-back 1061 * atl2_phy_config - Timer Call-back
1075 * @data: pointer to netdev cast into an unsigned long 1062 * @data: pointer to netdev cast into an unsigned long
1076 */ 1063 */
@@ -1274,9 +1261,8 @@ static int atl2_check_link(struct atl2_adapter *adapter)
1274 return 0; 1261 return 0;
1275} 1262}
1276 1263
1277/* 1264/**
1278 * atl2_link_chg_task - deal with link change event Out of interrupt context 1265 * atl2_link_chg_task - deal with link change event Out of interrupt context
1279 * @netdev: network interface device structure
1280 */ 1266 */
1281static void atl2_link_chg_task(struct work_struct *work) 1267static void atl2_link_chg_task(struct work_struct *work)
1282{ 1268{
@@ -1341,7 +1327,7 @@ static const struct net_device_ops atl2_netdev_ops = {
1341#endif 1327#endif
1342}; 1328};
1343 1329
1344/* 1330/**
1345 * atl2_probe - Device Initialization Routine 1331 * atl2_probe - Device Initialization Routine
1346 * @pdev: PCI device information struct 1332 * @pdev: PCI device information struct
1347 * @ent: entry in atl2_pci_tbl 1333 * @ent: entry in atl2_pci_tbl
@@ -1501,7 +1487,7 @@ err_dma:
1501 return err; 1487 return err;
1502} 1488}
1503 1489
1504/* 1490/**
1505 * atl2_remove - Device Removal Routine 1491 * atl2_remove - Device Removal Routine
1506 * @pdev: PCI device information struct 1492 * @pdev: PCI device information struct
1507 * 1493 *
@@ -1728,7 +1714,7 @@ static struct pci_driver atl2_driver = {
1728 .shutdown = atl2_shutdown, 1714 .shutdown = atl2_shutdown,
1729}; 1715};
1730 1716
1731/* 1717/**
1732 * atl2_init_module - Driver Registration Routine 1718 * atl2_init_module - Driver Registration Routine
1733 * 1719 *
1734 * atl2_init_module is the first routine called when the driver is 1720 * atl2_init_module is the first routine called when the driver is
@@ -1743,7 +1729,7 @@ static int __init atl2_init_module(void)
1743} 1729}
1744module_init(atl2_init_module); 1730module_init(atl2_init_module);
1745 1731
1746/* 1732/**
1747 * atl2_exit_module - Driver Exit Cleanup Routine 1733 * atl2_exit_module - Driver Exit Cleanup Routine
1748 * 1734 *
1749 * atl2_exit_module is called just before the driver is removed 1735 * atl2_exit_module is called just before the driver is removed
@@ -2360,7 +2346,7 @@ static s32 atl2_read_mac_addr(struct atl2_hw *hw)
2360{ 2346{
2361 if (get_permanent_address(hw)) { 2347 if (get_permanent_address(hw)) {
2362 /* for test */ 2348 /* for test */
2363 /* FIXME: shouldn't we use random_ether_addr() here? */ 2349 /* FIXME: shouldn't we use eth_random_addr() here? */
2364 hw->perm_mac_addr[0] = 0x00; 2350 hw->perm_mac_addr[0] = 0x00;
2365 hw->perm_mac_addr[1] = 0x13; 2351 hw->perm_mac_addr[1] = 0x13;
2366 hw->perm_mac_addr[2] = 0x74; 2352 hw->perm_mac_addr[2] = 0x74;
@@ -2997,7 +2983,7 @@ static int __devinit atl2_validate_option(int *value, struct atl2_option *opt)
2997 return -1; 2983 return -1;
2998} 2984}
2999 2985
3000/* 2986/**
3001 * atl2_check_options - Range Checking for Command Line Parameters 2987 * atl2_check_options - Range Checking for Command Line Parameters
3002 * @adapter: board private structure 2988 * @adapter: board private structure
3003 * 2989 *
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
index b4f3aa49a7fc..77ffbc4a5071 100644
--- a/drivers/net/ethernet/atheros/atlx/atlx.c
+++ b/drivers/net/ethernet/atheros/atlx/atlx.c
@@ -64,7 +64,7 @@ static int atlx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
64 } 64 }
65} 65}
66 66
67/* 67/**
68 * atlx_set_mac - Change the Ethernet Address of the NIC 68 * atlx_set_mac - Change the Ethernet Address of the NIC
69 * @netdev: network interface device structure 69 * @netdev: network interface device structure
70 * @p: pointer to an address structure 70 * @p: pointer to an address structure
@@ -115,7 +115,7 @@ static void atlx_check_for_link(struct atlx_adapter *adapter)
115 schedule_work(&adapter->link_chg_task); 115 schedule_work(&adapter->link_chg_task);
116} 116}
117 117
118/* 118/**
119 * atlx_set_multi - Multicast and Promiscuous mode set 119 * atlx_set_multi - Multicast and Promiscuous mode set
120 * @netdev: network interface device structure 120 * @netdev: network interface device structure
121 * 121 *
@@ -162,7 +162,7 @@ static inline void atlx_imr_set(struct atlx_adapter *adapter,
162 ioread32(adapter->hw.hw_addr + REG_IMR); 162 ioread32(adapter->hw.hw_addr + REG_IMR);
163} 163}
164 164
165/* 165/**
166 * atlx_irq_enable - Enable default interrupt generation settings 166 * atlx_irq_enable - Enable default interrupt generation settings
167 * @adapter: board private structure 167 * @adapter: board private structure
168 */ 168 */
@@ -172,7 +172,7 @@ static void atlx_irq_enable(struct atlx_adapter *adapter)
172 adapter->int_enabled = true; 172 adapter->int_enabled = true;
173} 173}
174 174
175/* 175/**
176 * atlx_irq_disable - Mask off interrupt generation on the NIC 176 * atlx_irq_disable - Mask off interrupt generation on the NIC
177 * @adapter: board private structure 177 * @adapter: board private structure
178 */ 178 */
@@ -193,7 +193,7 @@ static void atlx_clear_phy_int(struct atlx_adapter *adapter)
193 spin_unlock_irqrestore(&adapter->lock, flags); 193 spin_unlock_irqrestore(&adapter->lock, flags);
194} 194}
195 195
196/* 196/**
197 * atlx_tx_timeout - Respond to a Tx Hang 197 * atlx_tx_timeout - Respond to a Tx Hang
198 * @netdev: network interface device structure 198 * @netdev: network interface device structure
199 */ 199 */
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index d09c6b583d17..9786c0e9890e 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -483,9 +483,11 @@ out:
483static void b44_stats_update(struct b44 *bp) 483static void b44_stats_update(struct b44 *bp)
484{ 484{
485 unsigned long reg; 485 unsigned long reg;
486 u32 *val; 486 u64 *val;
487 487
488 val = &bp->hw_stats.tx_good_octets; 488 val = &bp->hw_stats.tx_good_octets;
489 u64_stats_update_begin(&bp->hw_stats.syncp);
490
489 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) { 491 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
490 *val++ += br32(bp, reg); 492 *val++ += br32(bp, reg);
491 } 493 }
@@ -496,6 +498,8 @@ static void b44_stats_update(struct b44 *bp)
496 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) { 498 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
497 *val++ += br32(bp, reg); 499 *val++ += br32(bp, reg);
498 } 500 }
501
502 u64_stats_update_end(&bp->hw_stats.syncp);
499} 503}
500 504
501static void b44_link_report(struct b44 *bp) 505static void b44_link_report(struct b44 *bp)
@@ -1635,44 +1639,49 @@ static int b44_close(struct net_device *dev)
1635 return 0; 1639 return 0;
1636} 1640}
1637 1641
1638static struct net_device_stats *b44_get_stats(struct net_device *dev) 1642static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
1643 struct rtnl_link_stats64 *nstat)
1639{ 1644{
1640 struct b44 *bp = netdev_priv(dev); 1645 struct b44 *bp = netdev_priv(dev);
1641 struct net_device_stats *nstat = &dev->stats;
1642 struct b44_hw_stats *hwstat = &bp->hw_stats; 1646 struct b44_hw_stats *hwstat = &bp->hw_stats;
1643 1647 unsigned int start;
1644 /* Convert HW stats into netdevice stats. */ 1648
1645 nstat->rx_packets = hwstat->rx_pkts; 1649 do {
1646 nstat->tx_packets = hwstat->tx_pkts; 1650 start = u64_stats_fetch_begin_bh(&hwstat->syncp);
1647 nstat->rx_bytes = hwstat->rx_octets; 1651
1648 nstat->tx_bytes = hwstat->tx_octets; 1652 /* Convert HW stats into rtnl_link_stats64 stats. */
1649 nstat->tx_errors = (hwstat->tx_jabber_pkts + 1653 nstat->rx_packets = hwstat->rx_pkts;
1650 hwstat->tx_oversize_pkts + 1654 nstat->tx_packets = hwstat->tx_pkts;
1651 hwstat->tx_underruns + 1655 nstat->rx_bytes = hwstat->rx_octets;
1652 hwstat->tx_excessive_cols + 1656 nstat->tx_bytes = hwstat->tx_octets;
1653 hwstat->tx_late_cols); 1657 nstat->tx_errors = (hwstat->tx_jabber_pkts +
1654 nstat->multicast = hwstat->tx_multicast_pkts; 1658 hwstat->tx_oversize_pkts +
1655 nstat->collisions = hwstat->tx_total_cols; 1659 hwstat->tx_underruns +
1656 1660 hwstat->tx_excessive_cols +
1657 nstat->rx_length_errors = (hwstat->rx_oversize_pkts + 1661 hwstat->tx_late_cols);
1658 hwstat->rx_undersize); 1662 nstat->multicast = hwstat->tx_multicast_pkts;
1659 nstat->rx_over_errors = hwstat->rx_missed_pkts; 1663 nstat->collisions = hwstat->tx_total_cols;
1660 nstat->rx_frame_errors = hwstat->rx_align_errs; 1664
1661 nstat->rx_crc_errors = hwstat->rx_crc_errs; 1665 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1662 nstat->rx_errors = (hwstat->rx_jabber_pkts + 1666 hwstat->rx_undersize);
1663 hwstat->rx_oversize_pkts + 1667 nstat->rx_over_errors = hwstat->rx_missed_pkts;
1664 hwstat->rx_missed_pkts + 1668 nstat->rx_frame_errors = hwstat->rx_align_errs;
1665 hwstat->rx_crc_align_errs + 1669 nstat->rx_crc_errors = hwstat->rx_crc_errs;
1666 hwstat->rx_undersize + 1670 nstat->rx_errors = (hwstat->rx_jabber_pkts +
1667 hwstat->rx_crc_errs + 1671 hwstat->rx_oversize_pkts +
1668 hwstat->rx_align_errs + 1672 hwstat->rx_missed_pkts +
1669 hwstat->rx_symbol_errs); 1673 hwstat->rx_crc_align_errs +
1670 1674 hwstat->rx_undersize +
1671 nstat->tx_aborted_errors = hwstat->tx_underruns; 1675 hwstat->rx_crc_errs +
1676 hwstat->rx_align_errs +
1677 hwstat->rx_symbol_errs);
1678
1679 nstat->tx_aborted_errors = hwstat->tx_underruns;
1672#if 0 1680#if 0
1673 /* Carrier lost counter seems to be broken for some devices */ 1681 /* Carrier lost counter seems to be broken for some devices */
1674 nstat->tx_carrier_errors = hwstat->tx_carrier_lost; 1682 nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1675#endif 1683#endif
1684 } while (u64_stats_fetch_retry_bh(&hwstat->syncp, start));
1676 1685
1677 return nstat; 1686 return nstat;
1678} 1687}
@@ -1993,17 +2002,24 @@ static void b44_get_ethtool_stats(struct net_device *dev,
1993 struct ethtool_stats *stats, u64 *data) 2002 struct ethtool_stats *stats, u64 *data)
1994{ 2003{
1995 struct b44 *bp = netdev_priv(dev); 2004 struct b44 *bp = netdev_priv(dev);
1996 u32 *val = &bp->hw_stats.tx_good_octets; 2005 struct b44_hw_stats *hwstat = &bp->hw_stats;
2006 u64 *data_src, *data_dst;
2007 unsigned int start;
1997 u32 i; 2008 u32 i;
1998 2009
1999 spin_lock_irq(&bp->lock); 2010 spin_lock_irq(&bp->lock);
2000
2001 b44_stats_update(bp); 2011 b44_stats_update(bp);
2012 spin_unlock_irq(&bp->lock);
2002 2013
2003 for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++) 2014 do {
2004 *data++ = *val++; 2015 data_src = &hwstat->tx_good_octets;
2016 data_dst = data;
2017 start = u64_stats_fetch_begin_bh(&hwstat->syncp);
2005 2018
2006 spin_unlock_irq(&bp->lock); 2019 for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
2020 *data_dst++ = *data_src++;
2021
2022 } while (u64_stats_fetch_retry_bh(&hwstat->syncp, start));
2007} 2023}
2008 2024
2009static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2025static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -2113,7 +2129,7 @@ static const struct net_device_ops b44_netdev_ops = {
2113 .ndo_open = b44_open, 2129 .ndo_open = b44_open,
2114 .ndo_stop = b44_close, 2130 .ndo_stop = b44_close,
2115 .ndo_start_xmit = b44_start_xmit, 2131 .ndo_start_xmit = b44_start_xmit,
2116 .ndo_get_stats = b44_get_stats, 2132 .ndo_get_stats64 = b44_get_stats64,
2117 .ndo_set_rx_mode = b44_set_rx_mode, 2133 .ndo_set_rx_mode = b44_set_rx_mode,
2118 .ndo_set_mac_address = b44_set_mac_addr, 2134 .ndo_set_mac_address = b44_set_mac_addr,
2119 .ndo_validate_addr = eth_validate_addr, 2135 .ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/broadcom/b44.h b/drivers/net/ethernet/broadcom/b44.h
index e1905a49279f..8993d72f0420 100644
--- a/drivers/net/ethernet/broadcom/b44.h
+++ b/drivers/net/ethernet/broadcom/b44.h
@@ -338,9 +338,10 @@ struct ring_info {
338 * the layout 338 * the layout
339 */ 339 */
340struct b44_hw_stats { 340struct b44_hw_stats {
341#define _B44(x) u32 x; 341#define _B44(x) u64 x;
342B44_STAT_REG_DECLARE 342B44_STAT_REG_DECLARE
343#undef _B44 343#undef _B44
344 struct u64_stats_sync syncp;
344}; 345};
345 346
346struct ssb_device; 347struct ssb_device;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 1fa4927a45b1..79cebd8525ce 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -14,6 +14,7 @@
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/moduleparam.h> 15#include <linux/moduleparam.h>
16 16
17#include <linux/stringify.h>
17#include <linux/kernel.h> 18#include <linux/kernel.h>
18#include <linux/timer.h> 19#include <linux/timer.h>
19#include <linux/errno.h> 20#include <linux/errno.h>
@@ -57,8 +58,8 @@
57#include "bnx2_fw.h" 58#include "bnx2_fw.h"
58 59
59#define DRV_MODULE_NAME "bnx2" 60#define DRV_MODULE_NAME "bnx2"
60#define DRV_MODULE_VERSION "2.2.1" 61#define DRV_MODULE_VERSION "2.2.3"
61#define DRV_MODULE_RELDATE "Dec 18, 2011" 62#define DRV_MODULE_RELDATE "June 27, 2012"
62#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw" 63#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw"
63#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw" 64#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
64#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw" 65#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw"
@@ -872,8 +873,7 @@ bnx2_alloc_mem(struct bnx2 *bp)
872 873
873 bnapi = &bp->bnx2_napi[i]; 874 bnapi = &bp->bnx2_napi[i];
874 875
875 sblk = (void *) (status_blk + 876 sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
876 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
877 bnapi->status_blk.msix = sblk; 877 bnapi->status_blk.msix = sblk;
878 bnapi->hw_tx_cons_ptr = 878 bnapi->hw_tx_cons_ptr =
879 &sblk->status_tx_quick_consumer_index; 879 &sblk->status_tx_quick_consumer_index;
@@ -1972,22 +1972,26 @@ bnx2_remote_phy_event(struct bnx2 *bp)
1972 switch (speed) { 1972 switch (speed) {
1973 case BNX2_LINK_STATUS_10HALF: 1973 case BNX2_LINK_STATUS_10HALF:
1974 bp->duplex = DUPLEX_HALF; 1974 bp->duplex = DUPLEX_HALF;
1975 /* fall through */
1975 case BNX2_LINK_STATUS_10FULL: 1976 case BNX2_LINK_STATUS_10FULL:
1976 bp->line_speed = SPEED_10; 1977 bp->line_speed = SPEED_10;
1977 break; 1978 break;
1978 case BNX2_LINK_STATUS_100HALF: 1979 case BNX2_LINK_STATUS_100HALF:
1979 bp->duplex = DUPLEX_HALF; 1980 bp->duplex = DUPLEX_HALF;
1981 /* fall through */
1980 case BNX2_LINK_STATUS_100BASE_T4: 1982 case BNX2_LINK_STATUS_100BASE_T4:
1981 case BNX2_LINK_STATUS_100FULL: 1983 case BNX2_LINK_STATUS_100FULL:
1982 bp->line_speed = SPEED_100; 1984 bp->line_speed = SPEED_100;
1983 break; 1985 break;
1984 case BNX2_LINK_STATUS_1000HALF: 1986 case BNX2_LINK_STATUS_1000HALF:
1985 bp->duplex = DUPLEX_HALF; 1987 bp->duplex = DUPLEX_HALF;
1988 /* fall through */
1986 case BNX2_LINK_STATUS_1000FULL: 1989 case BNX2_LINK_STATUS_1000FULL:
1987 bp->line_speed = SPEED_1000; 1990 bp->line_speed = SPEED_1000;
1988 break; 1991 break;
1989 case BNX2_LINK_STATUS_2500HALF: 1992 case BNX2_LINK_STATUS_2500HALF:
1990 bp->duplex = DUPLEX_HALF; 1993 bp->duplex = DUPLEX_HALF;
1994 /* fall through */
1991 case BNX2_LINK_STATUS_2500FULL: 1995 case BNX2_LINK_STATUS_2500FULL:
1992 bp->line_speed = SPEED_2500; 1996 bp->line_speed = SPEED_2500;
1993 break; 1997 break;
@@ -2473,6 +2477,7 @@ bnx2_dump_mcp_state(struct bnx2 *bp)
2473 bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE)); 2477 bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2474 pr_cont(" condition[%08x]\n", 2478 pr_cont(" condition[%08x]\n",
2475 bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION)); 2479 bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2480 DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
2476 DP_SHMEM_LINE(bp, 0x3cc); 2481 DP_SHMEM_LINE(bp, 0x3cc);
2477 DP_SHMEM_LINE(bp, 0x3dc); 2482 DP_SHMEM_LINE(bp, 0x3dc);
2478 DP_SHMEM_LINE(bp, 0x3ec); 2483 DP_SHMEM_LINE(bp, 0x3ec);
@@ -6245,7 +6250,7 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6245static int 6250static int
6246bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi) 6251bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6247{ 6252{
6248 int cpus = num_online_cpus(); 6253 int cpus = netif_get_num_default_rss_queues();
6249 int msix_vecs; 6254 int msix_vecs;
6250 6255
6251 if (!bp->num_req_rx_rings) 6256 if (!bp->num_req_rx_rings)
@@ -6383,6 +6388,7 @@ bnx2_reset_task(struct work_struct *work)
6383{ 6388{
6384 struct bnx2 *bp = container_of(work, struct bnx2, reset_task); 6389 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6385 int rc; 6390 int rc;
6391 u16 pcicmd;
6386 6392
6387 rtnl_lock(); 6393 rtnl_lock();
6388 if (!netif_running(bp->dev)) { 6394 if (!netif_running(bp->dev)) {
@@ -6392,6 +6398,12 @@ bnx2_reset_task(struct work_struct *work)
6392 6398
6393 bnx2_netif_stop(bp, true); 6399 bnx2_netif_stop(bp, true);
6394 6400
6401 pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
6402 if (!(pcicmd & PCI_COMMAND_MEMORY)) {
6403 /* in case PCI block has reset */
6404 pci_restore_state(bp->pdev);
6405 pci_save_state(bp->pdev);
6406 }
6395 rc = bnx2_init_nic(bp, 1); 6407 rc = bnx2_init_nic(bp, 1);
6396 if (rc) { 6408 if (rc) {
6397 netdev_err(bp->dev, "failed to reset NIC, closing\n"); 6409 netdev_err(bp->dev, "failed to reset NIC, closing\n");
@@ -6406,6 +6418,75 @@ bnx2_reset_task(struct work_struct *work)
6406 rtnl_unlock(); 6418 rtnl_unlock();
6407} 6419}
6408 6420
6421#define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
6422
6423static void
6424bnx2_dump_ftq(struct bnx2 *bp)
6425{
6426 int i;
6427 u32 reg, bdidx, cid, valid;
6428 struct net_device *dev = bp->dev;
6429 static const struct ftq_reg {
6430 char *name;
6431 u32 off;
6432 } ftq_arr[] = {
6433 BNX2_FTQ_ENTRY(RV2P_P),
6434 BNX2_FTQ_ENTRY(RV2P_T),
6435 BNX2_FTQ_ENTRY(RV2P_M),
6436 BNX2_FTQ_ENTRY(TBDR_),
6437 BNX2_FTQ_ENTRY(TDMA_),
6438 BNX2_FTQ_ENTRY(TXP_),
6439 BNX2_FTQ_ENTRY(TXP_),
6440 BNX2_FTQ_ENTRY(TPAT_),
6441 BNX2_FTQ_ENTRY(RXP_C),
6442 BNX2_FTQ_ENTRY(RXP_),
6443 BNX2_FTQ_ENTRY(COM_COMXQ_),
6444 BNX2_FTQ_ENTRY(COM_COMTQ_),
6445 BNX2_FTQ_ENTRY(COM_COMQ_),
6446 BNX2_FTQ_ENTRY(CP_CPQ_),
6447 };
6448
6449 netdev_err(dev, "<--- start FTQ dump --->\n");
6450 for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
6451 netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
6452 bnx2_reg_rd_ind(bp, ftq_arr[i].off));
6453
6454 netdev_err(dev, "CPU states:\n");
6455 for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
6456 netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
6457 reg, bnx2_reg_rd_ind(bp, reg),
6458 bnx2_reg_rd_ind(bp, reg + 4),
6459 bnx2_reg_rd_ind(bp, reg + 8),
6460 bnx2_reg_rd_ind(bp, reg + 0x1c),
6461 bnx2_reg_rd_ind(bp, reg + 0x1c),
6462 bnx2_reg_rd_ind(bp, reg + 0x20));
6463
6464 netdev_err(dev, "<--- end FTQ dump --->\n");
6465 netdev_err(dev, "<--- start TBDC dump --->\n");
6466 netdev_err(dev, "TBDC free cnt: %ld\n",
6467 REG_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
6468 netdev_err(dev, "LINE CID BIDX CMD VALIDS\n");
6469 for (i = 0; i < 0x20; i++) {
6470 int j = 0;
6471
6472 REG_WR(bp, BNX2_TBDC_BD_ADDR, i);
6473 REG_WR(bp, BNX2_TBDC_CAM_OPCODE,
6474 BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
6475 REG_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
6476 while ((REG_RD(bp, BNX2_TBDC_COMMAND) &
6477 BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
6478 j++;
6479
6480 cid = REG_RD(bp, BNX2_TBDC_CID);
6481 bdidx = REG_RD(bp, BNX2_TBDC_BIDX);
6482 valid = REG_RD(bp, BNX2_TBDC_CAM_OPCODE);
6483 netdev_err(dev, "%02x %06x %04lx %02x [%x]\n",
6484 i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
6485 bdidx >> 24, (valid >> 8) & 0x0ff);
6486 }
6487 netdev_err(dev, "<--- end TBDC dump --->\n");
6488}
6489
6409static void 6490static void
6410bnx2_dump_state(struct bnx2 *bp) 6491bnx2_dump_state(struct bnx2 *bp)
6411{ 6492{
@@ -6435,6 +6516,7 @@ bnx2_tx_timeout(struct net_device *dev)
6435{ 6516{
6436 struct bnx2 *bp = netdev_priv(dev); 6517 struct bnx2 *bp = netdev_priv(dev);
6437 6518
6519 bnx2_dump_ftq(bp);
6438 bnx2_dump_state(bp); 6520 bnx2_dump_state(bp);
6439 bnx2_dump_mcp_state(bp); 6521 bnx2_dump_mcp_state(bp);
6440 6522
@@ -6628,6 +6710,7 @@ bnx2_close(struct net_device *dev)
6628 6710
6629 bnx2_disable_int_sync(bp); 6711 bnx2_disable_int_sync(bp);
6630 bnx2_napi_disable(bp); 6712 bnx2_napi_disable(bp);
6713 netif_tx_disable(dev);
6631 del_timer_sync(&bp->timer); 6714 del_timer_sync(&bp->timer);
6632 bnx2_shutdown_chip(bp); 6715 bnx2_shutdown_chip(bp);
6633 bnx2_free_irq(bp); 6716 bnx2_free_irq(bp);
@@ -7832,7 +7915,7 @@ bnx2_get_5709_media(struct bnx2 *bp)
7832 else 7915 else
7833 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8; 7916 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7834 7917
7835 if (PCI_FUNC(bp->pdev->devfn) == 0) { 7918 if (bp->func == 0) {
7836 switch (strap) { 7919 switch (strap) {
7837 case 0x4: 7920 case 0x4:
7838 case 0x5: 7921 case 0x5:
@@ -8131,9 +8214,12 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8131 8214
8132 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE); 8215 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8133 8216
8217 if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8218 bp->func = 1;
8219
8134 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) == 8220 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8135 BNX2_SHM_HDR_SIGNATURE_SIG) { 8221 BNX2_SHM_HDR_SIGNATURE_SIG) {
8136 u32 off = PCI_FUNC(pdev->devfn) << 2; 8222 u32 off = bp->func << 2;
8137 8223
8138 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off); 8224 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8139 } else 8225 } else
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index dc06bda73be7..af6451dec295 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -4642,6 +4642,47 @@ struct l2_fhdr {
4642#define BNX2_TBDR_FTQ_CTL_CUR_DEPTH (0x3ffL<<22) 4642#define BNX2_TBDR_FTQ_CTL_CUR_DEPTH (0x3ffL<<22)
4643 4643
4644 4644
4645/*
4646 * tbdc definition
4647 * offset: 0x5400
4648 */
4649#define BNX2_TBDC_COMMAND 0x5400
4650#define BNX2_TBDC_COMMAND_CMD_ENABLED (1UL<<0)
4651#define BNX2_TBDC_COMMAND_CMD_FLUSH (1UL<<1)
4652#define BNX2_TBDC_COMMAND_CMD_SOFT_RST (1UL<<2)
4653#define BNX2_TBDC_COMMAND_CMD_REG_ARB (1UL<<3)
4654#define BNX2_TBDC_COMMAND_WRCHK_RANGE_ERROR (1UL<<4)
4655#define BNX2_TBDC_COMMAND_WRCHK_ALL_ONES_ERROR (1UL<<5)
4656#define BNX2_TBDC_COMMAND_WRCHK_ALL_ZEROS_ERROR (1UL<<6)
4657#define BNX2_TBDC_COMMAND_WRCHK_ANY_ONES_ERROR (1UL<<7)
4658#define BNX2_TBDC_COMMAND_WRCHK_ANY_ZEROS_ERROR (1UL<<8)
4659
4660#define BNX2_TBDC_STATUS 0x5404
4661#define BNX2_TBDC_STATUS_FREE_CNT (0x3fUL<<0)
4662
4663#define BNX2_TBDC_BD_ADDR 0x5424
4664
4665#define BNX2_TBDC_BIDX 0x542c
4666#define BNX2_TBDC_BDIDX_BDIDX (0xffffUL<<0)
4667#define BNX2_TBDC_BDIDX_CMD (0xffUL<<24)
4668
4669#define BNX2_TBDC_CID 0x5430
4670
4671#define BNX2_TBDC_CAM_OPCODE 0x5434
4672#define BNX2_TBDC_CAM_OPCODE_OPCODE (0x7UL<<0)
4673#define BNX2_TBDC_CAM_OPCODE_OPCODE_SEARCH (0UL<<0)
4674#define BNX2_TBDC_CAM_OPCODE_OPCODE_CACHE_WRITE (1UL<<0)
4675#define BNX2_TBDC_CAM_OPCODE_OPCODE_INVALIDATE (2UL<<0)
4676#define BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_WRITE (4UL<<0)
4677#define BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ (5UL<<0)
4678#define BNX2_TBDC_CAM_OPCODE_OPCODE_RAM_WRITE (6UL<<0)
4679#define BNX2_TBDC_CAM_OPCODE_OPCODE_RAM_READ (7UL<<0)
4680#define BNX2_TBDC_CAM_OPCODE_SMASK_BDIDX (1UL<<4)
4681#define BNX2_TBDC_CAM_OPCODE_SMASK_CID (1UL<<5)
4682#define BNX2_TBDC_CAM_OPCODE_SMASK_CMD (1UL<<6)
4683#define BNX2_TBDC_CAM_OPCODE_WMT_FAILED (1UL<<7)
4684#define BNX2_TBDC_CAM_OPCODE_CAM_VALIDS (0xffUL<<8)
4685
4645 4686
4646/* 4687/*
4647 * tdma_reg definition 4688 * tdma_reg definition
@@ -6930,6 +6971,8 @@ struct bnx2 {
6930 struct bnx2_irq irq_tbl[BNX2_MAX_MSIX_VEC]; 6971 struct bnx2_irq irq_tbl[BNX2_MAX_MSIX_VEC];
6931 int irq_nvecs; 6972 int irq_nvecs;
6932 6973
6974 u8 func;
6975
6933 u8 num_tx_rings; 6976 u8 num_tx_rings;
6934 u8 num_rx_rings; 6977 u8 num_rx_rings;
6935 6978
@@ -7314,6 +7357,8 @@ struct bnx2_rv2p_fw_file {
7314#define BNX2_BC_STATE_RESET_TYPE_VALUE(msg) (BNX2_BC_STATE_RESET_TYPE_SIG | \ 7357#define BNX2_BC_STATE_RESET_TYPE_VALUE(msg) (BNX2_BC_STATE_RESET_TYPE_SIG | \
7315 (msg)) 7358 (msg))
7316 7359
7360#define BNX2_BC_RESET_TYPE 0x000001c0
7361
7317#define BNX2_BC_STATE 0x000001c4 7362#define BNX2_BC_STATE 0x000001c4
7318#define BNX2_BC_STATE_ERR_MASK 0x0000ff00 7363#define BNX2_BC_STATE_ERR_MASK 0x0000ff00
7319#define BNX2_BC_STATE_SIGN 0x42530000 7364#define BNX2_BC_STATE_SIGN 0x42530000
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 7de824184979..dbe97918a7fd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -23,8 +23,8 @@
23 * (you will need to reboot afterwards) */ 23 * (you will need to reboot afterwards) */
24/* #define BNX2X_STOP_ON_ERROR */ 24/* #define BNX2X_STOP_ON_ERROR */
25 25
26#define DRV_MODULE_VERSION "1.72.50-0" 26#define DRV_MODULE_VERSION "1.72.51-0"
27#define DRV_MODULE_RELDATE "2012/04/23" 27#define DRV_MODULE_RELDATE "2012/06/18"
28#define BNX2X_BC_VER 0x040200 28#define BNX2X_BC_VER 0x040200
29 29
30#if defined(CONFIG_DCB) 30#if defined(CONFIG_DCB)
@@ -51,6 +51,7 @@
51 51
52#include "bnx2x_reg.h" 52#include "bnx2x_reg.h"
53#include "bnx2x_fw_defs.h" 53#include "bnx2x_fw_defs.h"
54#include "bnx2x_mfw_req.h"
54#include "bnx2x_hsi.h" 55#include "bnx2x_hsi.h"
55#include "bnx2x_link.h" 56#include "bnx2x_link.h"
56#include "bnx2x_sp.h" 57#include "bnx2x_sp.h"
@@ -248,13 +249,12 @@ enum {
248 BNX2X_MAX_CNIC_ETH_CL_ID_IDX, 249 BNX2X_MAX_CNIC_ETH_CL_ID_IDX,
249}; 250};
250 251
251#define BNX2X_CNIC_START_ETH_CID 48 252#define BNX2X_CNIC_START_ETH_CID(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) *\
252enum { 253 (bp)->max_cos)
253 /* iSCSI L2 */ 254 /* iSCSI L2 */
254 BNX2X_ISCSI_ETH_CID = BNX2X_CNIC_START_ETH_CID, 255#define BNX2X_ISCSI_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp))
255 /* FCoE L2 */ 256 /* FCoE L2 */
256 BNX2X_FCOE_ETH_CID, 257#define BNX2X_FCOE_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp) + 1)
257};
258 258
259/** Additional rings budgeting */ 259/** Additional rings budgeting */
260#ifdef BCM_CNIC 260#ifdef BCM_CNIC
@@ -276,29 +276,30 @@ enum {
276#define FIRST_TX_ONLY_COS_INDEX 1 276#define FIRST_TX_ONLY_COS_INDEX 1
277#define FIRST_TX_COS_INDEX 0 277#define FIRST_TX_COS_INDEX 0
278 278
279/* defines for decodeing the fastpath index and the cos index out of the
280 * transmission queue index
281 */
282#define MAX_TXQS_PER_COS FP_SB_MAX_E1x
283
284#define TXQ_TO_FP(txq_index) ((txq_index) % MAX_TXQS_PER_COS)
285#define TXQ_TO_COS(txq_index) ((txq_index) / MAX_TXQS_PER_COS)
286
287/* rules for calculating the cids of tx-only connections */ 279/* rules for calculating the cids of tx-only connections */
288#define CID_TO_FP(cid) ((cid) % MAX_TXQS_PER_COS) 280#define CID_TO_FP(cid, bp) ((cid) % BNX2X_NUM_NON_CNIC_QUEUES(bp))
289#define CID_COS_TO_TX_ONLY_CID(cid, cos) (cid + cos * MAX_TXQS_PER_COS) 281#define CID_COS_TO_TX_ONLY_CID(cid, cos, bp) \
282 (cid + cos * BNX2X_NUM_NON_CNIC_QUEUES(bp))
290 283
291/* fp index inside class of service range */ 284/* fp index inside class of service range */
292#define FP_COS_TO_TXQ(fp, cos) ((fp)->index + cos * MAX_TXQS_PER_COS) 285#define FP_COS_TO_TXQ(fp, cos, bp) \
293 286 ((fp)->index + cos * BNX2X_NUM_NON_CNIC_QUEUES(bp))
294/* 287
295 * 0..15 eth cos0 288/* Indexes for transmission queues array:
296 * 16..31 eth cos1 if applicable 289 * txdata for RSS i CoS j is at location i + (j * num of RSS)
297 * 32..47 eth cos2 If applicable 290 * txdata for FCoE (if exist) is at location max cos * num of RSS
298 * fcoe queue follows eth queues (16, 32, 48 depending on cos) 291 * txdata for FWD (if exist) is one location after FCoE
292 * txdata for OOO (if exist) is one location after FWD
299 */ 293 */
300#define MAX_ETH_TXQ_IDX(bp) (MAX_TXQS_PER_COS * (bp)->max_cos) 294enum {
301#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp)) 295 FCOE_TXQ_IDX_OFFSET,
296 FWD_TXQ_IDX_OFFSET,
297 OOO_TXQ_IDX_OFFSET,
298};
299#define MAX_ETH_TXQ_IDX(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * (bp)->max_cos)
300#ifdef BCM_CNIC
301#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp) + FCOE_TXQ_IDX_OFFSET)
302#endif
302 303
303/* fast path */ 304/* fast path */
304/* 305/*
@@ -453,6 +454,7 @@ struct bnx2x_agg_info {
453 u16 vlan_tag; 454 u16 vlan_tag;
454 u16 len_on_bd; 455 u16 len_on_bd;
455 u32 rxhash; 456 u32 rxhash;
457 bool l4_rxhash;
456 u16 gro_size; 458 u16 gro_size;
457 u16 full_page; 459 u16 full_page;
458}; 460};
@@ -481,6 +483,8 @@ struct bnx2x_fp_txdata {
481 __le16 *tx_cons_sb; 483 __le16 *tx_cons_sb;
482 484
483 int txq_index; 485 int txq_index;
486 struct bnx2x_fastpath *parent_fp;
487 int tx_ring_size;
484}; 488};
485 489
486enum bnx2x_tpa_mode_t { 490enum bnx2x_tpa_mode_t {
@@ -507,7 +511,7 @@ struct bnx2x_fastpath {
507 enum bnx2x_tpa_mode_t mode; 511 enum bnx2x_tpa_mode_t mode;
508 512
509 u8 max_cos; /* actual number of active tx coses */ 513 u8 max_cos; /* actual number of active tx coses */
510 struct bnx2x_fp_txdata txdata[BNX2X_MULTI_TX_COS]; 514 struct bnx2x_fp_txdata *txdata_ptr[BNX2X_MULTI_TX_COS];
511 515
512 struct sw_rx_bd *rx_buf_ring; /* BDs mappings ring */ 516 struct sw_rx_bd *rx_buf_ring; /* BDs mappings ring */
513 struct sw_rx_page *rx_page_ring; /* SGE pages mappings ring */ 517 struct sw_rx_page *rx_page_ring; /* SGE pages mappings ring */
@@ -547,51 +551,45 @@ struct bnx2x_fastpath {
547 rx_calls; 551 rx_calls;
548 552
549 /* TPA related */ 553 /* TPA related */
550 struct bnx2x_agg_info tpa_info[ETH_MAX_AGGREGATION_QUEUES_E1H_E2]; 554 struct bnx2x_agg_info *tpa_info;
551 u8 disable_tpa; 555 u8 disable_tpa;
552#ifdef BNX2X_STOP_ON_ERROR 556#ifdef BNX2X_STOP_ON_ERROR
553 u64 tpa_queue_used; 557 u64 tpa_queue_used;
554#endif 558#endif
555
556 struct tstorm_per_queue_stats old_tclient;
557 struct ustorm_per_queue_stats old_uclient;
558 struct xstorm_per_queue_stats old_xclient;
559 struct bnx2x_eth_q_stats eth_q_stats;
560 struct bnx2x_eth_q_stats_old eth_q_stats_old;
561
562 /* The size is calculated using the following: 559 /* The size is calculated using the following:
563 sizeof name field from netdev structure + 560 sizeof name field from netdev structure +
564 4 ('-Xx-' string) + 561 4 ('-Xx-' string) +
565 4 (for the digits and to make it DWORD aligned) */ 562 4 (for the digits and to make it DWORD aligned) */
566#define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8) 563#define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8)
567 char name[FP_NAME_SIZE]; 564 char name[FP_NAME_SIZE];
568
569 /* MACs object */
570 struct bnx2x_vlan_mac_obj mac_obj;
571
572 /* Queue State object */
573 struct bnx2x_queue_sp_obj q_obj;
574
575}; 565};
576 566
577#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) 567#define bnx2x_fp(bp, nr, var) ((bp)->fp[(nr)].var)
568#define bnx2x_sp_obj(bp, fp) ((bp)->sp_objs[(fp)->index])
569#define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index]))
570#define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats))
578 571
579/* Use 2500 as a mini-jumbo MTU for FCoE */ 572/* Use 2500 as a mini-jumbo MTU for FCoE */
580#define BNX2X_FCOE_MINI_JUMBO_MTU 2500 573#define BNX2X_FCOE_MINI_JUMBO_MTU 2500
581 574
582/* FCoE L2 `fastpath' entry is right after the eth entries */ 575#define FCOE_IDX_OFFSET 0
583#define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp) 576
584#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX]) 577#define FCOE_IDX(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) + \
585#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var) 578 FCOE_IDX_OFFSET)
586#define bnx2x_fcoe_tx(bp, var) (bnx2x_fcoe_fp(bp)-> \ 579#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX(bp)])
587 txdata[FIRST_TX_COS_INDEX].var) 580#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var)
581#define bnx2x_fcoe_inner_sp_obj(bp) (&bp->sp_objs[FCOE_IDX(bp)])
582#define bnx2x_fcoe_sp_obj(bp, var) (bnx2x_fcoe_inner_sp_obj(bp)->var)
583#define bnx2x_fcoe_tx(bp, var) (bnx2x_fcoe_fp(bp)-> \
584 txdata_ptr[FIRST_TX_COS_INDEX] \
585 ->var)
588 586
589 587
590#define IS_ETH_FP(fp) (fp->index < \ 588#define IS_ETH_FP(fp) (fp->index < \
591 BNX2X_NUM_ETH_QUEUES(fp->bp)) 589 BNX2X_NUM_ETH_QUEUES(fp->bp))
592#ifdef BCM_CNIC 590#ifdef BCM_CNIC
593#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX) 591#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX(fp->bp))
594#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX) 592#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX(bp))
595#else 593#else
596#define IS_FCOE_FP(fp) false 594#define IS_FCOE_FP(fp) false
597#define IS_FCOE_IDX(idx) false 595#define IS_FCOE_IDX(idx) false
@@ -616,6 +614,22 @@ struct bnx2x_fastpath {
616#define TX_BD(x) ((x) & MAX_TX_BD) 614#define TX_BD(x) ((x) & MAX_TX_BD)
617#define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT) 615#define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT)
618 616
617/* number of NEXT_PAGE descriptors may be required during placement */
618#define NEXT_CNT_PER_TX_PKT(bds) \
619 (((bds) + MAX_TX_DESC_CNT - 1) / \
620 MAX_TX_DESC_CNT * NEXT_PAGE_TX_DESC_CNT)
621/* max BDs per tx packet w/o next_pages:
622 * START_BD - describes packed
623 * START_BD(splitted) - includes unpaged data segment for GSO
624 * PARSING_BD - for TSO and CSUM data
625 * Frag BDs - decribes pages for frags
626 */
627#define BDS_PER_TX_PKT 3
628#define MAX_BDS_PER_TX_PKT (MAX_SKB_FRAGS + BDS_PER_TX_PKT)
629/* max BDs per tx packet including next pages */
630#define MAX_DESC_PER_TX_PKT (MAX_BDS_PER_TX_PKT + \
631 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))
632
619/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */ 633/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
620#define NUM_RX_RINGS 8 634#define NUM_RX_RINGS 8
621#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) 635#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
@@ -978,8 +992,8 @@ union cdu_context {
978}; 992};
979 993
980/* CDU host DB constants */ 994/* CDU host DB constants */
981#define CDU_ILT_PAGE_SZ_HW 3 995#define CDU_ILT_PAGE_SZ_HW 2
982#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 64K */ 996#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 32K */
983#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context)) 997#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
984 998
985#ifdef BCM_CNIC 999#ifdef BCM_CNIC
@@ -1182,11 +1196,31 @@ struct bnx2x_prev_path_list {
1182 struct list_head list; 1196 struct list_head list;
1183}; 1197};
1184 1198
1199struct bnx2x_sp_objs {
1200 /* MACs object */
1201 struct bnx2x_vlan_mac_obj mac_obj;
1202
1203 /* Queue State object */
1204 struct bnx2x_queue_sp_obj q_obj;
1205};
1206
1207struct bnx2x_fp_stats {
1208 struct tstorm_per_queue_stats old_tclient;
1209 struct ustorm_per_queue_stats old_uclient;
1210 struct xstorm_per_queue_stats old_xclient;
1211 struct bnx2x_eth_q_stats eth_q_stats;
1212 struct bnx2x_eth_q_stats_old eth_q_stats_old;
1213};
1214
1185struct bnx2x { 1215struct bnx2x {
1186 /* Fields used in the tx and intr/napi performance paths 1216 /* Fields used in the tx and intr/napi performance paths
1187 * are grouped together in the beginning of the structure 1217 * are grouped together in the beginning of the structure
1188 */ 1218 */
1189 struct bnx2x_fastpath *fp; 1219 struct bnx2x_fastpath *fp;
1220 struct bnx2x_sp_objs *sp_objs;
1221 struct bnx2x_fp_stats *fp_stats;
1222 struct bnx2x_fp_txdata *bnx2x_txq;
1223 int bnx2x_txq_size;
1190 void __iomem *regview; 1224 void __iomem *regview;
1191 void __iomem *doorbells; 1225 void __iomem *doorbells;
1192 u16 db_size; 1226 u16 db_size;
@@ -1301,7 +1335,9 @@ struct bnx2x {
1301#define NO_ISCSI_FLAG (1 << 14) 1335#define NO_ISCSI_FLAG (1 << 14)
1302#define NO_FCOE_FLAG (1 << 15) 1336#define NO_FCOE_FLAG (1 << 15)
1303#define BC_SUPPORTS_PFC_STATS (1 << 17) 1337#define BC_SUPPORTS_PFC_STATS (1 << 17)
1338#define BC_SUPPORTS_FCOE_FEATURES (1 << 19)
1304#define USING_SINGLE_MSIX_FLAG (1 << 20) 1339#define USING_SINGLE_MSIX_FLAG (1 << 20)
1340#define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21)
1305 1341
1306#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG) 1342#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG)
1307#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG) 1343#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
@@ -1377,6 +1413,7 @@ struct bnx2x {
1377#define BNX2X_MAX_COS 3 1413#define BNX2X_MAX_COS 3
1378#define BNX2X_MAX_TX_COS 2 1414#define BNX2X_MAX_TX_COS 2
1379 int num_queues; 1415 int num_queues;
1416 int num_napi_queues;
1380 int disable_tpa; 1417 int disable_tpa;
1381 1418
1382 u32 rx_mode; 1419 u32 rx_mode;
@@ -1389,6 +1426,7 @@ struct bnx2x {
1389 u8 igu_dsb_id; 1426 u8 igu_dsb_id;
1390 u8 igu_base_sb; 1427 u8 igu_base_sb;
1391 u8 igu_sb_cnt; 1428 u8 igu_sb_cnt;
1429
1392 dma_addr_t def_status_blk_mapping; 1430 dma_addr_t def_status_blk_mapping;
1393 1431
1394 struct bnx2x_slowpath *slowpath; 1432 struct bnx2x_slowpath *slowpath;
@@ -1420,7 +1458,11 @@ struct bnx2x {
1420 dma_addr_t fw_stats_data_mapping; 1458 dma_addr_t fw_stats_data_mapping;
1421 int fw_stats_data_sz; 1459 int fw_stats_data_sz;
1422 1460
1423 struct hw_context context; 1461 /* For max 196 cids (64*3 + non-eth), 32KB ILT page size and 1KB
1462 * context size we need 8 ILT entries.
1463 */
1464#define ILT_MAX_L2_LINES 8
1465 struct hw_context context[ILT_MAX_L2_LINES];
1424 1466
1425 struct bnx2x_ilt *ilt; 1467 struct bnx2x_ilt *ilt;
1426#define BP_ILT(bp) ((bp)->ilt) 1468#define BP_ILT(bp) ((bp)->ilt)
@@ -1433,13 +1475,14 @@ struct bnx2x {
1433 1475
1434/* 1476/*
1435 * Maximum CID count that might be required by the bnx2x: 1477 * Maximum CID count that might be required by the bnx2x:
1436 * Max Tss * Max_Tx_Multi_Cos + CNIC L2 Clients (FCoE and iSCSI related) 1478 * Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI
1437 */ 1479 */
1438#define BNX2X_L2_CID_COUNT(bp) (MAX_TXQS_PER_COS * BNX2X_MULTI_TX_COS +\ 1480#define BNX2X_L2_CID_COUNT(bp) (BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \
1439 NON_ETH_CONTEXT_USE + CNIC_PRESENT) 1481 + NON_ETH_CONTEXT_USE + CNIC_PRESENT)
1482#define BNX2X_L2_MAX_CID(bp) (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \
1483 + NON_ETH_CONTEXT_USE + CNIC_PRESENT)
1440#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\ 1484#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
1441 ILT_PAGE_CIDS)) 1485 ILT_PAGE_CIDS))
1442#define BNX2X_DB_SIZE(bp) (BNX2X_L2_CID_COUNT(bp) * (1 << BNX2X_DB_SHIFT))
1443 1486
1444 int qm_cid_count; 1487 int qm_cid_count;
1445 1488
@@ -1598,6 +1641,8 @@ struct bnx2x {
1598extern int num_queues; 1641extern int num_queues;
1599#define BNX2X_NUM_QUEUES(bp) (bp->num_queues) 1642#define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
1600#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE) 1643#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE)
1644#define BNX2X_NUM_NON_CNIC_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - \
1645 NON_ETH_CONTEXT_USE)
1601#define BNX2X_NUM_RX_QUEUES(bp) BNX2X_NUM_QUEUES(bp) 1646#define BNX2X_NUM_RX_QUEUES(bp) BNX2X_NUM_QUEUES(bp)
1602 1647
1603#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1) 1648#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1)
@@ -1656,6 +1701,9 @@ struct bnx2x_func_init_params {
1656 continue; \ 1701 continue; \
1657 else 1702 else
1658 1703
1704#define for_each_napi_rx_queue(bp, var) \
1705 for ((var) = 0; (var) < bp->num_napi_queues; (var)++)
1706
1659/* Skip OOO FP */ 1707/* Skip OOO FP */
1660#define for_each_tx_queue(bp, var) \ 1708#define for_each_tx_queue(bp, var) \
1661 for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ 1709 for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
@@ -1709,15 +1757,6 @@ int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
1709 struct bnx2x_vlan_mac_obj *obj, bool set, 1757 struct bnx2x_vlan_mac_obj *obj, bool set,
1710 int mac_type, unsigned long *ramrod_flags); 1758 int mac_type, unsigned long *ramrod_flags);
1711/** 1759/**
1712 * Deletes all MACs configured for the specific MAC object.
1713 *
1714 * @param bp Function driver instance
1715 * @param mac_obj MAC object to cleanup
1716 *
1717 * @return zero if all MACs were cleaned
1718 */
1719
1720/**
1721 * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object 1760 * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object
1722 * 1761 *
1723 * @bp: driver handle 1762 * @bp: driver handle
@@ -1817,6 +1856,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1817#define LOAD_NORMAL 0 1856#define LOAD_NORMAL 0
1818#define LOAD_OPEN 1 1857#define LOAD_OPEN 1
1819#define LOAD_DIAG 2 1858#define LOAD_DIAG 2
1859#define LOAD_LOOPBACK_EXT 3
1820#define UNLOAD_NORMAL 0 1860#define UNLOAD_NORMAL 0
1821#define UNLOAD_CLOSE 1 1861#define UNLOAD_CLOSE 1
1822#define UNLOAD_RECOVERY 2 1862#define UNLOAD_RECOVERY 2
@@ -1899,13 +1939,17 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1899#define PCICFG_LINK_SPEED 0xf0000 1939#define PCICFG_LINK_SPEED 0xf0000
1900#define PCICFG_LINK_SPEED_SHIFT 16 1940#define PCICFG_LINK_SPEED_SHIFT 16
1901 1941
1902 1942#define BNX2X_NUM_TESTS_SF 7
1903#define BNX2X_NUM_TESTS 7 1943#define BNX2X_NUM_TESTS_MF 3
1944#define BNX2X_NUM_TESTS(bp) (IS_MF(bp) ? BNX2X_NUM_TESTS_MF : \
1945 BNX2X_NUM_TESTS_SF)
1904 1946
1905#define BNX2X_PHY_LOOPBACK 0 1947#define BNX2X_PHY_LOOPBACK 0
1906#define BNX2X_MAC_LOOPBACK 1 1948#define BNX2X_MAC_LOOPBACK 1
1949#define BNX2X_EXT_LOOPBACK 2
1907#define BNX2X_PHY_LOOPBACK_FAILED 1 1950#define BNX2X_PHY_LOOPBACK_FAILED 1
1908#define BNX2X_MAC_LOOPBACK_FAILED 2 1951#define BNX2X_MAC_LOOPBACK_FAILED 2
1952#define BNX2X_EXT_LOOPBACK_FAILED 3
1909#define BNX2X_LOOPBACK_FAILED (BNX2X_MAC_LOOPBACK_FAILED | \ 1953#define BNX2X_LOOPBACK_FAILED (BNX2X_MAC_LOOPBACK_FAILED | \
1910 BNX2X_PHY_LOOPBACK_FAILED) 1954 BNX2X_PHY_LOOPBACK_FAILED)
1911 1955
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 8098eea9704d..e879e19eb0d6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -40,12 +40,19 @@
40 * Makes sure the contents of the bp->fp[to].napi is kept 40 * Makes sure the contents of the bp->fp[to].napi is kept
41 * intact. This is done by first copying the napi struct from 41 * intact. This is done by first copying the napi struct from
42 * the target to the source, and then mem copying the entire 42 * the target to the source, and then mem copying the entire
43 * source onto the target 43 * source onto the target. Update txdata pointers and related
44 * content.
44 */ 45 */
45static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) 46static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
46{ 47{
47 struct bnx2x_fastpath *from_fp = &bp->fp[from]; 48 struct bnx2x_fastpath *from_fp = &bp->fp[from];
48 struct bnx2x_fastpath *to_fp = &bp->fp[to]; 49 struct bnx2x_fastpath *to_fp = &bp->fp[to];
50 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
51 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
52 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
53 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
54 int old_max_eth_txqs, new_max_eth_txqs;
55 int old_txdata_index = 0, new_txdata_index = 0;
49 56
50 /* Copy the NAPI object as it has been already initialized */ 57 /* Copy the NAPI object as it has been already initialized */
51 from_fp->napi = to_fp->napi; 58 from_fp->napi = to_fp->napi;
@@ -53,6 +60,30 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
53 /* Move bnx2x_fastpath contents */ 60 /* Move bnx2x_fastpath contents */
54 memcpy(to_fp, from_fp, sizeof(*to_fp)); 61 memcpy(to_fp, from_fp, sizeof(*to_fp));
55 to_fp->index = to; 62 to_fp->index = to;
63
64 /* move sp_objs contents as well, as their indices match fp ones */
65 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
66
67 /* move fp_stats contents as well, as their indices match fp ones */
68 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
69
70 /* Update txdata pointers in fp and move txdata content accordingly:
71 * Each fp consumes 'max_cos' txdata structures, so the index should be
72 * decremented by max_cos x delta.
73 */
74
75 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
76 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
77 (bp)->max_cos;
78 if (from == FCOE_IDX(bp)) {
79 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
80 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
81 }
82
83 memcpy(&bp->bnx2x_txq[old_txdata_index],
84 &bp->bnx2x_txq[new_txdata_index],
85 sizeof(struct bnx2x_fp_txdata));
86 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
56} 87}
57 88
58int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */ 89int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
@@ -190,7 +221,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
190 221
191 if ((netif_tx_queue_stopped(txq)) && 222 if ((netif_tx_queue_stopped(txq)) &&
192 (bp->state == BNX2X_STATE_OPEN) && 223 (bp->state == BNX2X_STATE_OPEN) &&
193 (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4)) 224 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
194 netif_tx_wake_queue(txq); 225 netif_tx_wake_queue(txq);
195 226
196 __netif_tx_unlock(txq); 227 __netif_tx_unlock(txq);
@@ -264,12 +295,20 @@ static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
264 * CQE (calculated by HW). 295 * CQE (calculated by HW).
265 */ 296 */
266static u32 bnx2x_get_rxhash(const struct bnx2x *bp, 297static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
267 const struct eth_fast_path_rx_cqe *cqe) 298 const struct eth_fast_path_rx_cqe *cqe,
299 bool *l4_rxhash)
268{ 300{
269 /* Set Toeplitz hash from CQE */ 301 /* Set Toeplitz hash from CQE */
270 if ((bp->dev->features & NETIF_F_RXHASH) && 302 if ((bp->dev->features & NETIF_F_RXHASH) &&
271 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) 303 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
304 enum eth_rss_hash_type htype;
305
306 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
307 *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
308 (htype == TCP_IPV6_HASH_TYPE);
272 return le32_to_cpu(cqe->rss_hash_result); 309 return le32_to_cpu(cqe->rss_hash_result);
310 }
311 *l4_rxhash = false;
273 return 0; 312 return 0;
274} 313}
275 314
@@ -323,7 +362,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
323 tpa_info->tpa_state = BNX2X_TPA_START; 362 tpa_info->tpa_state = BNX2X_TPA_START;
324 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd); 363 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
325 tpa_info->placement_offset = cqe->placement_offset; 364 tpa_info->placement_offset = cqe->placement_offset;
326 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe); 365 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
327 if (fp->mode == TPA_MODE_GRO) { 366 if (fp->mode == TPA_MODE_GRO) {
328 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len); 367 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
329 tpa_info->full_page = 368 tpa_info->full_page =
@@ -479,7 +518,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
479 where we are and drop the whole packet */ 518 where we are and drop the whole packet */
480 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx); 519 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
481 if (unlikely(err)) { 520 if (unlikely(err)) {
482 fp->eth_q_stats.rx_skb_alloc_failed++; 521 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
483 return err; 522 return err;
484 } 523 }
485 524
@@ -558,6 +597,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
558 skb_reserve(skb, pad + NET_SKB_PAD); 597 skb_reserve(skb, pad + NET_SKB_PAD);
559 skb_put(skb, len); 598 skb_put(skb, len);
560 skb->rxhash = tpa_info->rxhash; 599 skb->rxhash = tpa_info->rxhash;
600 skb->l4_rxhash = tpa_info->l4_rxhash;
561 601
562 skb->protocol = eth_type_trans(skb, bp->dev); 602 skb->protocol = eth_type_trans(skb, bp->dev);
563 skb->ip_summed = CHECKSUM_UNNECESSARY; 603 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -584,7 +624,7 @@ drop:
584 /* drop the packet and keep the buffer in the bin */ 624 /* drop the packet and keep the buffer in the bin */
585 DP(NETIF_MSG_RX_STATUS, 625 DP(NETIF_MSG_RX_STATUS,
586 "Failed to allocate or map a new skb - dropping packet!\n"); 626 "Failed to allocate or map a new skb - dropping packet!\n");
587 fp->eth_q_stats.rx_skb_alloc_failed++; 627 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
588} 628}
589 629
590static int bnx2x_alloc_rx_data(struct bnx2x *bp, 630static int bnx2x_alloc_rx_data(struct bnx2x *bp,
@@ -617,8 +657,10 @@ static int bnx2x_alloc_rx_data(struct bnx2x *bp,
617 return 0; 657 return 0;
618} 658}
619 659
620static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe, 660static
621 struct bnx2x_fastpath *fp) 661void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
662 struct bnx2x_fastpath *fp,
663 struct bnx2x_eth_q_stats *qstats)
622{ 664{
623 /* Do nothing if no IP/L4 csum validation was done */ 665 /* Do nothing if no IP/L4 csum validation was done */
624 666
@@ -632,7 +674,7 @@ static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
632 if (cqe->fast_path_cqe.type_error_flags & 674 if (cqe->fast_path_cqe.type_error_flags &
633 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG | 675 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
634 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) 676 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
635 fp->eth_q_stats.hw_csum_err++; 677 qstats->hw_csum_err++;
636 else 678 else
637 skb->ip_summed = CHECKSUM_UNNECESSARY; 679 skb->ip_summed = CHECKSUM_UNNECESSARY;
638} 680}
@@ -679,6 +721,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
679 enum eth_rx_cqe_type cqe_fp_type; 721 enum eth_rx_cqe_type cqe_fp_type;
680 u16 len, pad, queue; 722 u16 len, pad, queue;
681 u8 *data; 723 u8 *data;
724 bool l4_rxhash;
682 725
683#ifdef BNX2X_STOP_ON_ERROR 726#ifdef BNX2X_STOP_ON_ERROR
684 if (unlikely(bp->panic)) 727 if (unlikely(bp->panic))
@@ -776,7 +819,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
776 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS, 819 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
777 "ERROR flags %x rx packet %u\n", 820 "ERROR flags %x rx packet %u\n",
778 cqe_fp_flags, sw_comp_cons); 821 cqe_fp_flags, sw_comp_cons);
779 fp->eth_q_stats.rx_err_discard_pkt++; 822 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
780 goto reuse_rx; 823 goto reuse_rx;
781 } 824 }
782 825
@@ -789,7 +832,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
789 if (skb == NULL) { 832 if (skb == NULL) {
790 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS, 833 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
791 "ERROR packet dropped because of alloc failure\n"); 834 "ERROR packet dropped because of alloc failure\n");
792 fp->eth_q_stats.rx_skb_alloc_failed++; 835 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
793 goto reuse_rx; 836 goto reuse_rx;
794 } 837 }
795 memcpy(skb->data, data + pad, len); 838 memcpy(skb->data, data + pad, len);
@@ -803,14 +846,15 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
803 skb = build_skb(data, 0); 846 skb = build_skb(data, 0);
804 if (unlikely(!skb)) { 847 if (unlikely(!skb)) {
805 kfree(data); 848 kfree(data);
806 fp->eth_q_stats.rx_skb_alloc_failed++; 849 bnx2x_fp_qstats(bp, fp)->
850 rx_skb_alloc_failed++;
807 goto next_rx; 851 goto next_rx;
808 } 852 }
809 skb_reserve(skb, pad); 853 skb_reserve(skb, pad);
810 } else { 854 } else {
811 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS, 855 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
812 "ERROR packet dropped because of alloc failure\n"); 856 "ERROR packet dropped because of alloc failure\n");
813 fp->eth_q_stats.rx_skb_alloc_failed++; 857 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
814reuse_rx: 858reuse_rx:
815 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod); 859 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
816 goto next_rx; 860 goto next_rx;
@@ -821,13 +865,14 @@ reuse_rx:
821 skb->protocol = eth_type_trans(skb, bp->dev); 865 skb->protocol = eth_type_trans(skb, bp->dev);
822 866
823 /* Set Toeplitz hash for a none-LRO skb */ 867 /* Set Toeplitz hash for a none-LRO skb */
824 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp); 868 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
869 skb->l4_rxhash = l4_rxhash;
825 870
826 skb_checksum_none_assert(skb); 871 skb_checksum_none_assert(skb);
827 872
828 if (bp->dev->features & NETIF_F_RXCSUM) 873 if (bp->dev->features & NETIF_F_RXCSUM)
829 bnx2x_csum_validate(skb, cqe, fp); 874 bnx2x_csum_validate(skb, cqe, fp,
830 875 bnx2x_fp_qstats(bp, fp));
831 876
832 skb_record_rx_queue(skb, fp->rx_queue); 877 skb_record_rx_queue(skb, fp->rx_queue);
833 878
@@ -888,7 +933,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
888 prefetch(fp->rx_cons_sb); 933 prefetch(fp->rx_cons_sb);
889 934
890 for_each_cos_in_tx_queue(fp, cos) 935 for_each_cos_in_tx_queue(fp, cos)
891 prefetch(fp->txdata[cos].tx_cons_sb); 936 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
892 937
893 prefetch(&fp->sb_running_index[SM_RX_ID]); 938 prefetch(&fp->sb_running_index[SM_RX_ID]);
894 napi_schedule(&bnx2x_fp(bp, fp->index, napi)); 939 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
@@ -1205,7 +1250,7 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1205 for_each_tx_queue(bp, i) { 1250 for_each_tx_queue(bp, i) {
1206 struct bnx2x_fastpath *fp = &bp->fp[i]; 1251 struct bnx2x_fastpath *fp = &bp->fp[i];
1207 for_each_cos_in_tx_queue(fp, cos) { 1252 for_each_cos_in_tx_queue(fp, cos) {
1208 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; 1253 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1209 unsigned pkts_compl = 0, bytes_compl = 0; 1254 unsigned pkts_compl = 0, bytes_compl = 0;
1210 1255
1211 u16 sw_prod = txdata->tx_pkt_prod; 1256 u16 sw_prod = txdata->tx_pkt_prod;
@@ -1217,7 +1262,8 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1217 sw_cons++; 1262 sw_cons++;
1218 } 1263 }
1219 netdev_tx_reset_queue( 1264 netdev_tx_reset_queue(
1220 netdev_get_tx_queue(bp->dev, txdata->txq_index)); 1265 netdev_get_tx_queue(bp->dev,
1266 txdata->txq_index));
1221 } 1267 }
1222 } 1268 }
1223} 1269}
@@ -1325,7 +1371,7 @@ void bnx2x_free_irq(struct bnx2x *bp)
1325 free_irq(bp->dev->irq, bp->dev); 1371 free_irq(bp->dev->irq, bp->dev);
1326} 1372}
1327 1373
1328int __devinit bnx2x_enable_msix(struct bnx2x *bp) 1374int bnx2x_enable_msix(struct bnx2x *bp)
1329{ 1375{
1330 int msix_vec = 0, i, rc, req_cnt; 1376 int msix_vec = 0, i, rc, req_cnt;
1331 1377
@@ -1579,6 +1625,8 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
1579#endif 1625#endif
1580 /* Add special queues */ 1626 /* Add special queues */
1581 bp->num_queues += NON_ETH_CONTEXT_USE; 1627 bp->num_queues += NON_ETH_CONTEXT_USE;
1628
1629 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1582} 1630}
1583 1631
1584/** 1632/**
@@ -1607,8 +1655,8 @@ static int bnx2x_set_real_num_queues(struct bnx2x *bp)
1607{ 1655{
1608 int rc, tx, rx; 1656 int rc, tx, rx;
1609 1657
1610 tx = MAX_TXQS_PER_COS * bp->max_cos; 1658 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1611 rx = BNX2X_NUM_ETH_QUEUES(bp); 1659 rx = BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE;
1612 1660
1613/* account for fcoe queue */ 1661/* account for fcoe queue */
1614#ifdef BCM_CNIC 1662#ifdef BCM_CNIC
@@ -1666,14 +1714,13 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1666static int bnx2x_init_rss_pf(struct bnx2x *bp) 1714static int bnx2x_init_rss_pf(struct bnx2x *bp)
1667{ 1715{
1668 int i; 1716 int i;
1669 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
1670 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); 1717 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1671 1718
1672 /* Prepare the initial contents fo the indirection table if RSS is 1719 /* Prepare the initial contents fo the indirection table if RSS is
1673 * enabled 1720 * enabled
1674 */ 1721 */
1675 for (i = 0; i < sizeof(ind_table); i++) 1722 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1676 ind_table[i] = 1723 bp->rss_conf_obj.ind_table[i] =
1677 bp->fp->cl_id + 1724 bp->fp->cl_id +
1678 ethtool_rxfh_indir_default(i, num_eth_queues); 1725 ethtool_rxfh_indir_default(i, num_eth_queues);
1679 1726
@@ -1685,12 +1732,11 @@ static int bnx2x_init_rss_pf(struct bnx2x *bp)
1685 * For 57712 and newer on the other hand it's a per-function 1732 * For 57712 and newer on the other hand it's a per-function
1686 * configuration. 1733 * configuration.
1687 */ 1734 */
1688 return bnx2x_config_rss_eth(bp, ind_table, 1735 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
1689 bp->port.pmf || !CHIP_IS_E1x(bp));
1690} 1736}
1691 1737
1692int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, 1738int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1693 u8 *ind_table, bool config_hash) 1739 bool config_hash)
1694{ 1740{
1695 struct bnx2x_config_rss_params params = {NULL}; 1741 struct bnx2x_config_rss_params params = {NULL};
1696 int i; 1742 int i;
@@ -1713,11 +1759,15 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1713 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags); 1759 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1714 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags); 1760 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1715 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags); 1761 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1762 if (rss_obj->udp_rss_v4)
1763 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
1764 if (rss_obj->udp_rss_v6)
1765 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
1716 1766
1717 /* Hash bits */ 1767 /* Hash bits */
1718 params.rss_result_mask = MULTI_MASK; 1768 params.rss_result_mask = MULTI_MASK;
1719 1769
1720 memcpy(params.ind_table, ind_table, sizeof(params.ind_table)); 1770 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
1721 1771
1722 if (config_hash) { 1772 if (config_hash) {
1723 /* RSS keys */ 1773 /* RSS keys */
@@ -1754,7 +1804,7 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
1754 int rc; 1804 int rc;
1755 unsigned long ramrod_flags = 0, vlan_mac_flags = 0; 1805 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
1756 struct bnx2x_mcast_ramrod_params rparam = {NULL}; 1806 struct bnx2x_mcast_ramrod_params rparam = {NULL};
1757 struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj; 1807 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
1758 1808
1759 /***************** Cleanup MACs' object first *************************/ 1809 /***************** Cleanup MACs' object first *************************/
1760 1810
@@ -1765,7 +1815,7 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
1765 1815
1766 /* Clean ETH primary MAC */ 1816 /* Clean ETH primary MAC */
1767 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags); 1817 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1768 rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags, 1818 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
1769 &ramrod_flags); 1819 &ramrod_flags);
1770 if (rc != 0) 1820 if (rc != 0)
1771 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc); 1821 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
@@ -1851,11 +1901,16 @@ bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
1851static void bnx2x_bz_fp(struct bnx2x *bp, int index) 1901static void bnx2x_bz_fp(struct bnx2x *bp, int index)
1852{ 1902{
1853 struct bnx2x_fastpath *fp = &bp->fp[index]; 1903 struct bnx2x_fastpath *fp = &bp->fp[index];
1904 struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index];
1905
1906 int cos;
1854 struct napi_struct orig_napi = fp->napi; 1907 struct napi_struct orig_napi = fp->napi;
1908 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
1855 /* bzero bnx2x_fastpath contents */ 1909 /* bzero bnx2x_fastpath contents */
1856 if (bp->stats_init) 1910 if (bp->stats_init) {
1911 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
1857 memset(fp, 0, sizeof(*fp)); 1912 memset(fp, 0, sizeof(*fp));
1858 else { 1913 } else {
1859 /* Keep Queue statistics */ 1914 /* Keep Queue statistics */
1860 struct bnx2x_eth_q_stats *tmp_eth_q_stats; 1915 struct bnx2x_eth_q_stats *tmp_eth_q_stats;
1861 struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old; 1916 struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
@@ -1863,26 +1918,27 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
1863 tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats), 1918 tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
1864 GFP_KERNEL); 1919 GFP_KERNEL);
1865 if (tmp_eth_q_stats) 1920 if (tmp_eth_q_stats)
1866 memcpy(tmp_eth_q_stats, &fp->eth_q_stats, 1921 memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats,
1867 sizeof(struct bnx2x_eth_q_stats)); 1922 sizeof(struct bnx2x_eth_q_stats));
1868 1923
1869 tmp_eth_q_stats_old = 1924 tmp_eth_q_stats_old =
1870 kzalloc(sizeof(struct bnx2x_eth_q_stats_old), 1925 kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
1871 GFP_KERNEL); 1926 GFP_KERNEL);
1872 if (tmp_eth_q_stats_old) 1927 if (tmp_eth_q_stats_old)
1873 memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old, 1928 memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old,
1874 sizeof(struct bnx2x_eth_q_stats_old)); 1929 sizeof(struct bnx2x_eth_q_stats_old));
1875 1930
1931 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
1876 memset(fp, 0, sizeof(*fp)); 1932 memset(fp, 0, sizeof(*fp));
1877 1933
1878 if (tmp_eth_q_stats) { 1934 if (tmp_eth_q_stats) {
1879 memcpy(&fp->eth_q_stats, tmp_eth_q_stats, 1935 memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats,
1880 sizeof(struct bnx2x_eth_q_stats)); 1936 sizeof(struct bnx2x_eth_q_stats));
1881 kfree(tmp_eth_q_stats); 1937 kfree(tmp_eth_q_stats);
1882 } 1938 }
1883 1939
1884 if (tmp_eth_q_stats_old) { 1940 if (tmp_eth_q_stats_old) {
1885 memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old, 1941 memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old,
1886 sizeof(struct bnx2x_eth_q_stats_old)); 1942 sizeof(struct bnx2x_eth_q_stats_old));
1887 kfree(tmp_eth_q_stats_old); 1943 kfree(tmp_eth_q_stats_old);
1888 } 1944 }
@@ -1891,7 +1947,7 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
1891 1947
1892 /* Restore the NAPI object as it has been already initialized */ 1948 /* Restore the NAPI object as it has been already initialized */
1893 fp->napi = orig_napi; 1949 fp->napi = orig_napi;
1894 1950 fp->tpa_info = orig_tpa_info;
1895 fp->bp = bp; 1951 fp->bp = bp;
1896 fp->index = index; 1952 fp->index = index;
1897 if (IS_ETH_FP(fp)) 1953 if (IS_ETH_FP(fp))
@@ -1900,6 +1956,16 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
1900 /* Special queues support only one CoS */ 1956 /* Special queues support only one CoS */
1901 fp->max_cos = 1; 1957 fp->max_cos = 1;
1902 1958
1959 /* Init txdata pointers */
1960#ifdef BCM_CNIC
1961 if (IS_FCOE_FP(fp))
1962 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
1963#endif
1964 if (IS_ETH_FP(fp))
1965 for_each_cos_in_tx_queue(fp, cos)
1966 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
1967 BNX2X_NUM_ETH_QUEUES(bp) + index];
1968
1903 /* 1969 /*
1904 * set the tpa flag for each queue. The tpa flag determines the queue 1970 * set the tpa flag for each queue. The tpa flag determines the queue
1905 * minimal size so it must be set prior to queue memory allocation 1971 * minimal size so it must be set prior to queue memory allocation
@@ -1949,11 +2015,13 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1949 /* 2015 /*
1950 * Zero fastpath structures preserving invariants like napi, which are 2016 * Zero fastpath structures preserving invariants like napi, which are
1951 * allocated only once, fp index, max_cos, bp pointer. 2017 * allocated only once, fp index, max_cos, bp pointer.
1952 * Also set fp->disable_tpa. 2018 * Also set fp->disable_tpa and txdata_ptr.
1953 */ 2019 */
1954 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues); 2020 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
1955 for_each_queue(bp, i) 2021 for_each_queue(bp, i)
1956 bnx2x_bz_fp(bp, i); 2022 bnx2x_bz_fp(bp, i);
2023 memset(bp->bnx2x_txq, 0, bp->bnx2x_txq_size *
2024 sizeof(struct bnx2x_fp_txdata));
1957 2025
1958 2026
1959 /* Set the receive queues buffer size */ 2027 /* Set the receive queues buffer size */
@@ -2176,6 +2244,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2176 break; 2244 break;
2177 2245
2178 case LOAD_DIAG: 2246 case LOAD_DIAG:
2247 case LOAD_LOOPBACK_EXT:
2179 bp->state = BNX2X_STATE_DIAG; 2248 bp->state = BNX2X_STATE_DIAG;
2180 break; 2249 break;
2181 2250
@@ -2195,6 +2264,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2195 /* re-read iscsi info */ 2264 /* re-read iscsi info */
2196 bnx2x_get_iscsi_info(bp); 2265 bnx2x_get_iscsi_info(bp);
2197 bnx2x_setup_cnic_irq_info(bp); 2266 bnx2x_setup_cnic_irq_info(bp);
2267 bnx2x_setup_cnic_info(bp);
2198 if (bp->state == BNX2X_STATE_OPEN) 2268 if (bp->state == BNX2X_STATE_OPEN)
2199 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); 2269 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2200#endif 2270#endif
@@ -2215,7 +2285,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2215 return -EBUSY; 2285 return -EBUSY;
2216 } 2286 }
2217 2287
2218 bnx2x_dcbx_init(bp); 2288 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2289 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2290 bnx2x_dcbx_init(bp, false);
2291
2219 return 0; 2292 return 0;
2220 2293
2221#ifndef BNX2X_STOP_ON_ERROR 2294#ifndef BNX2X_STOP_ON_ERROR
@@ -2298,6 +2371,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
2298 2371
2299 /* Stop Tx */ 2372 /* Stop Tx */
2300 bnx2x_tx_disable(bp); 2373 bnx2x_tx_disable(bp);
2374 netdev_reset_tc(bp->dev);
2301 2375
2302#ifdef BCM_CNIC 2376#ifdef BCM_CNIC
2303 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); 2377 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
@@ -2456,8 +2530,8 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
2456#endif 2530#endif
2457 2531
2458 for_each_cos_in_tx_queue(fp, cos) 2532 for_each_cos_in_tx_queue(fp, cos)
2459 if (bnx2x_tx_queue_has_work(&fp->txdata[cos])) 2533 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
2460 bnx2x_tx_int(bp, &fp->txdata[cos]); 2534 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
2461 2535
2462 2536
2463 if (bnx2x_has_rx_work(fp)) { 2537 if (bnx2x_has_rx_work(fp)) {
@@ -2834,7 +2908,6 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2834{ 2908{
2835 struct bnx2x *bp = netdev_priv(dev); 2909 struct bnx2x *bp = netdev_priv(dev);
2836 2910
2837 struct bnx2x_fastpath *fp;
2838 struct netdev_queue *txq; 2911 struct netdev_queue *txq;
2839 struct bnx2x_fp_txdata *txdata; 2912 struct bnx2x_fp_txdata *txdata;
2840 struct sw_tx_bd *tx_buf; 2913 struct sw_tx_bd *tx_buf;
@@ -2844,7 +2917,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2844 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; 2917 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2845 u32 pbd_e2_parsing_data = 0; 2918 u32 pbd_e2_parsing_data = 0;
2846 u16 pkt_prod, bd_prod; 2919 u16 pkt_prod, bd_prod;
2847 int nbd, txq_index, fp_index, txdata_index; 2920 int nbd, txq_index;
2848 dma_addr_t mapping; 2921 dma_addr_t mapping;
2849 u32 xmit_type = bnx2x_xmit_type(bp, skb); 2922 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2850 int i; 2923 int i;
@@ -2863,39 +2936,22 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2863 2936
2864 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT); 2937 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2865 2938
2866 /* decode the fastpath index and the cos index from the txq */ 2939 txdata = &bp->bnx2x_txq[txq_index];
2867 fp_index = TXQ_TO_FP(txq_index);
2868 txdata_index = TXQ_TO_COS(txq_index);
2869
2870#ifdef BCM_CNIC
2871 /*
2872 * Override the above for the FCoE queue:
2873 * - FCoE fp entry is right after the ETH entries.
2874 * - FCoE L2 queue uses bp->txdata[0] only.
2875 */
2876 if (unlikely(!NO_FCOE(bp) && (txq_index ==
2877 bnx2x_fcoe_tx(bp, txq_index)))) {
2878 fp_index = FCOE_IDX;
2879 txdata_index = 0;
2880 }
2881#endif
2882 2940
2883 /* enable this debug print to view the transmission queue being used 2941 /* enable this debug print to view the transmission queue being used
2884 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n", 2942 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
2885 txq_index, fp_index, txdata_index); */ 2943 txq_index, fp_index, txdata_index); */
2886 2944
2887 /* locate the fastpath and the txdata */
2888 fp = &bp->fp[fp_index];
2889 txdata = &fp->txdata[txdata_index];
2890
2891 /* enable this debug print to view the tranmission details 2945 /* enable this debug print to view the tranmission details
2892 DP(NETIF_MSG_TX_QUEUED, 2946 DP(NETIF_MSG_TX_QUEUED,
2893 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n", 2947 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
2894 txdata->cid, fp_index, txdata_index, txdata, fp); */ 2948 txdata->cid, fp_index, txdata_index, txdata, fp); */
2895 2949
2896 if (unlikely(bnx2x_tx_avail(bp, txdata) < 2950 if (unlikely(bnx2x_tx_avail(bp, txdata) <
2897 (skb_shinfo(skb)->nr_frags + 3))) { 2951 skb_shinfo(skb)->nr_frags +
2898 fp->eth_q_stats.driver_xoff++; 2952 BDS_PER_TX_PKT +
2953 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
2954 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
2899 netif_tx_stop_queue(txq); 2955 netif_tx_stop_queue(txq);
2900 BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); 2956 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2901 return NETDEV_TX_BUSY; 2957 return NETDEV_TX_BUSY;
@@ -3169,7 +3225,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3169 3225
3170 txdata->tx_bd_prod += nbd; 3226 txdata->tx_bd_prod += nbd;
3171 3227
3172 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 4)) { 3228 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
3173 netif_tx_stop_queue(txq); 3229 netif_tx_stop_queue(txq);
3174 3230
3175 /* paired memory barrier is in bnx2x_tx_int(), we have to keep 3231 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
@@ -3177,8 +3233,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3177 * fp->bd_tx_cons */ 3233 * fp->bd_tx_cons */
3178 smp_mb(); 3234 smp_mb();
3179 3235
3180 fp->eth_q_stats.driver_xoff++; 3236 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3181 if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4) 3237 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
3182 netif_tx_wake_queue(txq); 3238 netif_tx_wake_queue(txq);
3183 } 3239 }
3184 txdata->tx_pkt++; 3240 txdata->tx_pkt++;
@@ -3243,7 +3299,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3243 /* configure traffic class to transmission queue mapping */ 3299 /* configure traffic class to transmission queue mapping */
3244 for (cos = 0; cos < bp->max_cos; cos++) { 3300 for (cos = 0; cos < bp->max_cos; cos++) {
3245 count = BNX2X_NUM_ETH_QUEUES(bp); 3301 count = BNX2X_NUM_ETH_QUEUES(bp);
3246 offset = cos * MAX_TXQS_PER_COS; 3302 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
3247 netdev_set_tc_queue(dev, cos, count, offset); 3303 netdev_set_tc_queue(dev, cos, count, offset);
3248 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, 3304 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3249 "mapping tc %d to offset %d count %d\n", 3305 "mapping tc %d to offset %d count %d\n",
@@ -3342,7 +3398,7 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3342 if (!skip_tx_queue(bp, fp_index)) { 3398 if (!skip_tx_queue(bp, fp_index)) {
3343 /* fastpath tx rings: tx_buf tx_desc */ 3399 /* fastpath tx rings: tx_buf tx_desc */
3344 for_each_cos_in_tx_queue(fp, cos) { 3400 for_each_cos_in_tx_queue(fp, cos) {
3345 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; 3401 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
3346 3402
3347 DP(NETIF_MSG_IFDOWN, 3403 DP(NETIF_MSG_IFDOWN,
3348 "freeing tx memory of fp %d cos %d cid %d\n", 3404 "freeing tx memory of fp %d cos %d cid %d\n",
@@ -3414,7 +3470,7 @@ static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
3414 cqe_ring_prod); 3470 cqe_ring_prod);
3415 fp->rx_pkt = fp->rx_calls = 0; 3471 fp->rx_pkt = fp->rx_calls = 0;
3416 3472
3417 fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt; 3473 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
3418 3474
3419 return i - failure_cnt; 3475 return i - failure_cnt;
3420} 3476}
@@ -3499,7 +3555,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3499 if (!skip_tx_queue(bp, index)) { 3555 if (!skip_tx_queue(bp, index)) {
3500 /* fastpath tx rings: tx_buf tx_desc */ 3556 /* fastpath tx rings: tx_buf tx_desc */
3501 for_each_cos_in_tx_queue(fp, cos) { 3557 for_each_cos_in_tx_queue(fp, cos) {
3502 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; 3558 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
3503 3559
3504 DP(NETIF_MSG_IFUP, 3560 DP(NETIF_MSG_IFUP,
3505 "allocating tx memory of fp %d cos %d\n", 3561 "allocating tx memory of fp %d cos %d\n",
@@ -3582,7 +3638,7 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3582#ifdef BCM_CNIC 3638#ifdef BCM_CNIC
3583 if (!NO_FCOE(bp)) 3639 if (!NO_FCOE(bp))
3584 /* FCoE */ 3640 /* FCoE */
3585 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX)) 3641 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
3586 /* we will fail load process instead of mark 3642 /* we will fail load process instead of mark
3587 * NO_FCOE_FLAG 3643 * NO_FCOE_FLAG
3588 */ 3644 */
@@ -3607,7 +3663,7 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3607 */ 3663 */
3608 3664
3609 /* move FCoE fp even NO_FCOE_FLAG is on */ 3665 /* move FCoE fp even NO_FCOE_FLAG is on */
3610 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta); 3666 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
3611#endif 3667#endif
3612 bp->num_queues -= delta; 3668 bp->num_queues -= delta;
3613 BNX2X_ERR("Adjusted num of queues from %d to %d\n", 3669 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
@@ -3619,7 +3675,11 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3619 3675
3620void bnx2x_free_mem_bp(struct bnx2x *bp) 3676void bnx2x_free_mem_bp(struct bnx2x *bp)
3621{ 3677{
3678 kfree(bp->fp->tpa_info);
3622 kfree(bp->fp); 3679 kfree(bp->fp);
3680 kfree(bp->sp_objs);
3681 kfree(bp->fp_stats);
3682 kfree(bp->bnx2x_txq);
3623 kfree(bp->msix_table); 3683 kfree(bp->msix_table);
3624 kfree(bp->ilt); 3684 kfree(bp->ilt);
3625} 3685}
@@ -3630,6 +3690,8 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3630 struct msix_entry *tbl; 3690 struct msix_entry *tbl;
3631 struct bnx2x_ilt *ilt; 3691 struct bnx2x_ilt *ilt;
3632 int msix_table_size = 0; 3692 int msix_table_size = 0;
3693 int fp_array_size;
3694 int i;
3633 3695
3634 /* 3696 /*
3635 * The biggest MSI-X table we might need is as a maximum number of fast 3697 * The biggest MSI-X table we might need is as a maximum number of fast
@@ -3638,12 +3700,44 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3638 msix_table_size = bp->igu_sb_cnt + 1; 3700 msix_table_size = bp->igu_sb_cnt + 1;
3639 3701
3640 /* fp array: RSS plus CNIC related L2 queues */ 3702 /* fp array: RSS plus CNIC related L2 queues */
3641 fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE, 3703 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE;
3642 sizeof(*fp), GFP_KERNEL); 3704 BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
3705
3706 fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
3643 if (!fp) 3707 if (!fp)
3644 goto alloc_err; 3708 goto alloc_err;
3709 for (i = 0; i < fp_array_size; i++) {
3710 fp[i].tpa_info =
3711 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
3712 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
3713 if (!(fp[i].tpa_info))
3714 goto alloc_err;
3715 }
3716
3645 bp->fp = fp; 3717 bp->fp = fp;
3646 3718
3719 /* allocate sp objs */
3720 bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs),
3721 GFP_KERNEL);
3722 if (!bp->sp_objs)
3723 goto alloc_err;
3724
3725 /* allocate fp_stats */
3726 bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats),
3727 GFP_KERNEL);
3728 if (!bp->fp_stats)
3729 goto alloc_err;
3730
3731 /* Allocate memory for the transmission queues array */
3732 bp->bnx2x_txq_size = BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS;
3733#ifdef BCM_CNIC
3734 bp->bnx2x_txq_size++;
3735#endif
3736 bp->bnx2x_txq = kcalloc(bp->bnx2x_txq_size,
3737 sizeof(struct bnx2x_fp_txdata), GFP_KERNEL);
3738 if (!bp->bnx2x_txq)
3739 goto alloc_err;
3740
3647 /* msix table */ 3741 /* msix table */
3648 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL); 3742 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
3649 if (!tbl) 3743 if (!tbl)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 7cd99b75347a..dfa757e74296 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -29,6 +29,7 @@
29extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */ 29extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
30 30
31extern int num_queues; 31extern int num_queues;
32extern int int_mode;
32 33
33/************************ Macros ********************************/ 34/************************ Macros ********************************/
34#define BNX2X_PCI_FREE(x, y, size) \ 35#define BNX2X_PCI_FREE(x, y, size) \
@@ -89,12 +90,12 @@ void bnx2x_send_unload_done(struct bnx2x *bp);
89 * bnx2x_config_rss_pf - configure RSS parameters in a PF. 90 * bnx2x_config_rss_pf - configure RSS parameters in a PF.
90 * 91 *
91 * @bp: driver handle 92 * @bp: driver handle
92 * @rss_obj RSS object to use 93 * @rss_obj: RSS object to use
93 * @ind_table: indirection table to configure 94 * @ind_table: indirection table to configure
94 * @config_hash: re-configure RSS hash keys configuration 95 * @config_hash: re-configure RSS hash keys configuration
95 */ 96 */
96int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, 97int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
97 u8 *ind_table, bool config_hash); 98 bool config_hash);
98 99
99/** 100/**
100 * bnx2x__init_func_obj - init function object 101 * bnx2x__init_func_obj - init function object
@@ -244,6 +245,14 @@ int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
244 * @bp: driver handle 245 * @bp: driver handle
245 */ 246 */
246void bnx2x_setup_cnic_irq_info(struct bnx2x *bp); 247void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
248
249/**
250 * bnx2x_setup_cnic_info - provides cnic with updated info
251 *
252 * @bp: driver handle
253 */
254void bnx2x_setup_cnic_info(struct bnx2x *bp);
255
247#endif 256#endif
248 257
249/** 258/**
@@ -409,7 +418,7 @@ void bnx2x_ilt_set_info(struct bnx2x *bp);
409 * 418 *
410 * @bp: driver handle 419 * @bp: driver handle
411 */ 420 */
412void bnx2x_dcbx_init(struct bnx2x *bp); 421void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem);
413 422
414/** 423/**
415 * bnx2x_set_power_state - set power state to the requested value. 424 * bnx2x_set_power_state - set power state to the requested value.
@@ -487,7 +496,7 @@ void bnx2x_netif_start(struct bnx2x *bp);
487 * fills msix_table, requests vectors, updates num_queues 496 * fills msix_table, requests vectors, updates num_queues
488 * according to number of available vectors. 497 * according to number of available vectors.
489 */ 498 */
490int __devinit bnx2x_enable_msix(struct bnx2x *bp); 499int bnx2x_enable_msix(struct bnx2x *bp);
491 500
492/** 501/**
493 * bnx2x_enable_msi - request msi mode from OS, updated internals accordingly 502 * bnx2x_enable_msi - request msi mode from OS, updated internals accordingly
@@ -728,7 +737,7 @@ static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
728{ 737{
729 u8 cos; 738 u8 cos;
730 for_each_cos_in_tx_queue(fp, cos) 739 for_each_cos_in_tx_queue(fp, cos)
731 if (bnx2x_tx_queue_has_work(&fp->txdata[cos])) 740 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
732 return true; 741 return true;
733 return false; 742 return false;
734} 743}
@@ -780,8 +789,10 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
780{ 789{
781 int i; 790 int i;
782 791
792 bp->num_napi_queues = bp->num_queues;
793
783 /* Add NAPI objects */ 794 /* Add NAPI objects */
784 for_each_rx_queue(bp, i) 795 for_each_napi_rx_queue(bp, i)
785 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), 796 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
786 bnx2x_poll, BNX2X_NAPI_WEIGHT); 797 bnx2x_poll, BNX2X_NAPI_WEIGHT);
787} 798}
@@ -790,10 +801,12 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
790{ 801{
791 int i; 802 int i;
792 803
793 for_each_rx_queue(bp, i) 804 for_each_napi_rx_queue(bp, i)
794 netif_napi_del(&bnx2x_fp(bp, i, napi)); 805 netif_napi_del(&bnx2x_fp(bp, i, napi));
795} 806}
796 807
808void bnx2x_set_int_mode(struct bnx2x *bp);
809
797static inline void bnx2x_disable_msi(struct bnx2x *bp) 810static inline void bnx2x_disable_msi(struct bnx2x *bp)
798{ 811{
799 if (bp->flags & USING_MSIX_FLAG) { 812 if (bp->flags & USING_MSIX_FLAG) {
@@ -809,7 +822,8 @@ static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
809{ 822{
810 return num_queues ? 823 return num_queues ?
811 min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) : 824 min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
812 min_t(int, num_online_cpus(), BNX2X_MAX_QUEUES(bp)); 825 min_t(int, netif_get_num_default_rss_queues(),
826 BNX2X_MAX_QUEUES(bp));
813} 827}
814 828
815static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp) 829static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
@@ -865,11 +879,9 @@ static inline int func_by_vn(struct bnx2x *bp, int vn)
865 return 2 * vn + BP_PORT(bp); 879 return 2 * vn + BP_PORT(bp);
866} 880}
867 881
868static inline int bnx2x_config_rss_eth(struct bnx2x *bp, u8 *ind_table, 882static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash)
869 bool config_hash)
870{ 883{
871 return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, ind_table, 884 return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, config_hash);
872 config_hash);
873} 885}
874 886
875/** 887/**
@@ -975,8 +987,8 @@ static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
975 struct bnx2x *bp = fp->bp; 987 struct bnx2x *bp = fp->bp;
976 988
977 /* Configure classification DBs */ 989 /* Configure classification DBs */
978 bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid, 990 bnx2x_init_mac_obj(bp, &bnx2x_sp_obj(bp, fp).mac_obj, fp->cl_id,
979 BP_FUNC(bp), bnx2x_sp(bp, mac_rdata), 991 fp->cid, BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
980 bnx2x_sp_mapping(bp, mac_rdata), 992 bnx2x_sp_mapping(bp, mac_rdata),
981 BNX2X_FILTER_MAC_PENDING, 993 BNX2X_FILTER_MAC_PENDING,
982 &bp->sp_state, obj_type, 994 &bp->sp_state, obj_type,
@@ -1068,12 +1080,14 @@ static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
1068} 1080}
1069 1081
1070static inline void bnx2x_init_txdata(struct bnx2x *bp, 1082static inline void bnx2x_init_txdata(struct bnx2x *bp,
1071 struct bnx2x_fp_txdata *txdata, u32 cid, int txq_index, 1083 struct bnx2x_fp_txdata *txdata, u32 cid,
1072 __le16 *tx_cons_sb) 1084 int txq_index, __le16 *tx_cons_sb,
1085 struct bnx2x_fastpath *fp)
1073{ 1086{
1074 txdata->cid = cid; 1087 txdata->cid = cid;
1075 txdata->txq_index = txq_index; 1088 txdata->txq_index = txq_index;
1076 txdata->tx_cons_sb = tx_cons_sb; 1089 txdata->tx_cons_sb = tx_cons_sb;
1090 txdata->parent_fp = fp;
1077 1091
1078 DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n", 1092 DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n",
1079 txdata->cid, txdata->txq_index); 1093 txdata->cid, txdata->txq_index);
@@ -1107,18 +1121,13 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
1107 bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp); 1121 bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
1108 bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp, 1122 bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
1109 BNX2X_FCOE_ETH_CL_ID_IDX); 1123 BNX2X_FCOE_ETH_CL_ID_IDX);
1110 /** Current BNX2X_FCOE_ETH_CID deffinition implies not more than 1124 bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
1111 * 16 ETH clients per function when CNIC is enabled!
1112 *
1113 * Fix it ASAP!!!
1114 */
1115 bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID;
1116 bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID; 1125 bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
1117 bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id; 1126 bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
1118 bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX; 1127 bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
1119 1128 bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
1120 bnx2x_init_txdata(bp, &bnx2x_fcoe(bp, txdata[0]), 1129 fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
1121 fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX); 1130 fp);
1122 1131
1123 DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index); 1132 DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
1124 1133
@@ -1135,8 +1144,8 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
1135 /* No multi-CoS for FCoE L2 client */ 1144 /* No multi-CoS for FCoE L2 client */
1136 BUG_ON(fp->max_cos != 1); 1145 BUG_ON(fp->max_cos != 1);
1137 1146
1138 bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, &fp->cid, 1, 1147 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
1139 BP_FUNC(bp), bnx2x_sp(bp, q_rdata), 1148 &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
1140 bnx2x_sp_mapping(bp, q_rdata), q_type); 1149 bnx2x_sp_mapping(bp, q_rdata), q_type);
1141 1150
1142 DP(NETIF_MSG_IFUP, 1151 DP(NETIF_MSG_IFUP,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 4f9244bd7530..8a73374e52a7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -972,23 +972,26 @@ void bnx2x_dcbx_init_params(struct bnx2x *bp)
972 bp->dcbx_config_params.admin_default_priority = 0; 972 bp->dcbx_config_params.admin_default_priority = 0;
973} 973}
974 974
975void bnx2x_dcbx_init(struct bnx2x *bp) 975void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem)
976{ 976{
977 u32 dcbx_lldp_params_offset = SHMEM_LLDP_DCBX_PARAMS_NONE; 977 u32 dcbx_lldp_params_offset = SHMEM_LLDP_DCBX_PARAMS_NONE;
978 978
979 /* only PMF can send ADMIN msg to MFW in old MFW versions */
980 if ((!bp->port.pmf) && (!(bp->flags & BC_SUPPORTS_DCBX_MSG_NON_PMF)))
981 return;
982
979 if (bp->dcbx_enabled <= 0) 983 if (bp->dcbx_enabled <= 0)
980 return; 984 return;
981 985
982 /* validate: 986 /* validate:
983 * chip of good for dcbx version, 987 * chip of good for dcbx version,
984 * dcb is wanted 988 * dcb is wanted
985 * the function is pmf
986 * shmem2 contains DCBX support fields 989 * shmem2 contains DCBX support fields
987 */ 990 */
988 DP(BNX2X_MSG_DCB, "dcb_state %d bp->port.pmf %d\n", 991 DP(BNX2X_MSG_DCB, "dcb_state %d bp->port.pmf %d\n",
989 bp->dcb_state, bp->port.pmf); 992 bp->dcb_state, bp->port.pmf);
990 993
991 if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf && 994 if (bp->dcb_state == BNX2X_DCB_STATE_ON &&
992 SHMEM2_HAS(bp, dcbx_lldp_params_offset)) { 995 SHMEM2_HAS(bp, dcbx_lldp_params_offset)) {
993 dcbx_lldp_params_offset = 996 dcbx_lldp_params_offset =
994 SHMEM2_RD(bp, dcbx_lldp_params_offset); 997 SHMEM2_RD(bp, dcbx_lldp_params_offset);
@@ -999,12 +1002,23 @@ void bnx2x_dcbx_init(struct bnx2x *bp)
999 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0); 1002 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
1000 1003
1001 if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) { 1004 if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) {
1002 bnx2x_dcbx_admin_mib_updated_params(bp, 1005 /* need HW lock to avoid scenario of two drivers
1003 dcbx_lldp_params_offset); 1006 * writing in parallel to shmem
1007 */
1008 bnx2x_acquire_hw_lock(bp,
1009 HW_LOCK_RESOURCE_DCBX_ADMIN_MIB);
1010 if (update_shmem)
1011 bnx2x_dcbx_admin_mib_updated_params(bp,
1012 dcbx_lldp_params_offset);
1004 1013
1005 /* Let HW start negotiation */ 1014 /* Let HW start negotiation */
1006 bnx2x_fw_command(bp, 1015 bnx2x_fw_command(bp,
1007 DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0); 1016 DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0);
1017 /* release HW lock only after MFW acks that it finished
1018 * reading values from shmem
1019 */
1020 bnx2x_release_hw_lock(bp,
1021 HW_LOCK_RESOURCE_DCBX_ADMIN_MIB);
1008 } 1022 }
1009 } 1023 }
1010} 1024}
@@ -2063,10 +2077,8 @@ static u8 bnx2x_dcbnl_set_all(struct net_device *netdev)
2063 "Handling parity error recovery. Try again later\n"); 2077 "Handling parity error recovery. Try again later\n");
2064 return 1; 2078 return 1;
2065 } 2079 }
2066 if (netif_running(bp->dev)) { 2080 if (netif_running(bp->dev))
2067 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 2081 bnx2x_dcbx_init(bp, true);
2068 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
2069 }
2070 DP(BNX2X_MSG_DCB, "set_dcbx_params done (%d)\n", rc); 2082 DP(BNX2X_MSG_DCB, "set_dcbx_params done (%d)\n", rc);
2071 if (rc) 2083 if (rc)
2072 return 1; 2084 return 1;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index ddc18ee5c5ae..bff31290198b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -177,6 +177,8 @@ static const struct {
177 4, STATS_FLAGS_FUNC, "recoverable_errors" }, 177 4, STATS_FLAGS_FUNC, "recoverable_errors" },
178 { STATS_OFFSET32(unrecoverable_error), 178 { STATS_OFFSET32(unrecoverable_error),
179 4, STATS_FLAGS_FUNC, "unrecoverable_errors" }, 179 4, STATS_FLAGS_FUNC, "unrecoverable_errors" },
180 { STATS_OFFSET32(eee_tx_lpi),
181 4, STATS_FLAGS_PORT, "Tx LPI entry count"}
180}; 182};
181 183
182#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr) 184#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr)
@@ -185,7 +187,8 @@ static int bnx2x_get_port_type(struct bnx2x *bp)
185 int port_type; 187 int port_type;
186 u32 phy_idx = bnx2x_get_cur_phy_idx(bp); 188 u32 phy_idx = bnx2x_get_cur_phy_idx(bp);
187 switch (bp->link_params.phy[phy_idx].media_type) { 189 switch (bp->link_params.phy[phy_idx].media_type) {
188 case ETH_PHY_SFP_FIBER: 190 case ETH_PHY_SFPP_10G_FIBER:
191 case ETH_PHY_SFP_1G_FIBER:
189 case ETH_PHY_XFP_FIBER: 192 case ETH_PHY_XFP_FIBER:
190 case ETH_PHY_KR: 193 case ETH_PHY_KR:
191 case ETH_PHY_CX4: 194 case ETH_PHY_CX4:
@@ -218,6 +221,11 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
218 (bp->port.supported[cfg_idx ^ 1] & 221 (bp->port.supported[cfg_idx ^ 1] &
219 (SUPPORTED_TP | SUPPORTED_FIBRE)); 222 (SUPPORTED_TP | SUPPORTED_FIBRE));
220 cmd->advertising = bp->port.advertising[cfg_idx]; 223 cmd->advertising = bp->port.advertising[cfg_idx];
224 if (bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type ==
225 ETH_PHY_SFP_1G_FIBER) {
226 cmd->supported &= ~(SUPPORTED_10000baseT_Full);
227 cmd->advertising &= ~(ADVERTISED_10000baseT_Full);
228 }
221 229
222 if ((bp->state == BNX2X_STATE_OPEN) && (bp->link_vars.link_up)) { 230 if ((bp->state == BNX2X_STATE_OPEN) && (bp->link_vars.link_up)) {
223 if (!(bp->flags & MF_FUNC_DIS)) { 231 if (!(bp->flags & MF_FUNC_DIS)) {
@@ -293,7 +301,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
293{ 301{
294 struct bnx2x *bp = netdev_priv(dev); 302 struct bnx2x *bp = netdev_priv(dev);
295 u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config; 303 u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
296 u32 speed; 304 u32 speed, phy_idx;
297 305
298 if (IS_MF_SD(bp)) 306 if (IS_MF_SD(bp))
299 return 0; 307 return 0;
@@ -548,9 +556,11 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
548 "10G half not supported\n"); 556 "10G half not supported\n");
549 return -EINVAL; 557 return -EINVAL;
550 } 558 }
551 559 phy_idx = bnx2x_get_cur_phy_idx(bp);
552 if (!(bp->port.supported[cfg_idx] 560 if (!(bp->port.supported[cfg_idx]
553 & SUPPORTED_10000baseT_Full)) { 561 & SUPPORTED_10000baseT_Full) ||
562 (bp->link_params.phy[phy_idx].media_type ==
563 ETH_PHY_SFP_1G_FIBER)) {
554 DP(BNX2X_MSG_ETHTOOL, 564 DP(BNX2X_MSG_ETHTOOL,
555 "10G full not supported\n"); 565 "10G full not supported\n");
556 return -EINVAL; 566 return -EINVAL;
@@ -824,7 +834,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
824 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver); 834 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
825 strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); 835 strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
826 info->n_stats = BNX2X_NUM_STATS; 836 info->n_stats = BNX2X_NUM_STATS;
827 info->testinfo_len = BNX2X_NUM_TESTS; 837 info->testinfo_len = BNX2X_NUM_TESTS(bp);
828 info->eedump_len = bp->common.flash_size; 838 info->eedump_len = bp->common.flash_size;
829 info->regdump_len = bnx2x_get_regs_len(dev); 839 info->regdump_len = bnx2x_get_regs_len(dev);
830} 840}
@@ -1150,6 +1160,65 @@ static int bnx2x_get_eeprom(struct net_device *dev,
1150 return rc; 1160 return rc;
1151} 1161}
1152 1162
1163static int bnx2x_get_module_eeprom(struct net_device *dev,
1164 struct ethtool_eeprom *ee,
1165 u8 *data)
1166{
1167 struct bnx2x *bp = netdev_priv(dev);
1168 int rc = 0, phy_idx;
1169 u8 *user_data = data;
1170 int remaining_len = ee->len, xfer_size;
1171 unsigned int page_off = ee->offset;
1172
1173 if (!netif_running(dev)) {
1174 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1175 "cannot access eeprom when the interface is down\n");
1176 return -EAGAIN;
1177 }
1178
1179 phy_idx = bnx2x_get_cur_phy_idx(bp);
1180 bnx2x_acquire_phy_lock(bp);
1181 while (!rc && remaining_len > 0) {
1182 xfer_size = (remaining_len > SFP_EEPROM_PAGE_SIZE) ?
1183 SFP_EEPROM_PAGE_SIZE : remaining_len;
1184 rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
1185 &bp->link_params,
1186 page_off,
1187 xfer_size,
1188 user_data);
1189 remaining_len -= xfer_size;
1190 user_data += xfer_size;
1191 page_off += xfer_size;
1192 }
1193
1194 bnx2x_release_phy_lock(bp);
1195 return rc;
1196}
1197
1198static int bnx2x_get_module_info(struct net_device *dev,
1199 struct ethtool_modinfo *modinfo)
1200{
1201 struct bnx2x *bp = netdev_priv(dev);
1202 int phy_idx;
1203 if (!netif_running(dev)) {
1204 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1205 "cannot access eeprom when the interface is down\n");
1206 return -EAGAIN;
1207 }
1208
1209 phy_idx = bnx2x_get_cur_phy_idx(bp);
1210 switch (bp->link_params.phy[phy_idx].media_type) {
1211 case ETH_PHY_SFPP_10G_FIBER:
1212 case ETH_PHY_SFP_1G_FIBER:
1213 case ETH_PHY_DA_TWINAX:
1214 modinfo->type = ETH_MODULE_SFF_8079;
1215 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
1216 return 0;
1217 default:
1218 return -EOPNOTSUPP;
1219 }
1220}
1221
1153static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val, 1222static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
1154 u32 cmd_flags) 1223 u32 cmd_flags)
1155{ 1224{
@@ -1531,18 +1600,146 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
1531 return 0; 1600 return 0;
1532} 1601}
1533 1602
1534static const struct { 1603static char *bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF] = {
1535 char string[ETH_GSTRING_LEN]; 1604 "register_test (offline) ",
1536} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = { 1605 "memory_test (offline) ",
1537 { "register_test (offline)" }, 1606 "int_loopback_test (offline)",
1538 { "memory_test (offline)" }, 1607 "ext_loopback_test (offline)",
1539 { "loopback_test (offline)" }, 1608 "nvram_test (online) ",
1540 { "nvram_test (online)" }, 1609 "interrupt_test (online) ",
1541 { "interrupt_test (online)" }, 1610 "link_test (online) "
1542 { "link_test (online)" },
1543 { "idle check (online)" }
1544}; 1611};
1545 1612
1613static u32 bnx2x_eee_to_adv(u32 eee_adv)
1614{
1615 u32 modes = 0;
1616
1617 if (eee_adv & SHMEM_EEE_100M_ADV)
1618 modes |= ADVERTISED_100baseT_Full;
1619 if (eee_adv & SHMEM_EEE_1G_ADV)
1620 modes |= ADVERTISED_1000baseT_Full;
1621 if (eee_adv & SHMEM_EEE_10G_ADV)
1622 modes |= ADVERTISED_10000baseT_Full;
1623
1624 return modes;
1625}
1626
1627static u32 bnx2x_adv_to_eee(u32 modes, u32 shift)
1628{
1629 u32 eee_adv = 0;
1630 if (modes & ADVERTISED_100baseT_Full)
1631 eee_adv |= SHMEM_EEE_100M_ADV;
1632 if (modes & ADVERTISED_1000baseT_Full)
1633 eee_adv |= SHMEM_EEE_1G_ADV;
1634 if (modes & ADVERTISED_10000baseT_Full)
1635 eee_adv |= SHMEM_EEE_10G_ADV;
1636
1637 return eee_adv << shift;
1638}
1639
1640static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
1641{
1642 struct bnx2x *bp = netdev_priv(dev);
1643 u32 eee_cfg;
1644
1645 if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) {
1646 DP(BNX2X_MSG_ETHTOOL, "BC Version does not support EEE\n");
1647 return -EOPNOTSUPP;
1648 }
1649
1650 eee_cfg = SHMEM2_RD(bp, eee_status[BP_PORT(bp)]);
1651
1652 edata->supported =
1653 bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >>
1654 SHMEM_EEE_SUPPORTED_SHIFT);
1655
1656 edata->advertised =
1657 bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_ADV_STATUS_MASK) >>
1658 SHMEM_EEE_ADV_STATUS_SHIFT);
1659 edata->lp_advertised =
1660 bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_LP_ADV_STATUS_MASK) >>
1661 SHMEM_EEE_LP_ADV_STATUS_SHIFT);
1662
1663 /* SHMEM value is in 16u units --> Convert to 1u units. */
1664 edata->tx_lpi_timer = (eee_cfg & SHMEM_EEE_TIMER_MASK) << 4;
1665
1666 edata->eee_enabled = (eee_cfg & SHMEM_EEE_REQUESTED_BIT) ? 1 : 0;
1667 edata->eee_active = (eee_cfg & SHMEM_EEE_ACTIVE_BIT) ? 1 : 0;
1668 edata->tx_lpi_enabled = (eee_cfg & SHMEM_EEE_LPI_REQUESTED_BIT) ? 1 : 0;
1669
1670 return 0;
1671}
1672
1673static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
1674{
1675 struct bnx2x *bp = netdev_priv(dev);
1676 u32 eee_cfg;
1677 u32 advertised;
1678
1679 if (IS_MF(bp))
1680 return 0;
1681
1682 if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) {
1683 DP(BNX2X_MSG_ETHTOOL, "BC Version does not support EEE\n");
1684 return -EOPNOTSUPP;
1685 }
1686
1687 eee_cfg = SHMEM2_RD(bp, eee_status[BP_PORT(bp)]);
1688
1689 if (!(eee_cfg & SHMEM_EEE_SUPPORTED_MASK)) {
1690 DP(BNX2X_MSG_ETHTOOL, "Board does not support EEE!\n");
1691 return -EOPNOTSUPP;
1692 }
1693
1694 advertised = bnx2x_adv_to_eee(edata->advertised,
1695 SHMEM_EEE_ADV_STATUS_SHIFT);
1696 if ((advertised != (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK))) {
1697 DP(BNX2X_MSG_ETHTOOL,
1698 "Direct manipulation of EEE advertisment is not supported\n");
1699 return -EINVAL;
1700 }
1701
1702 if (edata->tx_lpi_timer > EEE_MODE_TIMER_MASK) {
1703 DP(BNX2X_MSG_ETHTOOL,
1704 "Maximal Tx Lpi timer supported is %x(u)\n",
1705 EEE_MODE_TIMER_MASK);
1706 return -EINVAL;
1707 }
1708 if (edata->tx_lpi_enabled &&
1709 (edata->tx_lpi_timer < EEE_MODE_NVRAM_AGGRESSIVE_TIME)) {
1710 DP(BNX2X_MSG_ETHTOOL,
1711 "Minimal Tx Lpi timer supported is %d(u)\n",
1712 EEE_MODE_NVRAM_AGGRESSIVE_TIME);
1713 return -EINVAL;
1714 }
1715
1716 /* All is well; Apply changes*/
1717 if (edata->eee_enabled)
1718 bp->link_params.eee_mode |= EEE_MODE_ADV_LPI;
1719 else
1720 bp->link_params.eee_mode &= ~EEE_MODE_ADV_LPI;
1721
1722 if (edata->tx_lpi_enabled)
1723 bp->link_params.eee_mode |= EEE_MODE_ENABLE_LPI;
1724 else
1725 bp->link_params.eee_mode &= ~EEE_MODE_ENABLE_LPI;
1726
1727 bp->link_params.eee_mode &= ~EEE_MODE_TIMER_MASK;
1728 bp->link_params.eee_mode |= (edata->tx_lpi_timer &
1729 EEE_MODE_TIMER_MASK) |
1730 EEE_MODE_OVERRIDE_NVRAM |
1731 EEE_MODE_OUTPUT_TIME;
1732
1733 /* Restart link to propogate changes */
1734 if (netif_running(dev)) {
1735 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1736 bnx2x_link_set(bp);
1737 }
1738
1739 return 0;
1740}
1741
1742
1546enum { 1743enum {
1547 BNX2X_CHIP_E1_OFST = 0, 1744 BNX2X_CHIP_E1_OFST = 0,
1548 BNX2X_CHIP_E1H_OFST, 1745 BNX2X_CHIP_E1H_OFST,
@@ -1811,6 +2008,14 @@ static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes)
1811 2008
1812 if (cnt <= 0 && bnx2x_link_test(bp, is_serdes)) 2009 if (cnt <= 0 && bnx2x_link_test(bp, is_serdes))
1813 DP(BNX2X_MSG_ETHTOOL, "Timeout waiting for link up\n"); 2010 DP(BNX2X_MSG_ETHTOOL, "Timeout waiting for link up\n");
2011
2012 cnt = 1400;
2013 while (!bp->link_vars.link_up && cnt--)
2014 msleep(20);
2015
2016 if (cnt <= 0 && !bp->link_vars.link_up)
2017 DP(BNX2X_MSG_ETHTOOL,
2018 "Timeout waiting for link init\n");
1814 } 2019 }
1815} 2020}
1816 2021
@@ -1821,7 +2026,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
1821 unsigned char *packet; 2026 unsigned char *packet;
1822 struct bnx2x_fastpath *fp_rx = &bp->fp[0]; 2027 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
1823 struct bnx2x_fastpath *fp_tx = &bp->fp[0]; 2028 struct bnx2x_fastpath *fp_tx = &bp->fp[0];
1824 struct bnx2x_fp_txdata *txdata = &fp_tx->txdata[0]; 2029 struct bnx2x_fp_txdata *txdata = fp_tx->txdata_ptr[0];
1825 u16 tx_start_idx, tx_idx; 2030 u16 tx_start_idx, tx_idx;
1826 u16 rx_start_idx, rx_idx; 2031 u16 rx_start_idx, rx_idx;
1827 u16 pkt_prod, bd_prod; 2032 u16 pkt_prod, bd_prod;
@@ -1836,13 +2041,16 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
1836 u16 len; 2041 u16 len;
1837 int rc = -ENODEV; 2042 int rc = -ENODEV;
1838 u8 *data; 2043 u8 *data;
1839 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txdata->txq_index); 2044 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev,
2045 txdata->txq_index);
1840 2046
1841 /* check the loopback mode */ 2047 /* check the loopback mode */
1842 switch (loopback_mode) { 2048 switch (loopback_mode) {
1843 case BNX2X_PHY_LOOPBACK: 2049 case BNX2X_PHY_LOOPBACK:
1844 if (bp->link_params.loopback_mode != LOOPBACK_XGXS) 2050 if (bp->link_params.loopback_mode != LOOPBACK_XGXS) {
2051 DP(BNX2X_MSG_ETHTOOL, "PHY loopback not supported\n");
1845 return -EINVAL; 2052 return -EINVAL;
2053 }
1846 break; 2054 break;
1847 case BNX2X_MAC_LOOPBACK: 2055 case BNX2X_MAC_LOOPBACK:
1848 if (CHIP_IS_E3(bp)) { 2056 if (CHIP_IS_E3(bp)) {
@@ -1859,6 +2067,13 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
1859 2067
1860 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 2068 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1861 break; 2069 break;
2070 case BNX2X_EXT_LOOPBACK:
2071 if (bp->link_params.loopback_mode != LOOPBACK_EXT) {
2072 DP(BNX2X_MSG_ETHTOOL,
2073 "Can't configure external loopback\n");
2074 return -EINVAL;
2075 }
2076 break;
1862 default: 2077 default:
1863 DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n"); 2078 DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
1864 return -EINVAL; 2079 return -EINVAL;
@@ -2030,6 +2245,38 @@ static int bnx2x_test_loopback(struct bnx2x *bp)
2030 return rc; 2245 return rc;
2031} 2246}
2032 2247
2248static int bnx2x_test_ext_loopback(struct bnx2x *bp)
2249{
2250 int rc;
2251 u8 is_serdes =
2252 (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
2253
2254 if (BP_NOMCP(bp))
2255 return -ENODEV;
2256
2257 if (!netif_running(bp->dev))
2258 return BNX2X_EXT_LOOPBACK_FAILED;
2259
2260 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2261 rc = bnx2x_nic_load(bp, LOAD_LOOPBACK_EXT);
2262 if (rc) {
2263 DP(BNX2X_MSG_ETHTOOL,
2264 "Can't perform self-test, nic_load (for external lb) failed\n");
2265 return -ENODEV;
2266 }
2267 bnx2x_wait_for_link(bp, 1, is_serdes);
2268
2269 bnx2x_netif_stop(bp, 1);
2270
2271 rc = bnx2x_run_loopback(bp, BNX2X_EXT_LOOPBACK);
2272 if (rc)
2273 DP(BNX2X_MSG_ETHTOOL, "EXT loopback failed (res %d)\n", rc);
2274
2275 bnx2x_netif_start(bp);
2276
2277 return rc;
2278}
2279
2033#define CRC32_RESIDUAL 0xdebb20e3 2280#define CRC32_RESIDUAL 0xdebb20e3
2034 2281
2035static int bnx2x_test_nvram(struct bnx2x *bp) 2282static int bnx2x_test_nvram(struct bnx2x *bp)
@@ -2112,7 +2359,7 @@ static int bnx2x_test_intr(struct bnx2x *bp)
2112 return -ENODEV; 2359 return -ENODEV;
2113 } 2360 }
2114 2361
2115 params.q_obj = &bp->fp->q_obj; 2362 params.q_obj = &bp->sp_objs->q_obj;
2116 params.cmd = BNX2X_Q_CMD_EMPTY; 2363 params.cmd = BNX2X_Q_CMD_EMPTY;
2117 2364
2118 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags); 2365 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
@@ -2125,24 +2372,31 @@ static void bnx2x_self_test(struct net_device *dev,
2125{ 2372{
2126 struct bnx2x *bp = netdev_priv(dev); 2373 struct bnx2x *bp = netdev_priv(dev);
2127 u8 is_serdes; 2374 u8 is_serdes;
2375 int rc;
2376
2128 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { 2377 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2129 netdev_err(bp->dev, 2378 netdev_err(bp->dev,
2130 "Handling parity error recovery. Try again later\n"); 2379 "Handling parity error recovery. Try again later\n");
2131 etest->flags |= ETH_TEST_FL_FAILED; 2380 etest->flags |= ETH_TEST_FL_FAILED;
2132 return; 2381 return;
2133 } 2382 }
2383 DP(BNX2X_MSG_ETHTOOL,
2384 "Self-test command parameters: offline = %d, external_lb = %d\n",
2385 (etest->flags & ETH_TEST_FL_OFFLINE),
2386 (etest->flags & ETH_TEST_FL_EXTERNAL_LB)>>2);
2134 2387
2135 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS); 2388 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp));
2136 2389
2137 if (!netif_running(dev)) 2390 if (!netif_running(dev)) {
2391 DP(BNX2X_MSG_ETHTOOL,
2392 "Can't perform self-test when interface is down\n");
2138 return; 2393 return;
2394 }
2139 2395
2140 /* offline tests are not supported in MF mode */
2141 if (IS_MF(bp))
2142 etest->flags &= ~ETH_TEST_FL_OFFLINE;
2143 is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0; 2396 is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
2144 2397
2145 if (etest->flags & ETH_TEST_FL_OFFLINE) { 2398 /* offline tests are not supported in MF mode */
2399 if ((etest->flags & ETH_TEST_FL_OFFLINE) && !IS_MF(bp)) {
2146 int port = BP_PORT(bp); 2400 int port = BP_PORT(bp);
2147 u32 val; 2401 u32 val;
2148 u8 link_up; 2402 u8 link_up;
@@ -2155,7 +2409,14 @@ static void bnx2x_self_test(struct net_device *dev,
2155 link_up = bp->link_vars.link_up; 2409 link_up = bp->link_vars.link_up;
2156 2410
2157 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 2411 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2158 bnx2x_nic_load(bp, LOAD_DIAG); 2412 rc = bnx2x_nic_load(bp, LOAD_DIAG);
2413 if (rc) {
2414 etest->flags |= ETH_TEST_FL_FAILED;
2415 DP(BNX2X_MSG_ETHTOOL,
2416 "Can't perform self-test, nic_load (for offline) failed\n");
2417 return;
2418 }
2419
2159 /* wait until link state is restored */ 2420 /* wait until link state is restored */
2160 bnx2x_wait_for_link(bp, 1, is_serdes); 2421 bnx2x_wait_for_link(bp, 1, is_serdes);
2161 2422
@@ -2168,30 +2429,51 @@ static void bnx2x_self_test(struct net_device *dev,
2168 etest->flags |= ETH_TEST_FL_FAILED; 2429 etest->flags |= ETH_TEST_FL_FAILED;
2169 } 2430 }
2170 2431
2171 buf[2] = bnx2x_test_loopback(bp); 2432 buf[2] = bnx2x_test_loopback(bp); /* internal LB */
2172 if (buf[2] != 0) 2433 if (buf[2] != 0)
2173 etest->flags |= ETH_TEST_FL_FAILED; 2434 etest->flags |= ETH_TEST_FL_FAILED;
2174 2435
2436 if (etest->flags & ETH_TEST_FL_EXTERNAL_LB) {
2437 buf[3] = bnx2x_test_ext_loopback(bp); /* external LB */
2438 if (buf[3] != 0)
2439 etest->flags |= ETH_TEST_FL_FAILED;
2440 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
2441 }
2442
2175 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 2443 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2176 2444
2177 /* restore input for TX port IF */ 2445 /* restore input for TX port IF */
2178 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val); 2446 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
2179 2447 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
2180 bnx2x_nic_load(bp, LOAD_NORMAL); 2448 if (rc) {
2449 etest->flags |= ETH_TEST_FL_FAILED;
2450 DP(BNX2X_MSG_ETHTOOL,
2451 "Can't perform self-test, nic_load (for online) failed\n");
2452 return;
2453 }
2181 /* wait until link state is restored */ 2454 /* wait until link state is restored */
2182 bnx2x_wait_for_link(bp, link_up, is_serdes); 2455 bnx2x_wait_for_link(bp, link_up, is_serdes);
2183 } 2456 }
2184 if (bnx2x_test_nvram(bp) != 0) { 2457 if (bnx2x_test_nvram(bp) != 0) {
2185 buf[3] = 1; 2458 if (!IS_MF(bp))
2459 buf[4] = 1;
2460 else
2461 buf[0] = 1;
2186 etest->flags |= ETH_TEST_FL_FAILED; 2462 etest->flags |= ETH_TEST_FL_FAILED;
2187 } 2463 }
2188 if (bnx2x_test_intr(bp) != 0) { 2464 if (bnx2x_test_intr(bp) != 0) {
2189 buf[4] = 1; 2465 if (!IS_MF(bp))
2466 buf[5] = 1;
2467 else
2468 buf[1] = 1;
2190 etest->flags |= ETH_TEST_FL_FAILED; 2469 etest->flags |= ETH_TEST_FL_FAILED;
2191 } 2470 }
2192 2471
2193 if (bnx2x_link_test(bp, is_serdes) != 0) { 2472 if (bnx2x_link_test(bp, is_serdes) != 0) {
2194 buf[5] = 1; 2473 if (!IS_MF(bp))
2474 buf[6] = 1;
2475 else
2476 buf[2] = 1;
2195 etest->flags |= ETH_TEST_FL_FAILED; 2477 etest->flags |= ETH_TEST_FL_FAILED;
2196 } 2478 }
2197 2479
@@ -2236,7 +2518,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
2236 return num_stats; 2518 return num_stats;
2237 2519
2238 case ETH_SS_TEST: 2520 case ETH_SS_TEST:
2239 return BNX2X_NUM_TESTS; 2521 return BNX2X_NUM_TESTS(bp);
2240 2522
2241 default: 2523 default:
2242 return -EINVAL; 2524 return -EINVAL;
@@ -2246,7 +2528,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
2246static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 2528static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
2247{ 2529{
2248 struct bnx2x *bp = netdev_priv(dev); 2530 struct bnx2x *bp = netdev_priv(dev);
2249 int i, j, k; 2531 int i, j, k, offset, start;
2250 char queue_name[MAX_QUEUE_NAME_LEN+1]; 2532 char queue_name[MAX_QUEUE_NAME_LEN+1];
2251 2533
2252 switch (stringset) { 2534 switch (stringset) {
@@ -2277,7 +2559,17 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
2277 break; 2559 break;
2278 2560
2279 case ETH_SS_TEST: 2561 case ETH_SS_TEST:
2280 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr)); 2562 /* First 4 tests cannot be done in MF mode */
2563 if (!IS_MF(bp))
2564 start = 0;
2565 else
2566 start = 4;
2567 for (i = 0, j = start; j < (start + BNX2X_NUM_TESTS(bp));
2568 i++, j++) {
2569 offset = sprintf(buf+32*i, "%s",
2570 bnx2x_tests_str_arr[j]);
2571 *(buf+offset) = '\0';
2572 }
2281 break; 2573 break;
2282 } 2574 }
2283} 2575}
@@ -2291,7 +2583,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
2291 2583
2292 if (is_multi(bp)) { 2584 if (is_multi(bp)) {
2293 for_each_eth_queue(bp, i) { 2585 for_each_eth_queue(bp, i) {
2294 hw_stats = (u32 *)&bp->fp[i].eth_q_stats; 2586 hw_stats = (u32 *)&bp->fp_stats[i].eth_q_stats;
2295 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) { 2587 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
2296 if (bnx2x_q_stats_arr[j].size == 0) { 2588 if (bnx2x_q_stats_arr[j].size == 0) {
2297 /* skip this counter */ 2589 /* skip this counter */
@@ -2375,6 +2667,41 @@ static int bnx2x_set_phys_id(struct net_device *dev,
2375 return 0; 2667 return 0;
2376} 2668}
2377 2669
2670static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
2671{
2672
2673 switch (info->flow_type) {
2674 case TCP_V4_FLOW:
2675 case TCP_V6_FLOW:
2676 info->data = RXH_IP_SRC | RXH_IP_DST |
2677 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2678 break;
2679 case UDP_V4_FLOW:
2680 if (bp->rss_conf_obj.udp_rss_v4)
2681 info->data = RXH_IP_SRC | RXH_IP_DST |
2682 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2683 else
2684 info->data = RXH_IP_SRC | RXH_IP_DST;
2685 break;
2686 case UDP_V6_FLOW:
2687 if (bp->rss_conf_obj.udp_rss_v6)
2688 info->data = RXH_IP_SRC | RXH_IP_DST |
2689 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2690 else
2691 info->data = RXH_IP_SRC | RXH_IP_DST;
2692 break;
2693 case IPV4_FLOW:
2694 case IPV6_FLOW:
2695 info->data = RXH_IP_SRC | RXH_IP_DST;
2696 break;
2697 default:
2698 info->data = 0;
2699 break;
2700 }
2701
2702 return 0;
2703}
2704
2378static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, 2705static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
2379 u32 *rules __always_unused) 2706 u32 *rules __always_unused)
2380{ 2707{
@@ -2384,7 +2711,102 @@ static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
2384 case ETHTOOL_GRXRINGS: 2711 case ETHTOOL_GRXRINGS:
2385 info->data = BNX2X_NUM_ETH_QUEUES(bp); 2712 info->data = BNX2X_NUM_ETH_QUEUES(bp);
2386 return 0; 2713 return 0;
2714 case ETHTOOL_GRXFH:
2715 return bnx2x_get_rss_flags(bp, info);
2716 default:
2717 DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
2718 return -EOPNOTSUPP;
2719 }
2720}
2721
2722static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
2723{
2724 int udp_rss_requested;
2725
2726 DP(BNX2X_MSG_ETHTOOL,
2727 "Set rss flags command parameters: flow type = %d, data = %llu\n",
2728 info->flow_type, info->data);
2729
2730 switch (info->flow_type) {
2731 case TCP_V4_FLOW:
2732 case TCP_V6_FLOW:
2733 /* For TCP only 4-tupple hash is supported */
2734 if (info->data ^ (RXH_IP_SRC | RXH_IP_DST |
2735 RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2736 DP(BNX2X_MSG_ETHTOOL,
2737 "Command parameters not supported\n");
2738 return -EINVAL;
2739 } else {
2740 return 0;
2741 }
2387 2742
2743 case UDP_V4_FLOW:
2744 case UDP_V6_FLOW:
2745 /* For UDP either 2-tupple hash or 4-tupple hash is supported */
2746 if (info->data == (RXH_IP_SRC | RXH_IP_DST |
2747 RXH_L4_B_0_1 | RXH_L4_B_2_3))
2748 udp_rss_requested = 1;
2749 else if (info->data == (RXH_IP_SRC | RXH_IP_DST))
2750 udp_rss_requested = 0;
2751 else
2752 return -EINVAL;
2753 if ((info->flow_type == UDP_V4_FLOW) &&
2754 (bp->rss_conf_obj.udp_rss_v4 != udp_rss_requested)) {
2755 bp->rss_conf_obj.udp_rss_v4 = udp_rss_requested;
2756 DP(BNX2X_MSG_ETHTOOL,
2757 "rss re-configured, UDP 4-tupple %s\n",
2758 udp_rss_requested ? "enabled" : "disabled");
2759 return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
2760 } else if ((info->flow_type == UDP_V6_FLOW) &&
2761 (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
2762 bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
2763 return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
2764 DP(BNX2X_MSG_ETHTOOL,
2765 "rss re-configured, UDP 4-tupple %s\n",
2766 udp_rss_requested ? "enabled" : "disabled");
2767 } else {
2768 return 0;
2769 }
2770 case IPV4_FLOW:
2771 case IPV6_FLOW:
2772 /* For IP only 2-tupple hash is supported */
2773 if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) {
2774 DP(BNX2X_MSG_ETHTOOL,
2775 "Command parameters not supported\n");
2776 return -EINVAL;
2777 } else {
2778 return 0;
2779 }
2780 case SCTP_V4_FLOW:
2781 case AH_ESP_V4_FLOW:
2782 case AH_V4_FLOW:
2783 case ESP_V4_FLOW:
2784 case SCTP_V6_FLOW:
2785 case AH_ESP_V6_FLOW:
2786 case AH_V6_FLOW:
2787 case ESP_V6_FLOW:
2788 case IP_USER_FLOW:
2789 case ETHER_FLOW:
2790 /* RSS is not supported for these protocols */
2791 if (info->data) {
2792 DP(BNX2X_MSG_ETHTOOL,
2793 "Command parameters not supported\n");
2794 return -EINVAL;
2795 } else {
2796 return 0;
2797 }
2798 default:
2799 return -EINVAL;
2800 }
2801}
2802
2803static int bnx2x_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
2804{
2805 struct bnx2x *bp = netdev_priv(dev);
2806
2807 switch (info->cmd) {
2808 case ETHTOOL_SRXFH:
2809 return bnx2x_set_rss_flags(bp, info);
2388 default: 2810 default:
2389 DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n"); 2811 DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
2390 return -EOPNOTSUPP; 2812 return -EOPNOTSUPP;
@@ -2424,7 +2846,6 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
2424{ 2846{
2425 struct bnx2x *bp = netdev_priv(dev); 2847 struct bnx2x *bp = netdev_priv(dev);
2426 size_t i; 2848 size_t i;
2427 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
2428 2849
2429 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) { 2850 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
2430 /* 2851 /*
@@ -2436,10 +2857,88 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
2436 * align the received table to the Client ID of the leading RSS 2857 * align the received table to the Client ID of the leading RSS
2437 * queue 2858 * queue
2438 */ 2859 */
2439 ind_table[i] = indir[i] + bp->fp->cl_id; 2860 bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
2440 } 2861 }
2441 2862
2442 return bnx2x_config_rss_eth(bp, ind_table, false); 2863 return bnx2x_config_rss_eth(bp, false);
2864}
2865
2866/**
2867 * bnx2x_get_channels - gets the number of RSS queues.
2868 *
2869 * @dev: net device
2870 * @channels: returns the number of max / current queues
2871 */
2872static void bnx2x_get_channels(struct net_device *dev,
2873 struct ethtool_channels *channels)
2874{
2875 struct bnx2x *bp = netdev_priv(dev);
2876
2877 channels->max_combined = BNX2X_MAX_RSS_COUNT(bp);
2878 channels->combined_count = BNX2X_NUM_ETH_QUEUES(bp);
2879}
2880
2881/**
2882 * bnx2x_change_num_queues - change the number of RSS queues.
2883 *
2884 * @bp: bnx2x private structure
2885 *
2886 * Re-configure interrupt mode to get the new number of MSI-X
2887 * vectors and re-add NAPI objects.
2888 */
2889static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
2890{
2891 bnx2x_del_all_napi(bp);
2892 bnx2x_disable_msi(bp);
2893 BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE;
2894 bnx2x_set_int_mode(bp);
2895 bnx2x_add_all_napi(bp);
2896}
2897
2898/**
2899 * bnx2x_set_channels - sets the number of RSS queues.
2900 *
2901 * @dev: net device
2902 * @channels: includes the number of queues requested
2903 */
2904static int bnx2x_set_channels(struct net_device *dev,
2905 struct ethtool_channels *channels)
2906{
2907 struct bnx2x *bp = netdev_priv(dev);
2908
2909
2910 DP(BNX2X_MSG_ETHTOOL,
2911 "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
2912 channels->rx_count, channels->tx_count, channels->other_count,
2913 channels->combined_count);
2914
2915 /* We don't support separate rx / tx channels.
2916 * We don't allow setting 'other' channels.
2917 */
2918 if (channels->rx_count || channels->tx_count || channels->other_count
2919 || (channels->combined_count == 0) ||
2920 (channels->combined_count > BNX2X_MAX_RSS_COUNT(bp))) {
2921 DP(BNX2X_MSG_ETHTOOL, "command parameters not supported\n");
2922 return -EINVAL;
2923 }
2924
2925 /* Check if there was a change in the active parameters */
2926 if (channels->combined_count == BNX2X_NUM_ETH_QUEUES(bp)) {
2927 DP(BNX2X_MSG_ETHTOOL, "No change in active parameters\n");
2928 return 0;
2929 }
2930
2931 /* Set the requested number of queues in bp context.
2932 * Note that the actual number of queues created during load may be
2933 * less than requested if memory is low.
2934 */
2935 if (unlikely(!netif_running(dev))) {
2936 bnx2x_change_num_queues(bp, channels->combined_count);
2937 return 0;
2938 }
2939 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2940 bnx2x_change_num_queues(bp, channels->combined_count);
2941 return bnx2x_nic_load(bp, LOAD_NORMAL);
2443} 2942}
2444 2943
2445static const struct ethtool_ops bnx2x_ethtool_ops = { 2944static const struct ethtool_ops bnx2x_ethtool_ops = {
@@ -2469,9 +2968,16 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
2469 .set_phys_id = bnx2x_set_phys_id, 2968 .set_phys_id = bnx2x_set_phys_id,
2470 .get_ethtool_stats = bnx2x_get_ethtool_stats, 2969 .get_ethtool_stats = bnx2x_get_ethtool_stats,
2471 .get_rxnfc = bnx2x_get_rxnfc, 2970 .get_rxnfc = bnx2x_get_rxnfc,
2971 .set_rxnfc = bnx2x_set_rxnfc,
2472 .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size, 2972 .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
2473 .get_rxfh_indir = bnx2x_get_rxfh_indir, 2973 .get_rxfh_indir = bnx2x_get_rxfh_indir,
2474 .set_rxfh_indir = bnx2x_set_rxfh_indir, 2974 .set_rxfh_indir = bnx2x_set_rxfh_indir,
2975 .get_channels = bnx2x_get_channels,
2976 .set_channels = bnx2x_set_channels,
2977 .get_module_info = bnx2x_get_module_info,
2978 .get_module_eeprom = bnx2x_get_module_eeprom,
2979 .get_eee = bnx2x_get_eee,
2980 .set_eee = bnx2x_set_eee,
2475}; 2981};
2476 2982
2477void bnx2x_set_ethtool_ops(struct net_device *netdev) 2983void bnx2x_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 426f77aa721a..bbc66ced9c25 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -321,9 +321,7 @@
321#define DISABLE_STATISTIC_COUNTER_ID_VALUE 0 321#define DISABLE_STATISTIC_COUNTER_ID_VALUE 0
322 322
323 323
324/** 324/* This file defines HSI constants common to all microcode flows */
325 * This file defines HSI constants common to all microcode flows
326 */
327 325
328#define PROTOCOL_STATE_BIT_OFFSET 6 326#define PROTOCOL_STATE_BIT_OFFSET 6
329 327
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index a440a8ba85f2..76b6e65790f8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -10,6 +10,7 @@
10#define BNX2X_HSI_H 10#define BNX2X_HSI_H
11 11
12#include "bnx2x_fw_defs.h" 12#include "bnx2x_fw_defs.h"
13#include "bnx2x_mfw_req.h"
13 14
14#define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e 15#define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e
15 16
@@ -33,12 +34,6 @@ struct license_key {
33 u32 reserved_b[4]; 34 u32 reserved_b[4];
34}; 35};
35 36
36
37#define PORT_0 0
38#define PORT_1 1
39#define PORT_MAX 2
40#define NVM_PATH_MAX 2
41
42/**************************************************************************** 37/****************************************************************************
43 * Shared HW configuration * 38 * Shared HW configuration *
44 ****************************************************************************/ 39 ****************************************************************************/
@@ -1067,8 +1062,18 @@ struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */
1067 uses the same defines as link_config */ 1062 uses the same defines as link_config */
1068 u32 mfw_wol_link_cfg2; /* 0x480 */ 1063 u32 mfw_wol_link_cfg2; /* 0x480 */
1069 1064
1070 u32 Reserved2[17]; /* 0x484 */
1071 1065
1066 /* EEE power saving mode */
1067 u32 eee_power_mode; /* 0x484 */
1068 #define PORT_FEAT_CFG_EEE_POWER_MODE_MASK 0x000000FF
1069 #define PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT 0
1070 #define PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED 0x00000000
1071 #define PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED 0x00000001
1072 #define PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE 0x00000002
1073 #define PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY 0x00000003
1074
1075
1076 u32 Reserved2[16]; /* 0x488 */
1072}; 1077};
1073 1078
1074 1079
@@ -1140,6 +1145,7 @@ struct drv_port_mb {
1140 u32 link_status; 1145 u32 link_status;
1141 /* Driver should update this field on any link change event */ 1146 /* Driver should update this field on any link change event */
1142 1147
1148 #define LINK_STATUS_NONE (0<<0)
1143 #define LINK_STATUS_LINK_FLAG_MASK 0x00000001 1149 #define LINK_STATUS_LINK_FLAG_MASK 0x00000001
1144 #define LINK_STATUS_LINK_UP 0x00000001 1150 #define LINK_STATUS_LINK_UP 0x00000001
1145 #define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E 1151 #define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E
@@ -1197,6 +1203,7 @@ struct drv_port_mb {
1197 #define LINK_STATUS_PFC_ENABLED 0x20000000 1203 #define LINK_STATUS_PFC_ENABLED 0x20000000
1198 1204
1199 #define LINK_STATUS_PHYSICAL_LINK_FLAG 0x40000000 1205 #define LINK_STATUS_PHYSICAL_LINK_FLAG 0x40000000
1206 #define LINK_STATUS_SFP_TX_FAULT 0x80000000
1200 1207
1201 u32 port_stx; 1208 u32 port_stx;
1202 1209
@@ -1240,9 +1247,11 @@ struct drv_func_mb {
1240 #define REQ_BC_VER_4_VRFY_AFEX_SUPPORTED 0x00070002 1247 #define REQ_BC_VER_4_VRFY_AFEX_SUPPORTED 0x00070002
1241 #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014 1248 #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014
1242 #define REQ_BC_VER_4_PFC_STATS_SUPPORTED 0x00070201 1249 #define REQ_BC_VER_4_PFC_STATS_SUPPORTED 0x00070201
1250 #define REQ_BC_VER_4_FCOE_FEATURES 0x00070209
1243 1251
1244 #define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000 1252 #define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000
1245 #define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000 1253 #define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000
1254 #define REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF 0x00070401
1246 1255
1247 #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 1256 #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
1248 1257
@@ -1255,6 +1264,8 @@ struct drv_func_mb {
1255 #define DRV_MSG_CODE_DRV_INFO_ACK 0xd8000000 1264 #define DRV_MSG_CODE_DRV_INFO_ACK 0xd8000000
1256 #define DRV_MSG_CODE_DRV_INFO_NACK 0xd9000000 1265 #define DRV_MSG_CODE_DRV_INFO_NACK 0xd9000000
1257 1266
1267 #define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000
1268
1258 #define DRV_MSG_CODE_SET_MF_BW 0xe0000000 1269 #define DRV_MSG_CODE_SET_MF_BW 0xe0000000
1259 #define REQ_BC_VER_4_SET_MF_BW 0x00060202 1270 #define REQ_BC_VER_4_SET_MF_BW 0x00060202
1260 #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000 1271 #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000
@@ -1320,6 +1331,8 @@ struct drv_func_mb {
1320 #define FW_MSG_CODE_DRV_INFO_ACK 0xd8100000 1331 #define FW_MSG_CODE_DRV_INFO_ACK 0xd8100000
1321 #define FW_MSG_CODE_DRV_INFO_NACK 0xd9100000 1332 #define FW_MSG_CODE_DRV_INFO_NACK 0xd9100000
1322 1333
1334 #define FW_MSG_CODE_EEE_RESULS_ACK 0xda100000
1335
1323 #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000 1336 #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000
1324 #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000 1337 #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000
1325 1338
@@ -1383,6 +1396,8 @@ struct drv_func_mb {
1383 1396
1384 #define DRV_STATUS_DRV_INFO_REQ 0x04000000 1397 #define DRV_STATUS_DRV_INFO_REQ 0x04000000
1385 1398
1399 #define DRV_STATUS_EEE_NEGOTIATION_RESULTS 0x08000000
1400
1386 u32 virt_mac_upper; 1401 u32 virt_mac_upper;
1387 #define VIRT_MAC_SIGN_MASK 0xffff0000 1402 #define VIRT_MAC_SIGN_MASK 0xffff0000
1388 #define VIRT_MAC_SIGNATURE 0x564d0000 1403 #define VIRT_MAC_SIGNATURE 0x564d0000
@@ -1613,6 +1628,11 @@ struct fw_flr_mb {
1613 struct fw_flr_ack ack; 1628 struct fw_flr_ack ack;
1614}; 1629};
1615 1630
1631struct eee_remote_vals {
1632 u32 tx_tw;
1633 u32 rx_tw;
1634};
1635
1616/**** SUPPORT FOR SHMEM ARRRAYS *** 1636/**** SUPPORT FOR SHMEM ARRRAYS ***
1617 * The SHMEM HSI is aligned on 32 bit boundaries which makes it difficult to 1637 * The SHMEM HSI is aligned on 32 bit boundaries which makes it difficult to
1618 * define arrays with storage types smaller then unsigned dwords. 1638 * define arrays with storage types smaller then unsigned dwords.
@@ -2053,6 +2073,41 @@ struct shmem2_region {
2053#define DRV_INFO_CONTROL_OP_CODE_MASK 0x0000ff00 2073#define DRV_INFO_CONTROL_OP_CODE_MASK 0x0000ff00
2054#define DRV_INFO_CONTROL_OP_CODE_SHIFT 8 2074#define DRV_INFO_CONTROL_OP_CODE_SHIFT 8
2055 u32 ibft_host_addr; /* initialized by option ROM */ 2075 u32 ibft_host_addr; /* initialized by option ROM */
2076 struct eee_remote_vals eee_remote_vals[PORT_MAX];
2077 u32 reserved[E2_FUNC_MAX];
2078
2079
2080 /* the status of EEE auto-negotiation
2081 * bits 15:0 the configured tx-lpi entry timer value. Depends on bit 31.
2082 * bits 19:16 the supported modes for EEE.
2083 * bits 23:20 the speeds advertised for EEE.
2084 * bits 27:24 the speeds the Link partner advertised for EEE.
2085 * The supported/adv. modes in bits 27:19 originate from the
2086 * SHMEM_EEE_XXX_ADV definitions (where XXX is replaced by speed).
2087 * bit 28 when 1'b1 EEE was requested.
2088 * bit 29 when 1'b1 tx lpi was requested.
2089 * bit 30 when 1'b1 EEE was negotiated. Tx lpi will be asserted iff
2090 * 30:29 are 2'b11.
2091 * bit 31 when 1'b0 bits 15:0 contain a PORT_FEAT_CFG_EEE_ define as
2092 * value. When 1'b1 those bits contains a value times 16 microseconds.
2093 */
2094 u32 eee_status[PORT_MAX];
2095 #define SHMEM_EEE_TIMER_MASK 0x0000ffff
2096 #define SHMEM_EEE_SUPPORTED_MASK 0x000f0000
2097 #define SHMEM_EEE_SUPPORTED_SHIFT 16
2098 #define SHMEM_EEE_ADV_STATUS_MASK 0x00f00000
2099 #define SHMEM_EEE_100M_ADV (1<<0)
2100 #define SHMEM_EEE_1G_ADV (1<<1)
2101 #define SHMEM_EEE_10G_ADV (1<<2)
2102 #define SHMEM_EEE_ADV_STATUS_SHIFT 20
2103 #define SHMEM_EEE_LP_ADV_STATUS_MASK 0x0f000000
2104 #define SHMEM_EEE_LP_ADV_STATUS_SHIFT 24
2105 #define SHMEM_EEE_REQUESTED_BIT 0x10000000
2106 #define SHMEM_EEE_LPI_REQUESTED_BIT 0x20000000
2107 #define SHMEM_EEE_ACTIVE_BIT 0x40000000
2108 #define SHMEM_EEE_TIME_OUTPUT_BIT 0x80000000
2109
2110 u32 sizeof_port_stats;
2056}; 2111};
2057 2112
2058 2113
@@ -2599,6 +2654,9 @@ struct host_port_stats {
2599 u32 pfc_frames_tx_lo; 2654 u32 pfc_frames_tx_lo;
2600 u32 pfc_frames_rx_hi; 2655 u32 pfc_frames_rx_hi;
2601 u32 pfc_frames_rx_lo; 2656 u32 pfc_frames_rx_lo;
2657
2658 u32 eee_lpi_count_hi;
2659 u32 eee_lpi_count_lo;
2602}; 2660};
2603 2661
2604 2662
@@ -2638,118 +2696,6 @@ struct host_func_stats {
2638/* VIC definitions */ 2696/* VIC definitions */
2639#define VICSTATST_UIF_INDEX 2 2697#define VICSTATST_UIF_INDEX 2
2640 2698
2641/* current drv_info version */
2642#define DRV_INFO_CUR_VER 1
2643
2644/* drv_info op codes supported */
2645enum drv_info_opcode {
2646 ETH_STATS_OPCODE,
2647 FCOE_STATS_OPCODE,
2648 ISCSI_STATS_OPCODE
2649};
2650
2651#define ETH_STAT_INFO_VERSION_LEN 12
2652/* Per PCI Function Ethernet Statistics required from the driver */
2653struct eth_stats_info {
2654 /* Function's Driver Version. padded to 12 */
2655 u8 version[ETH_STAT_INFO_VERSION_LEN];
2656 /* Locally Admin Addr. BigEndian EIU48. Actual size is 6 bytes */
2657 u8 mac_local[8];
2658 u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
2659 u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
2660 u32 mtu_size; /* MTU Size. Note : Negotiated MTU */
2661 u32 feature_flags; /* Feature_Flags. */
2662#define FEATURE_ETH_CHKSUM_OFFLOAD_MASK 0x01
2663#define FEATURE_ETH_LSO_MASK 0x02
2664#define FEATURE_ETH_BOOTMODE_MASK 0x1C
2665#define FEATURE_ETH_BOOTMODE_SHIFT 2
2666#define FEATURE_ETH_BOOTMODE_NONE (0x0 << 2)
2667#define FEATURE_ETH_BOOTMODE_PXE (0x1 << 2)
2668#define FEATURE_ETH_BOOTMODE_ISCSI (0x2 << 2)
2669#define FEATURE_ETH_BOOTMODE_FCOE (0x3 << 2)
2670#define FEATURE_ETH_TOE_MASK 0x20
2671 u32 lso_max_size; /* LSO MaxOffloadSize. */
2672 u32 lso_min_seg_cnt; /* LSO MinSegmentCount. */
2673 /* Num Offloaded Connections TCP_IPv4. */
2674 u32 ipv4_ofld_cnt;
2675 /* Num Offloaded Connections TCP_IPv6. */
2676 u32 ipv6_ofld_cnt;
2677 u32 promiscuous_mode; /* Promiscuous Mode. non-zero true */
2678 u32 txq_size; /* TX Descriptors Queue Size */
2679 u32 rxq_size; /* RX Descriptors Queue Size */
2680 /* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */
2681 u32 txq_avg_depth;
2682 /* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */
2683 u32 rxq_avg_depth;
2684 /* IOV_Offload. 0=none; 1=MultiQueue, 2=VEB 3= VEPA*/
2685 u32 iov_offload;
2686 /* Number of NetQueue/VMQ Config'd. */
2687 u32 netq_cnt;
2688 u32 vf_cnt; /* Num VF assigned to this PF. */
2689};
2690
2691/* Per PCI Function FCOE Statistics required from the driver */
2692struct fcoe_stats_info {
2693 u8 version[12]; /* Function's Driver Version. */
2694 u8 mac_local[8]; /* Locally Admin Addr. */
2695 u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
2696 u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
2697 /* QoS Priority (per 802.1p). 0-7255 */
2698 u32 qos_priority;
2699 u32 txq_size; /* FCoE TX Descriptors Queue Size. */
2700 u32 rxq_size; /* FCoE RX Descriptors Queue Size. */
2701 /* FCoE TX Descriptor Queue Avg Depth. */
2702 u32 txq_avg_depth;
2703 /* FCoE RX Descriptors Queue Avg Depth. */
2704 u32 rxq_avg_depth;
2705 u32 rx_frames_lo; /* FCoE RX Frames received. */
2706 u32 rx_frames_hi; /* FCoE RX Frames received. */
2707 u32 rx_bytes_lo; /* FCoE RX Bytes received. */
2708 u32 rx_bytes_hi; /* FCoE RX Bytes received. */
2709 u32 tx_frames_lo; /* FCoE TX Frames sent. */
2710 u32 tx_frames_hi; /* FCoE TX Frames sent. */
2711 u32 tx_bytes_lo; /* FCoE TX Bytes sent. */
2712 u32 tx_bytes_hi; /* FCoE TX Bytes sent. */
2713};
2714
2715/* Per PCI Function iSCSI Statistics required from the driver*/
2716struct iscsi_stats_info {
2717 u8 version[12]; /* Function's Driver Version. */
2718 u8 mac_local[8]; /* Locally Admin iSCSI MAC Addr. */
2719 u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
2720 /* QoS Priority (per 802.1p). 0-7255 */
2721 u32 qos_priority;
2722 u8 initiator_name[64]; /* iSCSI Boot Initiator Node name. */
2723 u8 ww_port_name[64]; /* iSCSI World wide port name */
2724 u8 boot_target_name[64];/* iSCSI Boot Target Name. */
2725 u8 boot_target_ip[16]; /* iSCSI Boot Target IP. */
2726 u32 boot_target_portal; /* iSCSI Boot Target Portal. */
2727 u8 boot_init_ip[16]; /* iSCSI Boot Initiator IP Address. */
2728 u32 max_frame_size; /* Max Frame Size. bytes */
2729 u32 txq_size; /* PDU TX Descriptors Queue Size. */
2730 u32 rxq_size; /* PDU RX Descriptors Queue Size. */
2731 u32 txq_avg_depth; /* PDU TX Descriptor Queue Avg Depth. */
2732 u32 rxq_avg_depth; /* PDU RX Descriptors Queue Avg Depth. */
2733 u32 rx_pdus_lo; /* iSCSI PDUs received. */
2734 u32 rx_pdus_hi; /* iSCSI PDUs received. */
2735 u32 rx_bytes_lo; /* iSCSI RX Bytes received. */
2736 u32 rx_bytes_hi; /* iSCSI RX Bytes received. */
2737 u32 tx_pdus_lo; /* iSCSI PDUs sent. */
2738 u32 tx_pdus_hi; /* iSCSI PDUs sent. */
2739 u32 tx_bytes_lo; /* iSCSI PDU TX Bytes sent. */
2740 u32 tx_bytes_hi; /* iSCSI PDU TX Bytes sent. */
2741 u32 pcp_prior_map_tbl; /* C-PCP to S-PCP Priority MapTable.
2742 * 9 nibbles, the position of each nibble
2743 * represents the C-PCP value, the value
2744 * of the nibble = S-PCP value.
2745 */
2746};
2747
2748union drv_info_to_mcp {
2749 struct eth_stats_info ether_stat;
2750 struct fcoe_stats_info fcoe_stat;
2751 struct iscsi_stats_info iscsi_stat;
2752};
2753 2699
2754/* stats collected for afex. 2700/* stats collected for afex.
2755 * NOTE: structure is exactly as expected to be received by the switch. 2701 * NOTE: structure is exactly as expected to be received by the switch.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 6e7d5c0843b4..e04b282c039d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -285,7 +285,6 @@
285#define ETS_E3B0_PBF_MIN_W_VAL (10000) 285#define ETS_E3B0_PBF_MIN_W_VAL (10000)
286 286
287#define MAX_PACKET_SIZE (9700) 287#define MAX_PACKET_SIZE (9700)
288#define WC_UC_TIMEOUT 100
289#define MAX_KR_LINK_RETRY 4 288#define MAX_KR_LINK_RETRY 4
290 289
291/**********************************************************/ 290/**********************************************************/
@@ -1306,6 +1305,94 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
1306 1305
1307 return 0; 1306 return 0;
1308} 1307}
1308
1309/******************************************************************/
1310/* EEE section */
1311/******************************************************************/
1312static u8 bnx2x_eee_has_cap(struct link_params *params)
1313{
1314 struct bnx2x *bp = params->bp;
1315
1316 if (REG_RD(bp, params->shmem2_base) <=
1317 offsetof(struct shmem2_region, eee_status[params->port]))
1318 return 0;
1319
1320 return 1;
1321}
1322
1323static int bnx2x_eee_nvram_to_time(u32 nvram_mode, u32 *idle_timer)
1324{
1325 switch (nvram_mode) {
1326 case PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED:
1327 *idle_timer = EEE_MODE_NVRAM_BALANCED_TIME;
1328 break;
1329 case PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE:
1330 *idle_timer = EEE_MODE_NVRAM_AGGRESSIVE_TIME;
1331 break;
1332 case PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY:
1333 *idle_timer = EEE_MODE_NVRAM_LATENCY_TIME;
1334 break;
1335 default:
1336 *idle_timer = 0;
1337 break;
1338 }
1339
1340 return 0;
1341}
1342
1343static int bnx2x_eee_time_to_nvram(u32 idle_timer, u32 *nvram_mode)
1344{
1345 switch (idle_timer) {
1346 case EEE_MODE_NVRAM_BALANCED_TIME:
1347 *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED;
1348 break;
1349 case EEE_MODE_NVRAM_AGGRESSIVE_TIME:
1350 *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE;
1351 break;
1352 case EEE_MODE_NVRAM_LATENCY_TIME:
1353 *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY;
1354 break;
1355 default:
1356 *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED;
1357 break;
1358 }
1359
1360 return 0;
1361}
1362
1363static u32 bnx2x_eee_calc_timer(struct link_params *params)
1364{
1365 u32 eee_mode, eee_idle;
1366 struct bnx2x *bp = params->bp;
1367
1368 if (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) {
1369 if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
1370 /* time value in eee_mode --> used directly*/
1371 eee_idle = params->eee_mode & EEE_MODE_TIMER_MASK;
1372 } else {
1373 /* hsi value in eee_mode --> time */
1374 if (bnx2x_eee_nvram_to_time(params->eee_mode &
1375 EEE_MODE_NVRAM_MASK,
1376 &eee_idle))
1377 return 0;
1378 }
1379 } else {
1380 /* hsi values in nvram --> time*/
1381 eee_mode = ((REG_RD(bp, params->shmem_base +
1382 offsetof(struct shmem_region, dev_info.
1383 port_feature_config[params->port].
1384 eee_power_mode)) &
1385 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
1386 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
1387
1388 if (bnx2x_eee_nvram_to_time(eee_mode, &eee_idle))
1389 return 0;
1390 }
1391
1392 return eee_idle;
1393}
1394
1395
1309/******************************************************************/ 1396/******************************************************************/
1310/* PFC section */ 1397/* PFC section */
1311/******************************************************************/ 1398/******************************************************************/
@@ -1540,7 +1627,7 @@ static void bnx2x_umac_enable(struct link_params *params,
1540 /* Reset UMAC */ 1627 /* Reset UMAC */
1541 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 1628 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
1542 (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)); 1629 (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
1543 usleep_range(1000, 1000); 1630 usleep_range(1000, 2000);
1544 1631
1545 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 1632 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
1546 (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)); 1633 (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
@@ -1642,7 +1729,7 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
1642 /* Hard reset */ 1729 /* Hard reset */
1643 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 1730 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
1644 MISC_REGISTERS_RESET_REG_2_XMAC); 1731 MISC_REGISTERS_RESET_REG_2_XMAC);
1645 usleep_range(1000, 1000); 1732 usleep_range(1000, 2000);
1646 1733
1647 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 1734 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
1648 MISC_REGISTERS_RESET_REG_2_XMAC); 1735 MISC_REGISTERS_RESET_REG_2_XMAC);
@@ -1672,7 +1759,7 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
1672 /* Soft reset */ 1759 /* Soft reset */
1673 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 1760 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
1674 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT); 1761 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
1675 usleep_range(1000, 1000); 1762 usleep_range(1000, 2000);
1676 1763
1677 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 1764 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
1678 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT); 1765 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
@@ -1730,6 +1817,14 @@ static int bnx2x_xmac_enable(struct link_params *params,
1730 /* update PFC */ 1817 /* update PFC */
1731 bnx2x_update_pfc_xmac(params, vars, 0); 1818 bnx2x_update_pfc_xmac(params, vars, 0);
1732 1819
1820 if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) {
1821 DP(NETIF_MSG_LINK, "Setting XMAC for EEE\n");
1822 REG_WR(bp, xmac_base + XMAC_REG_EEE_TIMERS_HI, 0x1380008);
1823 REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x1);
1824 } else {
1825 REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x0);
1826 }
1827
1733 /* Enable TX and RX */ 1828 /* Enable TX and RX */
1734 val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN; 1829 val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN;
1735 1830
@@ -1785,11 +1880,6 @@ static int bnx2x_emac_enable(struct link_params *params,
1785 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE, 1880 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
1786 EMAC_TX_MODE_RESET); 1881 EMAC_TX_MODE_RESET);
1787 1882
1788 if (CHIP_REV_IS_SLOW(bp)) {
1789 /* config GMII mode */
1790 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
1791 EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
1792 } else { /* ASIC */
1793 /* pause enable/disable */ 1883 /* pause enable/disable */
1794 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE, 1884 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
1795 EMAC_RX_MODE_FLOW_EN); 1885 EMAC_RX_MODE_FLOW_EN);
@@ -1812,7 +1902,6 @@ static int bnx2x_emac_enable(struct link_params *params,
1812 } else 1902 } else
1813 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE, 1903 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
1814 EMAC_TX_MODE_FLOW_EN); 1904 EMAC_TX_MODE_FLOW_EN);
1815 }
1816 1905
1817 /* KEEP_VLAN_TAG, promiscuous */ 1906 /* KEEP_VLAN_TAG, promiscuous */
1818 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE); 1907 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
@@ -1851,23 +1940,23 @@ static int bnx2x_emac_enable(struct link_params *params,
1851 val &= ~0x810; 1940 val &= ~0x810;
1852 EMAC_WR(bp, EMAC_REG_EMAC_MODE, val); 1941 EMAC_WR(bp, EMAC_REG_EMAC_MODE, val);
1853 1942
1854 /* enable emac */ 1943 /* Enable emac */
1855 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 1); 1944 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 1);
1856 1945
1857 /* enable emac for jumbo packets */ 1946 /* Enable emac for jumbo packets */
1858 EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE, 1947 EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE,
1859 (EMAC_RX_MTU_SIZE_JUMBO_ENA | 1948 (EMAC_RX_MTU_SIZE_JUMBO_ENA |
1860 (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD))); 1949 (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)));
1861 1950
1862 /* strip CRC */ 1951 /* Strip CRC */
1863 REG_WR(bp, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1); 1952 REG_WR(bp, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
1864 1953
1865 /* disable the NIG in/out to the bmac */ 1954 /* Disable the NIG in/out to the bmac */
1866 REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x0); 1955 REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x0);
1867 REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0); 1956 REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
1868 REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x0); 1957 REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
1869 1958
1870 /* enable the NIG in/out to the emac */ 1959 /* Enable the NIG in/out to the emac */
1871 REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1); 1960 REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1);
1872 val = 0; 1961 val = 0;
1873 if ((params->feature_config_flags & 1962 if ((params->feature_config_flags &
@@ -1902,7 +1991,7 @@ static void bnx2x_update_pfc_bmac1(struct link_params *params,
1902 wb_data[1] = 0; 1991 wb_data[1] = 0;
1903 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2); 1992 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2);
1904 1993
1905 /* tx control */ 1994 /* TX control */
1906 val = 0xc0; 1995 val = 0xc0;
1907 if (!(params->feature_config_flags & 1996 if (!(params->feature_config_flags &
1908 FEATURE_CONFIG_PFC_ENABLED) && 1997 FEATURE_CONFIG_PFC_ENABLED) &&
@@ -1962,7 +2051,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
1962 wb_data[0] &= ~(1<<2); 2051 wb_data[0] &= ~(1<<2);
1963 } else { 2052 } else {
1964 DP(NETIF_MSG_LINK, "PFC is disabled\n"); 2053 DP(NETIF_MSG_LINK, "PFC is disabled\n");
1965 /* disable PFC RX & TX & STATS and set 8 COS */ 2054 /* Disable PFC RX & TX & STATS and set 8 COS */
1966 wb_data[0] = 0x8; 2055 wb_data[0] = 0x8;
1967 wb_data[1] = 0; 2056 wb_data[1] = 0;
1968 } 2057 }
@@ -2056,7 +2145,7 @@ static int bnx2x_pfc_brb_get_config_params(
2056 PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE; 2145 PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE;
2057 config_val->pauseable_th.full_xon = 2146 config_val->pauseable_th.full_xon =
2058 PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE; 2147 PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE;
2059 /* non pause able*/ 2148 /* Non pause able*/
2060 config_val->non_pauseable_th.pause_xoff = 2149 config_val->non_pauseable_th.pause_xoff =
2061 PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; 2150 PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
2062 config_val->non_pauseable_th.pause_xon = 2151 config_val->non_pauseable_th.pause_xon =
@@ -2084,7 +2173,7 @@ static int bnx2x_pfc_brb_get_config_params(
2084 PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE; 2173 PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE;
2085 config_val->pauseable_th.full_xon = 2174 config_val->pauseable_th.full_xon =
2086 PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE; 2175 PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE;
2087 /* non pause able*/ 2176 /* Non pause able*/
2088 config_val->non_pauseable_th.pause_xoff = 2177 config_val->non_pauseable_th.pause_xoff =
2089 PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; 2178 PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
2090 config_val->non_pauseable_th.pause_xon = 2179 config_val->non_pauseable_th.pause_xon =
@@ -2114,7 +2203,7 @@ static int bnx2x_pfc_brb_get_config_params(
2114 PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE; 2203 PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE;
2115 config_val->pauseable_th.full_xon = 2204 config_val->pauseable_th.full_xon =
2116 PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE; 2205 PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE;
2117 /* non pause able*/ 2206 /* Non pause able*/
2118 config_val->non_pauseable_th.pause_xoff = 2207 config_val->non_pauseable_th.pause_xoff =
2119 PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; 2208 PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
2120 config_val->non_pauseable_th.pause_xon = 2209 config_val->non_pauseable_th.pause_xon =
@@ -2132,7 +2221,7 @@ static int bnx2x_pfc_brb_get_config_params(
2132 PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE; 2221 PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE;
2133 config_val->pauseable_th.full_xon = 2222 config_val->pauseable_th.full_xon =
2134 PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE; 2223 PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE;
2135 /* non pause able*/ 2224 /* Non pause able*/
2136 config_val->non_pauseable_th.pause_xoff = 2225 config_val->non_pauseable_th.pause_xoff =
2137 PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; 2226 PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
2138 config_val->non_pauseable_th.pause_xon = 2227 config_val->non_pauseable_th.pause_xon =
@@ -2189,7 +2278,7 @@ static void bnx2x_pfc_brb_get_e3b0_config_params(
2189 2278
2190 if (pfc_params->cos0_pauseable != 2279 if (pfc_params->cos0_pauseable !=
2191 pfc_params->cos1_pauseable) { 2280 pfc_params->cos1_pauseable) {
2192 /* nonpauseable= Lossy + pauseable = Lossless*/ 2281 /* Nonpauseable= Lossy + pauseable = Lossless*/
2193 e3b0_val->lb_guarantied = 2282 e3b0_val->lb_guarantied =
2194 PFC_E3B0_2P_MIX_PAUSE_LB_GUART; 2283 PFC_E3B0_2P_MIX_PAUSE_LB_GUART;
2195 e3b0_val->mac_0_class_t_guarantied = 2284 e3b0_val->mac_0_class_t_guarantied =
@@ -2388,9 +2477,9 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
2388* This function is needed because NIG ARB_CREDIT_WEIGHT_X are 2477* This function is needed because NIG ARB_CREDIT_WEIGHT_X are
2389* not continues and ARB_CREDIT_WEIGHT_0 + offset is suitable. 2478* not continues and ARB_CREDIT_WEIGHT_0 + offset is suitable.
2390******************************************************************************/ 2479******************************************************************************/
2391int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp, 2480static int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp,
2392 u8 cos_entry, 2481 u8 cos_entry,
2393 u32 priority_mask, u8 port) 2482 u32 priority_mask, u8 port)
2394{ 2483{
2395 u32 nig_reg_rx_priority_mask_add = 0; 2484 u32 nig_reg_rx_priority_mask_add = 0;
2396 2485
@@ -2440,6 +2529,16 @@ static void bnx2x_update_mng(struct link_params *params, u32 link_status)
2440 port_mb[params->port].link_status), link_status); 2529 port_mb[params->port].link_status), link_status);
2441} 2530}
2442 2531
2532static void bnx2x_update_mng_eee(struct link_params *params, u32 eee_status)
2533{
2534 struct bnx2x *bp = params->bp;
2535
2536 if (bnx2x_eee_has_cap(params))
2537 REG_WR(bp, params->shmem2_base +
2538 offsetof(struct shmem2_region,
2539 eee_status[params->port]), eee_status);
2540}
2541
2443static void bnx2x_update_pfc_nig(struct link_params *params, 2542static void bnx2x_update_pfc_nig(struct link_params *params,
2444 struct link_vars *vars, 2543 struct link_vars *vars,
2445 struct bnx2x_nig_brb_pfc_port_params *nig_params) 2544 struct bnx2x_nig_brb_pfc_port_params *nig_params)
@@ -2507,7 +2606,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
2507 REG_WR(bp, port ? NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 : 2606 REG_WR(bp, port ? NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 :
2508 NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7); 2607 NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
2509 2608
2510 /* output enable for RX_XCM # IF */ 2609 /* Output enable for RX_XCM # IF */
2511 REG_WR(bp, port ? NIG_REG_XCM1_OUT_EN : 2610 REG_WR(bp, port ? NIG_REG_XCM1_OUT_EN :
2512 NIG_REG_XCM0_OUT_EN, xcm_out_en); 2611 NIG_REG_XCM0_OUT_EN, xcm_out_en);
2513 2612
@@ -2556,10 +2655,10 @@ int bnx2x_update_pfc(struct link_params *params,
2556 2655
2557 bnx2x_update_mng(params, vars->link_status); 2656 bnx2x_update_mng(params, vars->link_status);
2558 2657
2559 /* update NIG params */ 2658 /* Update NIG params */
2560 bnx2x_update_pfc_nig(params, vars, pfc_params); 2659 bnx2x_update_pfc_nig(params, vars, pfc_params);
2561 2660
2562 /* update BRB params */ 2661 /* Update BRB params */
2563 bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params); 2662 bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params);
2564 if (bnx2x_status) 2663 if (bnx2x_status)
2565 return bnx2x_status; 2664 return bnx2x_status;
@@ -2614,7 +2713,7 @@ static int bnx2x_bmac1_enable(struct link_params *params,
2614 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL, 2713 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
2615 wb_data, 2); 2714 wb_data, 2);
2616 2715
2617 /* tx MAC SA */ 2716 /* TX MAC SA */
2618 wb_data[0] = ((params->mac_addr[2] << 24) | 2717 wb_data[0] = ((params->mac_addr[2] << 24) |
2619 (params->mac_addr[3] << 16) | 2718 (params->mac_addr[3] << 16) |
2620 (params->mac_addr[4] << 8) | 2719 (params->mac_addr[4] << 8) |
@@ -2623,7 +2722,7 @@ static int bnx2x_bmac1_enable(struct link_params *params,
2623 params->mac_addr[1]); 2722 params->mac_addr[1]);
2624 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2); 2723 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2);
2625 2724
2626 /* mac control */ 2725 /* MAC control */
2627 val = 0x3; 2726 val = 0x3;
2628 if (is_lb) { 2727 if (is_lb) {
2629 val |= 0x4; 2728 val |= 0x4;
@@ -2633,24 +2732,24 @@ static int bnx2x_bmac1_enable(struct link_params *params,
2633 wb_data[1] = 0; 2732 wb_data[1] = 0;
2634 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2); 2733 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2);
2635 2734
2636 /* set rx mtu */ 2735 /* Set rx mtu */
2637 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 2736 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
2638 wb_data[1] = 0; 2737 wb_data[1] = 0;
2639 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2); 2738 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2);
2640 2739
2641 bnx2x_update_pfc_bmac1(params, vars); 2740 bnx2x_update_pfc_bmac1(params, vars);
2642 2741
2643 /* set tx mtu */ 2742 /* Set tx mtu */
2644 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 2743 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
2645 wb_data[1] = 0; 2744 wb_data[1] = 0;
2646 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2); 2745 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2);
2647 2746
2648 /* set cnt max size */ 2747 /* Set cnt max size */
2649 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 2748 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
2650 wb_data[1] = 0; 2749 wb_data[1] = 0;
2651 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2); 2750 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2);
2652 2751
2653 /* configure safc */ 2752 /* Configure SAFC */
2654 wb_data[0] = 0x1000200; 2753 wb_data[0] = 0x1000200;
2655 wb_data[1] = 0; 2754 wb_data[1] = 0;
2656 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS, 2755 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
@@ -2684,7 +2783,7 @@ static int bnx2x_bmac2_enable(struct link_params *params,
2684 2783
2685 udelay(30); 2784 udelay(30);
2686 2785
2687 /* tx MAC SA */ 2786 /* TX MAC SA */
2688 wb_data[0] = ((params->mac_addr[2] << 24) | 2787 wb_data[0] = ((params->mac_addr[2] << 24) |
2689 (params->mac_addr[3] << 16) | 2788 (params->mac_addr[3] << 16) |
2690 (params->mac_addr[4] << 8) | 2789 (params->mac_addr[4] << 8) |
@@ -2703,18 +2802,18 @@ static int bnx2x_bmac2_enable(struct link_params *params,
2703 wb_data, 2); 2802 wb_data, 2);
2704 udelay(30); 2803 udelay(30);
2705 2804
2706 /* set rx mtu */ 2805 /* Set RX MTU */
2707 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 2806 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
2708 wb_data[1] = 0; 2807 wb_data[1] = 0;
2709 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2); 2808 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2);
2710 udelay(30); 2809 udelay(30);
2711 2810
2712 /* set tx mtu */ 2811 /* Set TX MTU */
2713 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 2812 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
2714 wb_data[1] = 0; 2813 wb_data[1] = 0;
2715 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2); 2814 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2);
2716 udelay(30); 2815 udelay(30);
2717 /* set cnt max size */ 2816 /* Set cnt max size */
2718 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2; 2817 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2;
2719 wb_data[1] = 0; 2818 wb_data[1] = 0;
2720 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2); 2819 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2);
@@ -2732,15 +2831,15 @@ static int bnx2x_bmac_enable(struct link_params *params,
2732 u8 port = params->port; 2831 u8 port = params->port;
2733 struct bnx2x *bp = params->bp; 2832 struct bnx2x *bp = params->bp;
2734 u32 val; 2833 u32 val;
2735 /* reset and unreset the BigMac */ 2834 /* Reset and unreset the BigMac */
2736 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 2835 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2737 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); 2836 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2738 msleep(1); 2837 usleep_range(1000, 2000);
2739 2838
2740 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 2839 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2741 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); 2840 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2742 2841
2743 /* enable access for bmac registers */ 2842 /* Enable access for bmac registers */
2744 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1); 2843 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
2745 2844
2746 /* Enable BMAC according to BMAC type*/ 2845 /* Enable BMAC according to BMAC type*/
@@ -2798,7 +2897,7 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
2798 BIGMAC_REGISTER_BMAC_CONTROL, 2897 BIGMAC_REGISTER_BMAC_CONTROL,
2799 wb_data, 2); 2898 wb_data, 2);
2800 } 2899 }
2801 msleep(1); 2900 usleep_range(1000, 2000);
2802 } 2901 }
2803} 2902}
2804 2903
@@ -2810,17 +2909,16 @@ static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
2810 u32 init_crd, crd; 2909 u32 init_crd, crd;
2811 u32 count = 1000; 2910 u32 count = 1000;
2812 2911
2813 /* disable port */ 2912 /* Disable port */
2814 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1); 2913 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
2815 2914
2816 /* wait for init credit */ 2915 /* Wait for init credit */
2817 init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4); 2916 init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
2818 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8); 2917 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2819 DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd); 2918 DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd);
2820 2919
2821 while ((init_crd != crd) && count) { 2920 while ((init_crd != crd) && count) {
2822 msleep(5); 2921 usleep_range(5000, 10000);
2823
2824 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8); 2922 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2825 count--; 2923 count--;
2826 } 2924 }
@@ -2837,18 +2935,18 @@ static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
2837 line_speed == SPEED_1000 || 2935 line_speed == SPEED_1000 ||
2838 line_speed == SPEED_2500) { 2936 line_speed == SPEED_2500) {
2839 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1); 2937 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1);
2840 /* update threshold */ 2938 /* Update threshold */
2841 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0); 2939 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
2842 /* update init credit */ 2940 /* Update init credit */
2843 init_crd = 778; /* (800-18-4) */ 2941 init_crd = 778; /* (800-18-4) */
2844 2942
2845 } else { 2943 } else {
2846 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + 2944 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
2847 ETH_OVREHEAD)/16; 2945 ETH_OVREHEAD)/16;
2848 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); 2946 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
2849 /* update threshold */ 2947 /* Update threshold */
2850 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh); 2948 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
2851 /* update init credit */ 2949 /* Update init credit */
2852 switch (line_speed) { 2950 switch (line_speed) {
2853 case SPEED_10000: 2951 case SPEED_10000:
2854 init_crd = thresh + 553 - 22; 2952 init_crd = thresh + 553 - 22;
@@ -2863,12 +2961,12 @@ static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
2863 DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n", 2961 DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
2864 line_speed, init_crd); 2962 line_speed, init_crd);
2865 2963
2866 /* probe the credit changes */ 2964 /* Probe the credit changes */
2867 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1); 2965 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
2868 msleep(5); 2966 usleep_range(5000, 10000);
2869 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0); 2967 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
2870 2968
2871 /* enable port */ 2969 /* Enable port */
2872 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0); 2970 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
2873 return 0; 2971 return 0;
2874} 2972}
@@ -2935,7 +3033,7 @@ static int bnx2x_cl22_write(struct bnx2x *bp,
2935 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, 3033 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
2936 mode & ~EMAC_MDIO_MODE_CLAUSE_45); 3034 mode & ~EMAC_MDIO_MODE_CLAUSE_45);
2937 3035
2938 /* address */ 3036 /* Address */
2939 tmp = ((phy->addr << 21) | (reg << 16) | val | 3037 tmp = ((phy->addr << 21) | (reg << 16) | val |
2940 EMAC_MDIO_COMM_COMMAND_WRITE_22 | 3038 EMAC_MDIO_COMM_COMMAND_WRITE_22 |
2941 EMAC_MDIO_COMM_START_BUSY); 3039 EMAC_MDIO_COMM_START_BUSY);
@@ -2971,7 +3069,7 @@ static int bnx2x_cl22_read(struct bnx2x *bp,
2971 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, 3069 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
2972 mode & ~EMAC_MDIO_MODE_CLAUSE_45); 3070 mode & ~EMAC_MDIO_MODE_CLAUSE_45);
2973 3071
2974 /* address */ 3072 /* Address */
2975 val = ((phy->addr << 21) | (reg << 16) | 3073 val = ((phy->addr << 21) | (reg << 16) |
2976 EMAC_MDIO_COMM_COMMAND_READ_22 | 3074 EMAC_MDIO_COMM_COMMAND_READ_22 |
2977 EMAC_MDIO_COMM_START_BUSY); 3075 EMAC_MDIO_COMM_START_BUSY);
@@ -3009,7 +3107,7 @@ static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
3009 if (phy->flags & FLAGS_MDC_MDIO_WA_B0) 3107 if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
3010 bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, 3108 bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
3011 EMAC_MDIO_STATUS_10MB); 3109 EMAC_MDIO_STATUS_10MB);
3012 /* address */ 3110 /* Address */
3013 val = ((phy->addr << 21) | (devad << 16) | reg | 3111 val = ((phy->addr << 21) | (devad << 16) | reg |
3014 EMAC_MDIO_COMM_COMMAND_ADDRESS | 3112 EMAC_MDIO_COMM_COMMAND_ADDRESS |
3015 EMAC_MDIO_COMM_START_BUSY); 3113 EMAC_MDIO_COMM_START_BUSY);
@@ -3030,7 +3128,7 @@ static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
3030 *ret_val = 0; 3128 *ret_val = 0;
3031 rc = -EFAULT; 3129 rc = -EFAULT;
3032 } else { 3130 } else {
3033 /* data */ 3131 /* Data */
3034 val = ((phy->addr << 21) | (devad << 16) | 3132 val = ((phy->addr << 21) | (devad << 16) |
3035 EMAC_MDIO_COMM_COMMAND_READ_45 | 3133 EMAC_MDIO_COMM_COMMAND_READ_45 |
3036 EMAC_MDIO_COMM_START_BUSY); 3134 EMAC_MDIO_COMM_START_BUSY);
@@ -3078,7 +3176,7 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
3078 bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, 3176 bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
3079 EMAC_MDIO_STATUS_10MB); 3177 EMAC_MDIO_STATUS_10MB);
3080 3178
3081 /* address */ 3179 /* Address */
3082 tmp = ((phy->addr << 21) | (devad << 16) | reg | 3180 tmp = ((phy->addr << 21) | (devad << 16) | reg |
3083 EMAC_MDIO_COMM_COMMAND_ADDRESS | 3181 EMAC_MDIO_COMM_COMMAND_ADDRESS |
3084 EMAC_MDIO_COMM_START_BUSY); 3182 EMAC_MDIO_COMM_START_BUSY);
@@ -3098,7 +3196,7 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
3098 netdev_err(bp->dev, "MDC/MDIO access timeout\n"); 3196 netdev_err(bp->dev, "MDC/MDIO access timeout\n");
3099 rc = -EFAULT; 3197 rc = -EFAULT;
3100 } else { 3198 } else {
3101 /* data */ 3199 /* Data */
3102 tmp = ((phy->addr << 21) | (devad << 16) | val | 3200 tmp = ((phy->addr << 21) | (devad << 16) | val |
3103 EMAC_MDIO_COMM_COMMAND_WRITE_45 | 3201 EMAC_MDIO_COMM_COMMAND_WRITE_45 |
3104 EMAC_MDIO_COMM_START_BUSY); 3202 EMAC_MDIO_COMM_START_BUSY);
@@ -3188,23 +3286,23 @@ static int bnx2x_bsc_read(struct link_params *params,
3188 3286
3189 xfer_cnt = 16 - lc_addr; 3287 xfer_cnt = 16 - lc_addr;
3190 3288
3191 /* enable the engine */ 3289 /* Enable the engine */
3192 val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); 3290 val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
3193 val |= MCPR_IMC_COMMAND_ENABLE; 3291 val |= MCPR_IMC_COMMAND_ENABLE;
3194 REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); 3292 REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
3195 3293
3196 /* program slave device ID */ 3294 /* Program slave device ID */
3197 val = (sl_devid << 16) | sl_addr; 3295 val = (sl_devid << 16) | sl_addr;
3198 REG_WR(bp, MCP_REG_MCPR_IMC_SLAVE_CONTROL, val); 3296 REG_WR(bp, MCP_REG_MCPR_IMC_SLAVE_CONTROL, val);
3199 3297
3200 /* start xfer with 0 byte to update the address pointer ???*/ 3298 /* Start xfer with 0 byte to update the address pointer ???*/
3201 val = (MCPR_IMC_COMMAND_ENABLE) | 3299 val = (MCPR_IMC_COMMAND_ENABLE) |
3202 (MCPR_IMC_COMMAND_WRITE_OP << 3300 (MCPR_IMC_COMMAND_WRITE_OP <<
3203 MCPR_IMC_COMMAND_OPERATION_BITSHIFT) | 3301 MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
3204 (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | (0); 3302 (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | (0);
3205 REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); 3303 REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
3206 3304
3207 /* poll for completion */ 3305 /* Poll for completion */
3208 i = 0; 3306 i = 0;
3209 val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); 3307 val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
3210 while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) { 3308 while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
@@ -3220,7 +3318,7 @@ static int bnx2x_bsc_read(struct link_params *params,
3220 if (rc == -EFAULT) 3318 if (rc == -EFAULT)
3221 return rc; 3319 return rc;
3222 3320
3223 /* start xfer with read op */ 3321 /* Start xfer with read op */
3224 val = (MCPR_IMC_COMMAND_ENABLE) | 3322 val = (MCPR_IMC_COMMAND_ENABLE) |
3225 (MCPR_IMC_COMMAND_READ_OP << 3323 (MCPR_IMC_COMMAND_READ_OP <<
3226 MCPR_IMC_COMMAND_OPERATION_BITSHIFT) | 3324 MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
@@ -3228,7 +3326,7 @@ static int bnx2x_bsc_read(struct link_params *params,
3228 (xfer_cnt); 3326 (xfer_cnt);
3229 REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); 3327 REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
3230 3328
3231 /* poll for completion */ 3329 /* Poll for completion */
3232 i = 0; 3330 i = 0;
3233 val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); 3331 val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
3234 while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) { 3332 while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
@@ -3331,7 +3429,7 @@ static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy,
3331 port = port ^ 1; 3429 port = port ^ 1;
3332 3430
3333 lane = (port<<1) + path; 3431 lane = (port<<1) + path;
3334 } else { /* two port mode - no port swap */ 3432 } else { /* Two port mode - no port swap */
3335 3433
3336 /* Figure out path swap value */ 3434 /* Figure out path swap value */
3337 path_swap_ovr = 3435 path_swap_ovr =
@@ -3409,7 +3507,7 @@ static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
3409 3507
3410 val = SERDES_RESET_BITS << (port*16); 3508 val = SERDES_RESET_BITS << (port*16);
3411 3509
3412 /* reset and unreset the SerDes/XGXS */ 3510 /* Reset and unreset the SerDes/XGXS */
3413 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val); 3511 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
3414 udelay(500); 3512 udelay(500);
3415 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); 3513 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
@@ -3430,7 +3528,7 @@ static void bnx2x_xgxs_deassert(struct link_params *params)
3430 3528
3431 val = XGXS_RESET_BITS << (port*16); 3529 val = XGXS_RESET_BITS << (port*16);
3432 3530
3433 /* reset and unreset the SerDes/XGXS */ 3531 /* Reset and unreset the SerDes/XGXS */
3434 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val); 3532 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
3435 udelay(500); 3533 udelay(500);
3436 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); 3534 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
@@ -3522,7 +3620,7 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params,
3522{ 3620{
3523 u16 val; 3621 u16 val;
3524 struct bnx2x *bp = params->bp; 3622 struct bnx2x *bp = params->bp;
3525 /* read modify write pause advertizing */ 3623 /* Read modify write pause advertizing */
3526 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val); 3624 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val);
3527 3625
3528 val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH; 3626 val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
@@ -3657,44 +3755,35 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
3657static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, 3755static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3658 struct link_params *params, 3756 struct link_params *params,
3659 struct link_vars *vars) { 3757 struct link_vars *vars) {
3660 u16 val16 = 0, lane, bam37 = 0; 3758 u16 val16 = 0, lane, i;
3661 struct bnx2x *bp = params->bp; 3759 struct bnx2x *bp = params->bp;
3760 static struct bnx2x_reg_set reg_set[] = {
3761 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
3762 {MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 0},
3763 {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0},
3764 {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0xff},
3765 {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0x5555},
3766 {MDIO_PMA_DEVAD, MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0},
3767 {MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0x7415},
3768 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190},
3769 /* Disable Autoneg: re-enable it after adv is done. */
3770 {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0}
3771 };
3662 DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n"); 3772 DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n");
3663 /* Set to default registers that may be overriden by 10G force */ 3773 /* Set to default registers that may be overriden by 10G force */
3664 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3774 for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
3665 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7); 3775 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
3666 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 3776 reg_set[i].val);
3667 MDIO_WC_REG_PAR_DET_10G_CTRL, 0);
3668 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3669 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0);
3670 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3671 MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0xff);
3672 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3673 MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0x5555);
3674 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
3675 MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0);
3676 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3677 MDIO_WC_REG_RX66_CONTROL, 0x7415);
3678 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3679 MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190);
3680 /* Disable Autoneg: re-enable it after adv is done. */
3681 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
3682 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0);
3683 3777
3684 /* Check adding advertisement for 1G KX */ 3778 /* Check adding advertisement for 1G KX */
3685 if (((vars->line_speed == SPEED_AUTO_NEG) && 3779 if (((vars->line_speed == SPEED_AUTO_NEG) &&
3686 (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || 3780 (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
3687 (vars->line_speed == SPEED_1000)) { 3781 (vars->line_speed == SPEED_1000)) {
3688 u16 sd_digital; 3782 u32 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2;
3689 val16 |= (1<<5); 3783 val16 |= (1<<5);
3690 3784
3691 /* Enable CL37 1G Parallel Detect */ 3785 /* Enable CL37 1G Parallel Detect */
3692 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3786 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, addr, 0x1);
3693 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &sd_digital);
3694 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3695 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
3696 (sd_digital | 0x1));
3697
3698 DP(NETIF_MSG_LINK, "Advertize 1G\n"); 3787 DP(NETIF_MSG_LINK, "Advertize 1G\n");
3699 } 3788 }
3700 if (((vars->line_speed == SPEED_AUTO_NEG) && 3789 if (((vars->line_speed == SPEED_AUTO_NEG) &&
@@ -3704,7 +3793,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3704 val16 |= (1<<7); 3793 val16 |= (1<<7);
3705 /* Enable 10G Parallel Detect */ 3794 /* Enable 10G Parallel Detect */
3706 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 3795 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
3707 MDIO_WC_REG_PAR_DET_10G_CTRL, 1); 3796 MDIO_WC_REG_PAR_DET_10G_CTRL, 1);
3708 3797
3709 DP(NETIF_MSG_LINK, "Advertize 10G\n"); 3798 DP(NETIF_MSG_LINK, "Advertize 10G\n");
3710 } 3799 }
@@ -3738,10 +3827,9 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3738 offsetof(struct shmem_region, dev_info. 3827 offsetof(struct shmem_region, dev_info.
3739 port_hw_config[params->port].default_cfg)) & 3828 port_hw_config[params->port].default_cfg)) &
3740 PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) { 3829 PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
3741 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3830 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
3742 MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, &bam37); 3831 MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL,
3743 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3832 1);
3744 MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, bam37 | 1);
3745 DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n"); 3833 DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
3746 } 3834 }
3747 3835
@@ -3755,11 +3843,8 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3755 DP(NETIF_MSG_LINK, "Enable AN KR work-around\n"); 3843 DP(NETIF_MSG_LINK, "Enable AN KR work-around\n");
3756 vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY; 3844 vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
3757 } 3845 }
3758 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3846 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
3759 MDIO_WC_REG_DIGITAL5_MISC7, &val16); 3847 MDIO_WC_REG_DIGITAL5_MISC7, 0x100);
3760
3761 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3762 MDIO_WC_REG_DIGITAL5_MISC7, val16 | 0x100);
3763 3848
3764 /* Over 1G - AN local device user page 1 */ 3849 /* Over 1G - AN local device user page 1 */
3765 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3850 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
@@ -3776,50 +3861,35 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
3776 struct link_vars *vars) 3861 struct link_vars *vars)
3777{ 3862{
3778 struct bnx2x *bp = params->bp; 3863 struct bnx2x *bp = params->bp;
3779 u16 val; 3864 u16 i;
3780 3865 static struct bnx2x_reg_set reg_set[] = {
3781 /* Disable Autoneg */ 3866 /* Disable Autoneg */
3782 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3867 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
3783 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7); 3868 {MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 0},
3784 3869 {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
3785 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 3870 0x3f00},
3786 MDIO_WC_REG_PAR_DET_10G_CTRL, 0); 3871 {MDIO_AN_DEVAD, MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0},
3787 3872 {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0},
3788 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3873 {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL3_UP1, 0x1},
3789 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0x3f00); 3874 {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, 0xa},
3790 3875 /* Disable CL36 PCS Tx */
3791 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 3876 {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0},
3792 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0); 3877 /* Double Wide Single Data Rate @ pll rate */
3793 3878 {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF},
3794 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 3879 /* Leave cl72 training enable, needed for KR */
3795 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0); 3880 {MDIO_PMA_DEVAD,
3796
3797 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3798 MDIO_WC_REG_DIGITAL3_UP1, 0x1);
3799
3800 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3801 MDIO_WC_REG_DIGITAL5_MISC7, 0xa);
3802
3803 /* Disable CL36 PCS Tx */
3804 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3805 MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0);
3806
3807 /* Double Wide Single Data Rate @ pll rate */
3808 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3809 MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF);
3810
3811 /* Leave cl72 training enable, needed for KR */
3812 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
3813 MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150, 3881 MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150,
3814 0x2); 3882 0x2}
3883 };
3884
3885 for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
3886 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
3887 reg_set[i].val);
3815 3888
3816 /* Leave CL72 enabled */ 3889 /* Leave CL72 enabled */
3817 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3890 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
3818 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 3891 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
3819 &val); 3892 0x3800);
3820 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3821 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
3822 val | 0x3800);
3823 3893
3824 /* Set speed via PMA/PMD register */ 3894 /* Set speed via PMA/PMD register */
3825 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 3895 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
@@ -3840,7 +3910,7 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
3840 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 3910 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
3841 MDIO_WC_REG_RX66_CONTROL, 0xF9); 3911 MDIO_WC_REG_RX66_CONTROL, 0xF9);
3842 3912
3843 /* set and clear loopback to cause a reset to 64/66 decoder */ 3913 /* Set and clear loopback to cause a reset to 64/66 decoder */
3844 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3914 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3845 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x4000); 3915 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x4000);
3846 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3916 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
@@ -3855,16 +3925,12 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
3855 struct bnx2x *bp = params->bp; 3925 struct bnx2x *bp = params->bp;
3856 u16 misc1_val, tap_val, tx_driver_val, lane, val; 3926 u16 misc1_val, tap_val, tx_driver_val, lane, val;
3857 /* Hold rxSeqStart */ 3927 /* Hold rxSeqStart */
3858 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3928 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
3859 MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val); 3929 MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x8000);
3860 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3861 MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val | 0x8000));
3862 3930
3863 /* Hold tx_fifo_reset */ 3931 /* Hold tx_fifo_reset */
3864 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3932 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
3865 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val); 3933 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x1);
3866 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3867 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, (val | 0x1));
3868 3934
3869 /* Disable CL73 AN */ 3935 /* Disable CL73 AN */
3870 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0); 3936 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
@@ -3876,10 +3942,8 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
3876 MDIO_WC_REG_FX100_CTRL1, (val & 0xFFFA)); 3942 MDIO_WC_REG_FX100_CTRL1, (val & 0xFFFA));
3877 3943
3878 /* Disable 100FX Idle detect */ 3944 /* Disable 100FX Idle detect */
3879 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3945 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
3880 MDIO_WC_REG_FX100_CTRL3, &val); 3946 MDIO_WC_REG_FX100_CTRL3, 0x0080);
3881 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3882 MDIO_WC_REG_FX100_CTRL3, (val | 0x0080));
3883 3947
3884 /* Set Block address to Remote PHY & Clear forced_speed[5] */ 3948 /* Set Block address to Remote PHY & Clear forced_speed[5] */
3885 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3949 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -3940,16 +4004,20 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
3940 tx_driver_val); 4004 tx_driver_val);
3941 4005
3942 /* Enable fiber mode, enable and invert sig_det */ 4006 /* Enable fiber mode, enable and invert sig_det */
3943 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4007 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
3944 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val); 4008 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0xd);
3945 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3946 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, val | 0xd);
3947 4009
3948 /* Set Block address to Remote PHY & Set forced_speed[5], 40bit mode */ 4010 /* Set Block address to Remote PHY & Set forced_speed[5], 40bit mode */
3949 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4011 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
3950 MDIO_WC_REG_DIGITAL4_MISC3, &val); 4012 MDIO_WC_REG_DIGITAL4_MISC3, 0x8080);
4013
4014 /* Enable LPI pass through */
4015 DP(NETIF_MSG_LINK, "Configure WC for LPI pass through\n");
3951 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4016 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3952 MDIO_WC_REG_DIGITAL4_MISC3, val | 0x8080); 4017 MDIO_WC_REG_EEE_COMBO_CONTROL0,
4018 0x7c);
4019 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
4020 MDIO_WC_REG_DIGITAL4_MISC5, 0xc000);
3953 4021
3954 /* 10G XFI Full Duplex */ 4022 /* 10G XFI Full Duplex */
3955 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4023 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
@@ -4139,40 +4207,35 @@ static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy,
4139 u16 lane) 4207 u16 lane)
4140{ 4208{
4141 struct bnx2x *bp = params->bp; 4209 struct bnx2x *bp = params->bp;
4142 u16 val16; 4210 u16 i;
4143 4211 static struct bnx2x_reg_set wc_regs[] = {
4212 {MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0},
4213 {MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL1, 0x014a},
4214 {MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL3, 0x0800},
4215 {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL4_MISC3, 0x8008},
4216 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
4217 0x0195},
4218 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
4219 0x0007},
4220 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3,
4221 0x0002},
4222 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000},
4223 {MDIO_WC_DEVAD, MDIO_WC_REG_TX_FIR_TAP, 0x0000},
4224 {MDIO_WC_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040},
4225 {MDIO_WC_DEVAD, MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140}
4226 };
4144 /* Set XFI clock comp as default. */ 4227 /* Set XFI clock comp as default. */
4145 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4228 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
4146 MDIO_WC_REG_RX66_CONTROL, &val16); 4229 MDIO_WC_REG_RX66_CONTROL, (3<<13));
4147 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4230
4148 MDIO_WC_REG_RX66_CONTROL, val16 | (3<<13)); 4231 for (i = 0; i < sizeof(wc_regs)/sizeof(struct bnx2x_reg_set); i++)
4232 bnx2x_cl45_write(bp, phy, wc_regs[i].devad, wc_regs[i].reg,
4233 wc_regs[i].val);
4149 4234
4150 bnx2x_warpcore_reset_lane(bp, phy, 1);
4151 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
4152 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4153 MDIO_WC_REG_FX100_CTRL1, 0x014a);
4154 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4155 MDIO_WC_REG_FX100_CTRL3, 0x0800);
4156 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4157 MDIO_WC_REG_DIGITAL4_MISC3, 0x8008);
4158 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4159 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x0195);
4160 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4161 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x0007);
4162 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4163 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x0002);
4164 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4165 MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000);
4166 lane = bnx2x_get_warpcore_lane(phy, params); 4235 lane = bnx2x_get_warpcore_lane(phy, params);
4167 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4236 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4168 MDIO_WC_REG_TX_FIR_TAP, 0x0000);
4169 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4170 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, 0x0990); 4237 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, 0x0990);
4171 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4238
4172 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040);
4173 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4174 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140);
4175 bnx2x_warpcore_reset_lane(bp, phy, 0);
4176} 4239}
4177 4240
4178static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp, 4241static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp,
@@ -4260,7 +4323,7 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
4260 if (!vars->turn_to_run_wc_rt) 4323 if (!vars->turn_to_run_wc_rt)
4261 return; 4324 return;
4262 4325
4263 /* return if there is no link partner */ 4326 /* Return if there is no link partner */
4264 if (!(bnx2x_warpcore_get_sigdet(phy, params))) { 4327 if (!(bnx2x_warpcore_get_sigdet(phy, params))) {
4265 DP(NETIF_MSG_LINK, "bnx2x_warpcore_get_sigdet false\n"); 4328 DP(NETIF_MSG_LINK, "bnx2x_warpcore_get_sigdet false\n");
4266 return; 4329 return;
@@ -4294,7 +4357,7 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
4294 bnx2x_warpcore_reset_lane(bp, phy, 1); 4357 bnx2x_warpcore_reset_lane(bp, phy, 1);
4295 bnx2x_warpcore_reset_lane(bp, phy, 0); 4358 bnx2x_warpcore_reset_lane(bp, phy, 0);
4296 4359
4297 /* restart Autoneg */ 4360 /* Restart Autoneg */
4298 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 4361 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
4299 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200); 4362 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200);
4300 4363
@@ -4311,6 +4374,23 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
4311 } /*params->rx_tx_asic_rst*/ 4374 } /*params->rx_tx_asic_rst*/
4312 4375
4313} 4376}
4377static void bnx2x_warpcore_config_sfi(struct bnx2x_phy *phy,
4378 struct link_params *params)
4379{
4380 u16 lane = bnx2x_get_warpcore_lane(phy, params);
4381 struct bnx2x *bp = params->bp;
4382 bnx2x_warpcore_clear_regs(phy, params, lane);
4383 if ((params->req_line_speed[LINK_CONFIG_IDX(INT_PHY)] ==
4384 SPEED_10000) &&
4385 (phy->media_type != ETH_PHY_SFP_1G_FIBER)) {
4386 DP(NETIF_MSG_LINK, "Setting 10G SFI\n");
4387 bnx2x_warpcore_set_10G_XFI(phy, params, 0);
4388 } else {
4389 DP(NETIF_MSG_LINK, "Setting 1G Fiber\n");
4390 bnx2x_warpcore_set_sgmii_speed(phy, params, 1, 0);
4391 }
4392}
4393
4314static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy, 4394static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
4315 struct link_params *params, 4395 struct link_params *params,
4316 struct link_vars *vars) 4396 struct link_vars *vars)
@@ -4371,19 +4451,11 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
4371 break; 4451 break;
4372 4452
4373 case PORT_HW_CFG_NET_SERDES_IF_SFI: 4453 case PORT_HW_CFG_NET_SERDES_IF_SFI:
4374
4375 bnx2x_warpcore_clear_regs(phy, params, lane);
4376 if (vars->line_speed == SPEED_10000) {
4377 DP(NETIF_MSG_LINK, "Setting 10G SFI\n");
4378 bnx2x_warpcore_set_10G_XFI(phy, params, 0);
4379 } else if (vars->line_speed == SPEED_1000) {
4380 DP(NETIF_MSG_LINK, "Setting 1G Fiber\n");
4381 bnx2x_warpcore_set_sgmii_speed(
4382 phy, params, 1, 0);
4383 }
4384 /* Issue Module detection */ 4454 /* Issue Module detection */
4385 if (bnx2x_is_sfp_module_plugged(phy, params)) 4455 if (bnx2x_is_sfp_module_plugged(phy, params))
4386 bnx2x_sfp_module_detection(phy, params); 4456 bnx2x_sfp_module_detection(phy, params);
4457
4458 bnx2x_warpcore_config_sfi(phy, params);
4387 break; 4459 break;
4388 4460
4389 case PORT_HW_CFG_NET_SERDES_IF_DXGXS: 4461 case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
@@ -4500,12 +4572,9 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
4500 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, 4572 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
4501 MDIO_AER_BLOCK_AER_REG, 0); 4573 MDIO_AER_BLOCK_AER_REG, 0);
4502 /* Enable 1G MDIO (1-copy) */ 4574 /* Enable 1G MDIO (1-copy) */
4503 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4575 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
4504 MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, 4576 MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
4505 &val16); 4577 0x10);
4506 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4507 MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
4508 val16 | 0x10);
4509 /* Set 1G loopback based on lane (1-copy) */ 4578 /* Set 1G loopback based on lane (1-copy) */
4510 lane = bnx2x_get_warpcore_lane(phy, params); 4579 lane = bnx2x_get_warpcore_lane(phy, params);
4511 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4580 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -4518,22 +4587,19 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
4518 bnx2x_set_aer_mmd(params, phy); 4587 bnx2x_set_aer_mmd(params, phy);
4519 } else { 4588 } else {
4520 /* 10G & 20G */ 4589 /* 10G & 20G */
4521 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4590 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
4522 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); 4591 MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
4523 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4592 0x4000);
4524 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 |
4525 0x4000);
4526 4593
4527 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4594 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
4528 MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16); 4595 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1);
4529 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4530 MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 | 0x1);
4531 } 4596 }
4532} 4597}
4533 4598
4534 4599
4535void bnx2x_sync_link(struct link_params *params, 4600
4536 struct link_vars *vars) 4601static void bnx2x_sync_link(struct link_params *params,
4602 struct link_vars *vars)
4537{ 4603{
4538 struct bnx2x *bp = params->bp; 4604 struct bnx2x *bp = params->bp;
4539 u8 link_10g_plus; 4605 u8 link_10g_plus;
@@ -4606,7 +4672,7 @@ void bnx2x_sync_link(struct link_params *params,
4606 USES_WARPCORE(bp) && 4672 USES_WARPCORE(bp) &&
4607 (vars->line_speed == SPEED_1000)) 4673 (vars->line_speed == SPEED_1000))
4608 vars->phy_flags |= PHY_SGMII_FLAG; 4674 vars->phy_flags |= PHY_SGMII_FLAG;
4609 /* anything 10 and over uses the bmac */ 4675 /* Anything 10 and over uses the bmac */
4610 link_10g_plus = (vars->line_speed >= SPEED_10000); 4676 link_10g_plus = (vars->line_speed >= SPEED_10000);
4611 4677
4612 if (link_10g_plus) { 4678 if (link_10g_plus) {
@@ -4620,7 +4686,7 @@ void bnx2x_sync_link(struct link_params *params,
4620 else 4686 else
4621 vars->mac_type = MAC_TYPE_EMAC; 4687 vars->mac_type = MAC_TYPE_EMAC;
4622 } 4688 }
4623 } else { /* link down */ 4689 } else { /* Link down */
4624 DP(NETIF_MSG_LINK, "phy link down\n"); 4690 DP(NETIF_MSG_LINK, "phy link down\n");
4625 4691
4626 vars->phy_link_up = 0; 4692 vars->phy_link_up = 0;
@@ -4629,10 +4695,12 @@ void bnx2x_sync_link(struct link_params *params,
4629 vars->duplex = DUPLEX_FULL; 4695 vars->duplex = DUPLEX_FULL;
4630 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; 4696 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
4631 4697
4632 /* indicate no mac active */ 4698 /* Indicate no mac active */
4633 vars->mac_type = MAC_TYPE_NONE; 4699 vars->mac_type = MAC_TYPE_NONE;
4634 if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG) 4700 if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
4635 vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; 4701 vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
4702 if (vars->link_status & LINK_STATUS_SFP_TX_FAULT)
4703 vars->phy_flags |= PHY_SFP_TX_FAULT_FLAG;
4636 } 4704 }
4637} 4705}
4638 4706
@@ -4698,7 +4766,7 @@ static void bnx2x_set_master_ln(struct link_params *params,
4698 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> 4766 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
4699 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); 4767 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
4700 4768
4701 /* set the master_ln for AN */ 4769 /* Set the master_ln for AN */
4702 CL22_RD_OVER_CL45(bp, phy, 4770 CL22_RD_OVER_CL45(bp, phy,
4703 MDIO_REG_BANK_XGXS_BLOCK2, 4771 MDIO_REG_BANK_XGXS_BLOCK2,
4704 MDIO_XGXS_BLOCK2_TEST_MODE_LANE, 4772 MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
@@ -4721,7 +4789,7 @@ static int bnx2x_reset_unicore(struct link_params *params,
4721 MDIO_REG_BANK_COMBO_IEEE0, 4789 MDIO_REG_BANK_COMBO_IEEE0,
4722 MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control); 4790 MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
4723 4791
4724 /* reset the unicore */ 4792 /* Reset the unicore */
4725 CL22_WR_OVER_CL45(bp, phy, 4793 CL22_WR_OVER_CL45(bp, phy,
4726 MDIO_REG_BANK_COMBO_IEEE0, 4794 MDIO_REG_BANK_COMBO_IEEE0,
4727 MDIO_COMBO_IEEE0_MII_CONTROL, 4795 MDIO_COMBO_IEEE0_MII_CONTROL,
@@ -4730,11 +4798,11 @@ static int bnx2x_reset_unicore(struct link_params *params,
4730 if (set_serdes) 4798 if (set_serdes)
4731 bnx2x_set_serdes_access(bp, params->port); 4799 bnx2x_set_serdes_access(bp, params->port);
4732 4800
4733 /* wait for the reset to self clear */ 4801 /* Wait for the reset to self clear */
4734 for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) { 4802 for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
4735 udelay(5); 4803 udelay(5);
4736 4804
4737 /* the reset erased the previous bank value */ 4805 /* The reset erased the previous bank value */
4738 CL22_RD_OVER_CL45(bp, phy, 4806 CL22_RD_OVER_CL45(bp, phy,
4739 MDIO_REG_BANK_COMBO_IEEE0, 4807 MDIO_REG_BANK_COMBO_IEEE0,
4740 MDIO_COMBO_IEEE0_MII_CONTROL, 4808 MDIO_COMBO_IEEE0_MII_CONTROL,
@@ -4952,7 +5020,7 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
4952 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val); 5020 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
4953} 5021}
4954 5022
4955/* program SerDes, forced speed */ 5023/* Program SerDes, forced speed */
4956static void bnx2x_program_serdes(struct bnx2x_phy *phy, 5024static void bnx2x_program_serdes(struct bnx2x_phy *phy,
4957 struct link_params *params, 5025 struct link_params *params,
4958 struct link_vars *vars) 5026 struct link_vars *vars)
@@ -4960,7 +5028,7 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy,
4960 struct bnx2x *bp = params->bp; 5028 struct bnx2x *bp = params->bp;
4961 u16 reg_val; 5029 u16 reg_val;
4962 5030
4963 /* program duplex, disable autoneg and sgmii*/ 5031 /* Program duplex, disable autoneg and sgmii*/
4964 CL22_RD_OVER_CL45(bp, phy, 5032 CL22_RD_OVER_CL45(bp, phy,
4965 MDIO_REG_BANK_COMBO_IEEE0, 5033 MDIO_REG_BANK_COMBO_IEEE0,
4966 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val); 5034 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
@@ -4979,7 +5047,7 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy,
4979 CL22_RD_OVER_CL45(bp, phy, 5047 CL22_RD_OVER_CL45(bp, phy,
4980 MDIO_REG_BANK_SERDES_DIGITAL, 5048 MDIO_REG_BANK_SERDES_DIGITAL,
4981 MDIO_SERDES_DIGITAL_MISC1, &reg_val); 5049 MDIO_SERDES_DIGITAL_MISC1, &reg_val);
4982 /* clearing the speed value before setting the right speed */ 5050 /* Clearing the speed value before setting the right speed */
4983 DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val); 5051 DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
4984 5052
4985 reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK | 5053 reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK |
@@ -5008,7 +5076,7 @@ static void bnx2x_set_brcm_cl37_advertisement(struct bnx2x_phy *phy,
5008 struct bnx2x *bp = params->bp; 5076 struct bnx2x *bp = params->bp;
5009 u16 val = 0; 5077 u16 val = 0;
5010 5078
5011 /* set extended capabilities */ 5079 /* Set extended capabilities */
5012 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) 5080 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
5013 val |= MDIO_OVER_1G_UP1_2_5G; 5081 val |= MDIO_OVER_1G_UP1_2_5G;
5014 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) 5082 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
@@ -5028,7 +5096,7 @@ static void bnx2x_set_ieee_aneg_advertisement(struct bnx2x_phy *phy,
5028{ 5096{
5029 struct bnx2x *bp = params->bp; 5097 struct bnx2x *bp = params->bp;
5030 u16 val; 5098 u16 val;
5031 /* for AN, we are always publishing full duplex */ 5099 /* For AN, we are always publishing full duplex */
5032 5100
5033 CL22_WR_OVER_CL45(bp, phy, 5101 CL22_WR_OVER_CL45(bp, phy,
5034 MDIO_REG_BANK_COMBO_IEEE0, 5102 MDIO_REG_BANK_COMBO_IEEE0,
@@ -5090,14 +5158,14 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
5090 struct bnx2x *bp = params->bp; 5158 struct bnx2x *bp = params->bp;
5091 u16 control1; 5159 u16 control1;
5092 5160
5093 /* in SGMII mode, the unicore is always slave */ 5161 /* In SGMII mode, the unicore is always slave */
5094 5162
5095 CL22_RD_OVER_CL45(bp, phy, 5163 CL22_RD_OVER_CL45(bp, phy,
5096 MDIO_REG_BANK_SERDES_DIGITAL, 5164 MDIO_REG_BANK_SERDES_DIGITAL,
5097 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, 5165 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
5098 &control1); 5166 &control1);
5099 control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT; 5167 control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
5100 /* set sgmii mode (and not fiber) */ 5168 /* Set sgmii mode (and not fiber) */
5101 control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE | 5169 control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
5102 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET | 5170 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
5103 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE); 5171 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
@@ -5106,9 +5174,9 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
5106 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, 5174 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
5107 control1); 5175 control1);
5108 5176
5109 /* if forced speed */ 5177 /* If forced speed */
5110 if (!(vars->line_speed == SPEED_AUTO_NEG)) { 5178 if (!(vars->line_speed == SPEED_AUTO_NEG)) {
5111 /* set speed, disable autoneg */ 5179 /* Set speed, disable autoneg */
5112 u16 mii_control; 5180 u16 mii_control;
5113 5181
5114 CL22_RD_OVER_CL45(bp, phy, 5182 CL22_RD_OVER_CL45(bp, phy,
@@ -5129,16 +5197,16 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
5129 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000; 5197 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
5130 break; 5198 break;
5131 case SPEED_10: 5199 case SPEED_10:
5132 /* there is nothing to set for 10M */ 5200 /* There is nothing to set for 10M */
5133 break; 5201 break;
5134 default: 5202 default:
5135 /* invalid speed for SGMII */ 5203 /* Invalid speed for SGMII */
5136 DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n", 5204 DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
5137 vars->line_speed); 5205 vars->line_speed);
5138 break; 5206 break;
5139 } 5207 }
5140 5208
5141 /* setting the full duplex */ 5209 /* Setting the full duplex */
5142 if (phy->req_duplex == DUPLEX_FULL) 5210 if (phy->req_duplex == DUPLEX_FULL)
5143 mii_control |= 5211 mii_control |=
5144 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX; 5212 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
@@ -5148,7 +5216,7 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
5148 mii_control); 5216 mii_control);
5149 5217
5150 } else { /* AN mode */ 5218 } else { /* AN mode */
5151 /* enable and restart AN */ 5219 /* Enable and restart AN */
5152 bnx2x_restart_autoneg(phy, params, 0); 5220 bnx2x_restart_autoneg(phy, params, 0);
5153 } 5221 }
5154} 5222}
@@ -5244,7 +5312,7 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
5244 struct bnx2x *bp = params->bp; 5312 struct bnx2x *bp = params->bp;
5245 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; 5313 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
5246 5314
5247 /* resolve from gp_status in case of AN complete and not sgmii */ 5315 /* Resolve from gp_status in case of AN complete and not sgmii */
5248 if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) { 5316 if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) {
5249 /* Update the advertised flow-controled of LD/LP in AN */ 5317 /* Update the advertised flow-controled of LD/LP in AN */
5250 if (phy->req_line_speed == SPEED_AUTO_NEG) 5318 if (phy->req_line_speed == SPEED_AUTO_NEG)
@@ -5468,7 +5536,7 @@ static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
5468 bnx2x_xgxs_an_resolve(phy, params, vars, 5536 bnx2x_xgxs_an_resolve(phy, params, vars,
5469 gp_status); 5537 gp_status);
5470 } 5538 }
5471 } else { /* link_down */ 5539 } else { /* Link_down */
5472 if ((phy->req_line_speed == SPEED_AUTO_NEG) && 5540 if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
5473 SINGLE_MEDIA_DIRECT(params)) { 5541 SINGLE_MEDIA_DIRECT(params)) {
5474 /* Check signal is detected */ 5542 /* Check signal is detected */
@@ -5617,12 +5685,12 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
5617 u16 tx_driver; 5685 u16 tx_driver;
5618 u16 bank; 5686 u16 bank;
5619 5687
5620 /* read precomp */ 5688 /* Read precomp */
5621 CL22_RD_OVER_CL45(bp, phy, 5689 CL22_RD_OVER_CL45(bp, phy,
5622 MDIO_REG_BANK_OVER_1G, 5690 MDIO_REG_BANK_OVER_1G,
5623 MDIO_OVER_1G_LP_UP2, &lp_up2); 5691 MDIO_OVER_1G_LP_UP2, &lp_up2);
5624 5692
5625 /* bits [10:7] at lp_up2, positioned at [15:12] */ 5693 /* Bits [10:7] at lp_up2, positioned at [15:12] */
5626 lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >> 5694 lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
5627 MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) << 5695 MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
5628 MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT); 5696 MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
@@ -5636,7 +5704,7 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
5636 bank, 5704 bank,
5637 MDIO_TX0_TX_DRIVER, &tx_driver); 5705 MDIO_TX0_TX_DRIVER, &tx_driver);
5638 5706
5639 /* replace tx_driver bits [15:12] */ 5707 /* Replace tx_driver bits [15:12] */
5640 if (lp_up2 != 5708 if (lp_up2 !=
5641 (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) { 5709 (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
5642 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK; 5710 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
@@ -5732,16 +5800,16 @@ static void bnx2x_xgxs_config_init(struct bnx2x_phy *phy,
5732 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) 5800 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED))
5733 bnx2x_set_preemphasis(phy, params); 5801 bnx2x_set_preemphasis(phy, params);
5734 5802
5735 /* forced speed requested? */ 5803 /* Forced speed requested? */
5736 if (vars->line_speed != SPEED_AUTO_NEG || 5804 if (vars->line_speed != SPEED_AUTO_NEG ||
5737 (SINGLE_MEDIA_DIRECT(params) && 5805 (SINGLE_MEDIA_DIRECT(params) &&
5738 params->loopback_mode == LOOPBACK_EXT)) { 5806 params->loopback_mode == LOOPBACK_EXT)) {
5739 DP(NETIF_MSG_LINK, "not SGMII, no AN\n"); 5807 DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
5740 5808
5741 /* disable autoneg */ 5809 /* Disable autoneg */
5742 bnx2x_set_autoneg(phy, params, vars, 0); 5810 bnx2x_set_autoneg(phy, params, vars, 0);
5743 5811
5744 /* program speed and duplex */ 5812 /* Program speed and duplex */
5745 bnx2x_program_serdes(phy, params, vars); 5813 bnx2x_program_serdes(phy, params, vars);
5746 5814
5747 } else { /* AN_mode */ 5815 } else { /* AN_mode */
@@ -5750,14 +5818,14 @@ static void bnx2x_xgxs_config_init(struct bnx2x_phy *phy,
5750 /* AN enabled */ 5818 /* AN enabled */
5751 bnx2x_set_brcm_cl37_advertisement(phy, params); 5819 bnx2x_set_brcm_cl37_advertisement(phy, params);
5752 5820
5753 /* program duplex & pause advertisement (for aneg) */ 5821 /* Program duplex & pause advertisement (for aneg) */
5754 bnx2x_set_ieee_aneg_advertisement(phy, params, 5822 bnx2x_set_ieee_aneg_advertisement(phy, params,
5755 vars->ieee_fc); 5823 vars->ieee_fc);
5756 5824
5757 /* enable autoneg */ 5825 /* Enable autoneg */
5758 bnx2x_set_autoneg(phy, params, vars, enable_cl73); 5826 bnx2x_set_autoneg(phy, params, vars, enable_cl73);
5759 5827
5760 /* enable and restart AN */ 5828 /* Enable and restart AN */
5761 bnx2x_restart_autoneg(phy, params, enable_cl73); 5829 bnx2x_restart_autoneg(phy, params, enable_cl73);
5762 } 5830 }
5763 5831
@@ -5793,12 +5861,12 @@ static int bnx2x_prepare_xgxs(struct bnx2x_phy *phy,
5793 bnx2x_set_master_ln(params, phy); 5861 bnx2x_set_master_ln(params, phy);
5794 5862
5795 rc = bnx2x_reset_unicore(params, phy, 0); 5863 rc = bnx2x_reset_unicore(params, phy, 0);
5796 /* reset the SerDes and wait for reset bit return low */ 5864 /* Reset the SerDes and wait for reset bit return low */
5797 if (rc != 0) 5865 if (rc)
5798 return rc; 5866 return rc;
5799 5867
5800 bnx2x_set_aer_mmd(params, phy); 5868 bnx2x_set_aer_mmd(params, phy);
5801 /* setting the masterLn_def again after the reset */ 5869 /* Setting the masterLn_def again after the reset */
5802 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) { 5870 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
5803 bnx2x_set_master_ln(params, phy); 5871 bnx2x_set_master_ln(params, phy);
5804 bnx2x_set_swap_lanes(params, phy); 5872 bnx2x_set_swap_lanes(params, phy);
@@ -5823,7 +5891,7 @@ static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
5823 MDIO_PMA_REG_CTRL, &ctrl); 5891 MDIO_PMA_REG_CTRL, &ctrl);
5824 if (!(ctrl & (1<<15))) 5892 if (!(ctrl & (1<<15)))
5825 break; 5893 break;
5826 msleep(1); 5894 usleep_range(1000, 2000);
5827 } 5895 }
5828 5896
5829 if (cnt == 1000) 5897 if (cnt == 1000)
@@ -6054,7 +6122,7 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
6054 DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n"); 6122 DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
6055 6123
6056 if (!CHIP_IS_E3(bp)) { 6124 if (!CHIP_IS_E3(bp)) {
6057 /* change the uni_phy_addr in the nig */ 6125 /* Change the uni_phy_addr in the nig */
6058 md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + 6126 md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
6059 port*0x18)); 6127 port*0x18));
6060 6128
@@ -6074,11 +6142,11 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
6074 (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)), 6142 (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
6075 0x6041); 6143 0x6041);
6076 msleep(200); 6144 msleep(200);
6077 /* set aer mmd back */ 6145 /* Set aer mmd back */
6078 bnx2x_set_aer_mmd(params, phy); 6146 bnx2x_set_aer_mmd(params, phy);
6079 6147
6080 if (!CHIP_IS_E3(bp)) { 6148 if (!CHIP_IS_E3(bp)) {
6081 /* and md_devad */ 6149 /* And md_devad */
6082 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 6150 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
6083 md_devad); 6151 md_devad);
6084 } 6152 }
@@ -6275,7 +6343,7 @@ int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
6275 MDIO_REG_BANK_GP_STATUS, 6343 MDIO_REG_BANK_GP_STATUS,
6276 MDIO_GP_STATUS_TOP_AN_STATUS1, 6344 MDIO_GP_STATUS_TOP_AN_STATUS1,
6277 &gp_status); 6345 &gp_status);
6278 /* link is up only if both local phy and external phy are up */ 6346 /* Link is up only if both local phy and external phy are up */
6279 if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS)) 6347 if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
6280 return -ESRCH; 6348 return -ESRCH;
6281 } 6349 }
@@ -6296,7 +6364,9 @@ int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
6296 for (phy_index = EXT_PHY1; phy_index < params->num_phys; 6364 for (phy_index = EXT_PHY1; phy_index < params->num_phys;
6297 phy_index++) { 6365 phy_index++) {
6298 serdes_phy_type = ((params->phy[phy_index].media_type == 6366 serdes_phy_type = ((params->phy[phy_index].media_type ==
6299 ETH_PHY_SFP_FIBER) || 6367 ETH_PHY_SFPP_10G_FIBER) ||
6368 (params->phy[phy_index].media_type ==
6369 ETH_PHY_SFP_1G_FIBER) ||
6300 (params->phy[phy_index].media_type == 6370 (params->phy[phy_index].media_type ==
6301 ETH_PHY_XFP_FIBER) || 6371 ETH_PHY_XFP_FIBER) ||
6302 (params->phy[phy_index].media_type == 6372 (params->phy[phy_index].media_type ==
@@ -6397,7 +6467,7 @@ static int bnx2x_link_initialize(struct link_params *params,
6397static void bnx2x_int_link_reset(struct bnx2x_phy *phy, 6467static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
6398 struct link_params *params) 6468 struct link_params *params)
6399{ 6469{
6400 /* reset the SerDes/XGXS */ 6470 /* Reset the SerDes/XGXS */
6401 REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, 6471 REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
6402 (0x1ff << (params->port*16))); 6472 (0x1ff << (params->port*16)));
6403} 6473}
@@ -6430,10 +6500,10 @@ static int bnx2x_update_link_down(struct link_params *params,
6430 DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port); 6500 DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
6431 bnx2x_set_led(params, vars, LED_MODE_OFF, 0); 6501 bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
6432 vars->phy_flags &= ~PHY_PHYSICAL_LINK_FLAG; 6502 vars->phy_flags &= ~PHY_PHYSICAL_LINK_FLAG;
6433 /* indicate no mac active */ 6503 /* Indicate no mac active */
6434 vars->mac_type = MAC_TYPE_NONE; 6504 vars->mac_type = MAC_TYPE_NONE;
6435 6505
6436 /* update shared memory */ 6506 /* Update shared memory */
6437 vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK | 6507 vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK |
6438 LINK_STATUS_LINK_UP | 6508 LINK_STATUS_LINK_UP |
6439 LINK_STATUS_PHYSICAL_LINK_FLAG | 6509 LINK_STATUS_PHYSICAL_LINK_FLAG |
@@ -6446,15 +6516,15 @@ static int bnx2x_update_link_down(struct link_params *params,
6446 vars->line_speed = 0; 6516 vars->line_speed = 0;
6447 bnx2x_update_mng(params, vars->link_status); 6517 bnx2x_update_mng(params, vars->link_status);
6448 6518
6449 /* activate nig drain */ 6519 /* Activate nig drain */
6450 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); 6520 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
6451 6521
6452 /* disable emac */ 6522 /* Disable emac */
6453 if (!CHIP_IS_E3(bp)) 6523 if (!CHIP_IS_E3(bp))
6454 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); 6524 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
6455 6525
6456 msleep(10); 6526 usleep_range(10000, 20000);
6457 /* reset BigMac/Xmac */ 6527 /* Reset BigMac/Xmac */
6458 if (CHIP_IS_E1x(bp) || 6528 if (CHIP_IS_E1x(bp) ||
6459 CHIP_IS_E2(bp)) { 6529 CHIP_IS_E2(bp)) {
6460 bnx2x_bmac_rx_disable(bp, params->port); 6530 bnx2x_bmac_rx_disable(bp, params->port);
@@ -6463,6 +6533,16 @@ static int bnx2x_update_link_down(struct link_params *params,
6463 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); 6533 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
6464 } 6534 }
6465 if (CHIP_IS_E3(bp)) { 6535 if (CHIP_IS_E3(bp)) {
6536 /* Prevent LPI Generation by chip */
6537 REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2),
6538 0);
6539 REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 0);
6540 REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 + (params->port << 2),
6541 0);
6542 vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK |
6543 SHMEM_EEE_ACTIVE_BIT);
6544
6545 bnx2x_update_mng_eee(params, vars->eee_status);
6466 bnx2x_xmac_disable(params); 6546 bnx2x_xmac_disable(params);
6467 bnx2x_umac_disable(params); 6547 bnx2x_umac_disable(params);
6468 } 6548 }
@@ -6502,6 +6582,16 @@ static int bnx2x_update_link_up(struct link_params *params,
6502 bnx2x_umac_enable(params, vars, 0); 6582 bnx2x_umac_enable(params, vars, 0);
6503 bnx2x_set_led(params, vars, 6583 bnx2x_set_led(params, vars,
6504 LED_MODE_OPER, vars->line_speed); 6584 LED_MODE_OPER, vars->line_speed);
6585
6586 if ((vars->eee_status & SHMEM_EEE_ACTIVE_BIT) &&
6587 (vars->eee_status & SHMEM_EEE_LPI_REQUESTED_BIT)) {
6588 DP(NETIF_MSG_LINK, "Enabling LPI assertion\n");
6589 REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 +
6590 (params->port << 2), 1);
6591 REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 1);
6592 REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 +
6593 (params->port << 2), 0xfc20);
6594 }
6505 } 6595 }
6506 if ((CHIP_IS_E1x(bp) || 6596 if ((CHIP_IS_E1x(bp) ||
6507 CHIP_IS_E2(bp))) { 6597 CHIP_IS_E2(bp))) {
@@ -6534,12 +6624,12 @@ static int bnx2x_update_link_up(struct link_params *params,
6534 rc |= bnx2x_pbf_update(params, vars->flow_ctrl, 6624 rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
6535 vars->line_speed); 6625 vars->line_speed);
6536 6626
6537 /* disable drain */ 6627 /* Disable drain */
6538 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0); 6628 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
6539 6629
6540 /* update shared memory */ 6630 /* Update shared memory */
6541 bnx2x_update_mng(params, vars->link_status); 6631 bnx2x_update_mng(params, vars->link_status);
6542 6632 bnx2x_update_mng_eee(params, vars->eee_status);
6543 /* Check remote fault */ 6633 /* Check remote fault */
6544 for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) { 6634 for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
6545 if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) { 6635 if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
@@ -6583,6 +6673,8 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6583 phy_vars[phy_index].phy_link_up = 0; 6673 phy_vars[phy_index].phy_link_up = 0;
6584 phy_vars[phy_index].link_up = 0; 6674 phy_vars[phy_index].link_up = 0;
6585 phy_vars[phy_index].fault_detected = 0; 6675 phy_vars[phy_index].fault_detected = 0;
6676 /* different consideration, since vars holds inner state */
6677 phy_vars[phy_index].eee_status = vars->eee_status;
6586 } 6678 }
6587 6679
6588 if (USES_WARPCORE(bp)) 6680 if (USES_WARPCORE(bp))
@@ -6603,7 +6695,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6603 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68), 6695 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
6604 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)); 6696 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
6605 6697
6606 /* disable emac */ 6698 /* Disable emac */
6607 if (!CHIP_IS_E3(bp)) 6699 if (!CHIP_IS_E3(bp))
6608 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); 6700 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
6609 6701
@@ -6712,6 +6804,9 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6712 vars->link_status |= LINK_STATUS_SERDES_LINK; 6804 vars->link_status |= LINK_STATUS_SERDES_LINK;
6713 else 6805 else
6714 vars->link_status &= ~LINK_STATUS_SERDES_LINK; 6806 vars->link_status &= ~LINK_STATUS_SERDES_LINK;
6807
6808 vars->eee_status = phy_vars[active_external_phy].eee_status;
6809
6715 DP(NETIF_MSG_LINK, "Active external phy selected: %x\n", 6810 DP(NETIF_MSG_LINK, "Active external phy selected: %x\n",
6716 active_external_phy); 6811 active_external_phy);
6717 } 6812 }
@@ -6745,11 +6840,11 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6745 } else if (prev_line_speed != vars->line_speed) { 6840 } else if (prev_line_speed != vars->line_speed) {
6746 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 6841 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4,
6747 0); 6842 0);
6748 msleep(1); 6843 usleep_range(1000, 2000);
6749 } 6844 }
6750 } 6845 }
6751 6846
6752 /* anything 10 and over uses the bmac */ 6847 /* Anything 10 and over uses the bmac */
6753 link_10g_plus = (vars->line_speed >= SPEED_10000); 6848 link_10g_plus = (vars->line_speed >= SPEED_10000);
6754 6849
6755 bnx2x_link_int_ack(params, vars, link_10g_plus); 6850 bnx2x_link_int_ack(params, vars, link_10g_plus);
@@ -6815,7 +6910,7 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
6815{ 6910{
6816 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 6911 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
6817 MISC_REGISTERS_GPIO_OUTPUT_LOW, port); 6912 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
6818 msleep(1); 6913 usleep_range(1000, 2000);
6819 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 6914 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
6820 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); 6915 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
6821} 6916}
@@ -6912,7 +7007,7 @@ static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
6912 MDIO_PMA_REG_GEN_CTRL, 7007 MDIO_PMA_REG_GEN_CTRL,
6913 0x0001); 7008 0x0001);
6914 7009
6915 /* ucode reboot and rst */ 7010 /* Ucode reboot and rst */
6916 bnx2x_cl45_write(bp, phy, 7011 bnx2x_cl45_write(bp, phy,
6917 MDIO_PMA_DEVAD, 7012 MDIO_PMA_DEVAD,
6918 MDIO_PMA_REG_GEN_CTRL, 7013 MDIO_PMA_REG_GEN_CTRL,
@@ -6956,7 +7051,7 @@ static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
6956 MDIO_PMA_DEVAD, 7051 MDIO_PMA_DEVAD,
6957 MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout); 7052 MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout);
6958 7053
6959 msleep(1); 7054 usleep_range(1000, 2000);
6960 } while (fw_ver1 == 0 || fw_ver1 == 0x4321 || 7055 } while (fw_ver1 == 0 || fw_ver1 == 0x4321 ||
6961 ((fw_msgout & 0xff) != 0x03 && (phy->type == 7056 ((fw_msgout & 0xff) != 0x03 && (phy->type ==
6962 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))); 7057 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)));
@@ -7050,11 +7145,11 @@ static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
7050 "XAUI workaround has completed\n"); 7145 "XAUI workaround has completed\n");
7051 return 0; 7146 return 0;
7052 } 7147 }
7053 msleep(3); 7148 usleep_range(3000, 6000);
7054 } 7149 }
7055 break; 7150 break;
7056 } 7151 }
7057 msleep(3); 7152 usleep_range(3000, 6000);
7058 } 7153 }
7059 DP(NETIF_MSG_LINK, "Warning: XAUI work-around timeout !!!\n"); 7154 DP(NETIF_MSG_LINK, "Warning: XAUI work-around timeout !!!\n");
7060 return -EINVAL; 7155 return -EINVAL;
@@ -7128,7 +7223,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
7128 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 7223 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
7129 MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); 7224 MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
7130 7225
7131 /* enable LASI */ 7226 /* Enable LASI */
7132 bnx2x_cl45_write(bp, phy, 7227 bnx2x_cl45_write(bp, phy,
7133 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2)); 7228 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2));
7134 bnx2x_cl45_write(bp, phy, 7229 bnx2x_cl45_write(bp, phy,
@@ -7276,7 +7371,7 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
7276 7371
7277 DP(NETIF_MSG_LINK, "8703 LASI status 0x%x\n", val1); 7372 DP(NETIF_MSG_LINK, "8703 LASI status 0x%x\n", val1);
7278 7373
7279 /* clear the interrupt LASI status register */ 7374 /* Clear the interrupt LASI status register */
7280 bnx2x_cl45_read(bp, phy, 7375 bnx2x_cl45_read(bp, phy,
7281 MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2); 7376 MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
7282 bnx2x_cl45_read(bp, phy, 7377 bnx2x_cl45_read(bp, phy,
@@ -7601,7 +7696,7 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7601 struct bnx2x *bp = params->bp; 7696 struct bnx2x *bp = params->bp;
7602 u16 val = 0; 7697 u16 val = 0;
7603 u16 i; 7698 u16 i;
7604 if (byte_cnt > 16) { 7699 if (byte_cnt > SFP_EEPROM_PAGE_SIZE) {
7605 DP(NETIF_MSG_LINK, 7700 DP(NETIF_MSG_LINK,
7606 "Reading from eeprom is limited to 0xf\n"); 7701 "Reading from eeprom is limited to 0xf\n");
7607 return -EINVAL; 7702 return -EINVAL;
@@ -7655,7 +7750,7 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7655 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 7750 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
7656 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) 7751 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
7657 return 0; 7752 return 0;
7658 msleep(1); 7753 usleep_range(1000, 2000);
7659 } 7754 }
7660 return -EINVAL; 7755 return -EINVAL;
7661} 7756}
@@ -7692,7 +7787,8 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7692 u32 data_array[4]; 7787 u32 data_array[4];
7693 u16 addr32; 7788 u16 addr32;
7694 struct bnx2x *bp = params->bp; 7789 struct bnx2x *bp = params->bp;
7695 if (byte_cnt > 16) { 7790
7791 if (byte_cnt > SFP_EEPROM_PAGE_SIZE) {
7696 DP(NETIF_MSG_LINK, 7792 DP(NETIF_MSG_LINK,
7697 "Reading from eeprom is limited to 16 bytes\n"); 7793 "Reading from eeprom is limited to 16 bytes\n");
7698 return -EINVAL; 7794 return -EINVAL;
@@ -7728,7 +7824,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7728 struct bnx2x *bp = params->bp; 7824 struct bnx2x *bp = params->bp;
7729 u16 val, i; 7825 u16 val, i;
7730 7826
7731 if (byte_cnt > 16) { 7827 if (byte_cnt > SFP_EEPROM_PAGE_SIZE) {
7732 DP(NETIF_MSG_LINK, 7828 DP(NETIF_MSG_LINK,
7733 "Reading from eeprom is limited to 0xf\n"); 7829 "Reading from eeprom is limited to 0xf\n");
7734 return -EINVAL; 7830 return -EINVAL;
@@ -7765,7 +7861,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7765 /* Wait appropriate time for two-wire command to finish before 7861 /* Wait appropriate time for two-wire command to finish before
7766 * polling the status register 7862 * polling the status register
7767 */ 7863 */
7768 msleep(1); 7864 usleep_range(1000, 2000);
7769 7865
7770 /* Wait up to 500us for command complete status */ 7866 /* Wait up to 500us for command complete status */
7771 for (i = 0; i < 100; i++) { 7867 for (i = 0; i < 100; i++) {
@@ -7801,7 +7897,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7801 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 7897 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
7802 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) 7898 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
7803 return 0; 7899 return 0;
7804 msleep(1); 7900 usleep_range(1000, 2000);
7805 } 7901 }
7806 7902
7807 return -EINVAL; 7903 return -EINVAL;
@@ -7811,7 +7907,7 @@ int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7811 struct link_params *params, u16 addr, 7907 struct link_params *params, u16 addr,
7812 u8 byte_cnt, u8 *o_buf) 7908 u8 byte_cnt, u8 *o_buf)
7813{ 7909{
7814 int rc = -EINVAL; 7910 int rc = -EOPNOTSUPP;
7815 switch (phy->type) { 7911 switch (phy->type) {
7816 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: 7912 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7817 rc = bnx2x_8726_read_sfp_module_eeprom(phy, params, addr, 7913 rc = bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
@@ -7836,7 +7932,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
7836{ 7932{
7837 struct bnx2x *bp = params->bp; 7933 struct bnx2x *bp = params->bp;
7838 u32 sync_offset = 0, phy_idx, media_types; 7934 u32 sync_offset = 0, phy_idx, media_types;
7839 u8 val, check_limiting_mode = 0; 7935 u8 val[2], check_limiting_mode = 0;
7840 *edc_mode = EDC_MODE_LIMITING; 7936 *edc_mode = EDC_MODE_LIMITING;
7841 7937
7842 phy->media_type = ETH_PHY_UNSPECIFIED; 7938 phy->media_type = ETH_PHY_UNSPECIFIED;
@@ -7844,13 +7940,13 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
7844 if (bnx2x_read_sfp_module_eeprom(phy, 7940 if (bnx2x_read_sfp_module_eeprom(phy,
7845 params, 7941 params,
7846 SFP_EEPROM_CON_TYPE_ADDR, 7942 SFP_EEPROM_CON_TYPE_ADDR,
7847 1, 7943 2,
7848 &val) != 0) { 7944 (u8 *)val) != 0) {
7849 DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n"); 7945 DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n");
7850 return -EINVAL; 7946 return -EINVAL;
7851 } 7947 }
7852 7948
7853 switch (val) { 7949 switch (val[0]) {
7854 case SFP_EEPROM_CON_TYPE_VAL_COPPER: 7950 case SFP_EEPROM_CON_TYPE_VAL_COPPER:
7855 { 7951 {
7856 u8 copper_module_type; 7952 u8 copper_module_type;
@@ -7888,13 +7984,29 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
7888 break; 7984 break;
7889 } 7985 }
7890 case SFP_EEPROM_CON_TYPE_VAL_LC: 7986 case SFP_EEPROM_CON_TYPE_VAL_LC:
7891 phy->media_type = ETH_PHY_SFP_FIBER;
7892 DP(NETIF_MSG_LINK, "Optic module detected\n");
7893 check_limiting_mode = 1; 7987 check_limiting_mode = 1;
7988 if ((val[1] & (SFP_EEPROM_COMP_CODE_SR_MASK |
7989 SFP_EEPROM_COMP_CODE_LR_MASK |
7990 SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) {
7991 DP(NETIF_MSG_LINK, "1G Optic module detected\n");
7992 phy->media_type = ETH_PHY_SFP_1G_FIBER;
7993 phy->req_line_speed = SPEED_1000;
7994 } else {
7995 int idx, cfg_idx = 0;
7996 DP(NETIF_MSG_LINK, "10G Optic module detected\n");
7997 for (idx = INT_PHY; idx < MAX_PHYS; idx++) {
7998 if (params->phy[idx].type == phy->type) {
7999 cfg_idx = LINK_CONFIG_IDX(idx);
8000 break;
8001 }
8002 }
8003 phy->media_type = ETH_PHY_SFPP_10G_FIBER;
8004 phy->req_line_speed = params->req_line_speed[cfg_idx];
8005 }
7894 break; 8006 break;
7895 default: 8007 default:
7896 DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n", 8008 DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n",
7897 val); 8009 val[0]);
7898 return -EINVAL; 8010 return -EINVAL;
7899 } 8011 }
7900 sync_offset = params->shmem_base + 8012 sync_offset = params->shmem_base +
@@ -7980,7 +8092,7 @@ static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
7980 return 0; 8092 return 0;
7981 } 8093 }
7982 8094
7983 /* format the warning message */ 8095 /* Format the warning message */
7984 if (bnx2x_read_sfp_module_eeprom(phy, 8096 if (bnx2x_read_sfp_module_eeprom(phy,
7985 params, 8097 params,
7986 SFP_EEPROM_VENDOR_NAME_ADDR, 8098 SFP_EEPROM_VENDOR_NAME_ADDR,
@@ -8026,7 +8138,7 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
8026 timeout * 5); 8138 timeout * 5);
8027 return 0; 8139 return 0;
8028 } 8140 }
8029 msleep(5); 8141 usleep_range(5000, 10000);
8030 } 8142 }
8031 return -EINVAL; 8143 return -EINVAL;
8032} 8144}
@@ -8338,7 +8450,7 @@ int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
8338 DP(NETIF_MSG_LINK, "Failed to get valid module type\n"); 8450 DP(NETIF_MSG_LINK, "Failed to get valid module type\n");
8339 return -EINVAL; 8451 return -EINVAL;
8340 } else if (bnx2x_verify_sfp_module(phy, params) != 0) { 8452 } else if (bnx2x_verify_sfp_module(phy, params) != 0) {
8341 /* check SFP+ module compatibility */ 8453 /* Check SFP+ module compatibility */
8342 DP(NETIF_MSG_LINK, "Module verification failed!!\n"); 8454 DP(NETIF_MSG_LINK, "Module verification failed!!\n");
8343 rc = -EINVAL; 8455 rc = -EINVAL;
8344 /* Turn on fault module-detected led */ 8456 /* Turn on fault module-detected led */
@@ -8401,14 +8513,34 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
8401 8513
8402 /* Call the handling function in case module is detected */ 8514 /* Call the handling function in case module is detected */
8403 if (gpio_val == 0) { 8515 if (gpio_val == 0) {
8516 bnx2x_set_mdio_clk(bp, params->chip_id, params->port);
8517 bnx2x_set_aer_mmd(params, phy);
8518
8404 bnx2x_power_sfp_module(params, phy, 1); 8519 bnx2x_power_sfp_module(params, phy, 1);
8405 bnx2x_set_gpio_int(bp, gpio_num, 8520 bnx2x_set_gpio_int(bp, gpio_num,
8406 MISC_REGISTERS_GPIO_INT_OUTPUT_CLR, 8521 MISC_REGISTERS_GPIO_INT_OUTPUT_CLR,
8407 gpio_port); 8522 gpio_port);
8408 if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0) 8523 if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0) {
8409 bnx2x_sfp_module_detection(phy, params); 8524 bnx2x_sfp_module_detection(phy, params);
8410 else 8525 if (CHIP_IS_E3(bp)) {
8526 u16 rx_tx_in_reset;
8527 /* In case WC is out of reset, reconfigure the
8528 * link speed while taking into account 1G
8529 * module limitation.
8530 */
8531 bnx2x_cl45_read(bp, phy,
8532 MDIO_WC_DEVAD,
8533 MDIO_WC_REG_DIGITAL5_MISC6,
8534 &rx_tx_in_reset);
8535 if (!rx_tx_in_reset) {
8536 bnx2x_warpcore_reset_lane(bp, phy, 1);
8537 bnx2x_warpcore_config_sfi(phy, params);
8538 bnx2x_warpcore_reset_lane(bp, phy, 0);
8539 }
8540 }
8541 } else {
8411 DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n"); 8542 DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
8543 }
8412 } else { 8544 } else {
8413 u32 val = REG_RD(bp, params->shmem_base + 8545 u32 val = REG_RD(bp, params->shmem_base +
8414 offsetof(struct shmem_region, dev_info. 8546 offsetof(struct shmem_region, dev_info.
@@ -8469,7 +8601,7 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
8469 bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT, 8601 bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT,
8470 MDIO_PMA_LASI_TXCTRL); 8602 MDIO_PMA_LASI_TXCTRL);
8471 8603
8472 /* clear LASI indication*/ 8604 /* Clear LASI indication*/
8473 bnx2x_cl45_read(bp, phy, 8605 bnx2x_cl45_read(bp, phy,
8474 MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1); 8606 MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
8475 bnx2x_cl45_read(bp, phy, 8607 bnx2x_cl45_read(bp, phy,
@@ -8537,7 +8669,7 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
8537 MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER1, &val); 8669 MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER1, &val);
8538 if (val) 8670 if (val)
8539 break; 8671 break;
8540 msleep(10); 8672 usleep_range(10000, 20000);
8541 } 8673 }
8542 DP(NETIF_MSG_LINK, "XGXS 8706 is initialized after %d ms\n", cnt); 8674 DP(NETIF_MSG_LINK, "XGXS 8706 is initialized after %d ms\n", cnt);
8543 if ((params->feature_config_flags & 8675 if ((params->feature_config_flags &
@@ -8666,7 +8798,7 @@ static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy,
8666 MDIO_PMA_REG_GEN_CTRL, 8798 MDIO_PMA_REG_GEN_CTRL,
8667 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); 8799 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
8668 8800
8669 /* wait for 150ms for microcode load */ 8801 /* Wait for 150ms for microcode load */
8670 msleep(150); 8802 msleep(150);
8671 8803
8672 /* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */ 8804 /* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
@@ -8860,6 +8992,63 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
8860 MISC_REGISTERS_GPIO_OUTPUT_LOW, port); 8992 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
8861} 8993}
8862 8994
8995static void bnx2x_8727_config_speed(struct bnx2x_phy *phy,
8996 struct link_params *params)
8997{
8998 struct bnx2x *bp = params->bp;
8999 u16 tmp1, val;
9000 /* Set option 1G speed */
9001 if ((phy->req_line_speed == SPEED_1000) ||
9002 (phy->media_type == ETH_PHY_SFP_1G_FIBER)) {
9003 DP(NETIF_MSG_LINK, "Setting 1G force\n");
9004 bnx2x_cl45_write(bp, phy,
9005 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
9006 bnx2x_cl45_write(bp, phy,
9007 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
9008 bnx2x_cl45_read(bp, phy,
9009 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
9010 DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
9011 /* Power down the XAUI until link is up in case of dual-media
9012 * and 1G
9013 */
9014 if (DUAL_MEDIA(params)) {
9015 bnx2x_cl45_read(bp, phy,
9016 MDIO_PMA_DEVAD,
9017 MDIO_PMA_REG_8727_PCS_GP, &val);
9018 val |= (3<<10);
9019 bnx2x_cl45_write(bp, phy,
9020 MDIO_PMA_DEVAD,
9021 MDIO_PMA_REG_8727_PCS_GP, val);
9022 }
9023 } else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
9024 ((phy->speed_cap_mask &
9025 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) &&
9026 ((phy->speed_cap_mask &
9027 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
9028 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
9029
9030 DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
9031 bnx2x_cl45_write(bp, phy,
9032 MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0);
9033 bnx2x_cl45_write(bp, phy,
9034 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
9035 } else {
9036 /* Since the 8727 has only single reset pin, need to set the 10G
9037 * registers although it is default
9038 */
9039 bnx2x_cl45_write(bp, phy,
9040 MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL,
9041 0x0020);
9042 bnx2x_cl45_write(bp, phy,
9043 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100);
9044 bnx2x_cl45_write(bp, phy,
9045 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
9046 bnx2x_cl45_write(bp, phy,
9047 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2,
9048 0x0008);
9049 }
9050}
9051
8863static int bnx2x_8727_config_init(struct bnx2x_phy *phy, 9052static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
8864 struct link_params *params, 9053 struct link_params *params,
8865 struct link_vars *vars) 9054 struct link_vars *vars)
@@ -8877,7 +9066,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
8877 lasi_ctrl_val = 0x0006; 9066 lasi_ctrl_val = 0x0006;
8878 9067
8879 DP(NETIF_MSG_LINK, "Initializing BCM8727\n"); 9068 DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
8880 /* enable LASI */ 9069 /* Enable LASI */
8881 bnx2x_cl45_write(bp, phy, 9070 bnx2x_cl45_write(bp, phy,
8882 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, 9071 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
8883 rx_alarm_ctrl_val); 9072 rx_alarm_ctrl_val);
@@ -8929,56 +9118,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
8929 bnx2x_cl45_read(bp, phy, 9118 bnx2x_cl45_read(bp, phy,
8930 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1); 9119 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
8931 9120
8932 /* Set option 1G speed */ 9121 bnx2x_8727_config_speed(phy, params);
8933 if (phy->req_line_speed == SPEED_1000) {
8934 DP(NETIF_MSG_LINK, "Setting 1G force\n");
8935 bnx2x_cl45_write(bp, phy,
8936 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
8937 bnx2x_cl45_write(bp, phy,
8938 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
8939 bnx2x_cl45_read(bp, phy,
8940 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
8941 DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
8942 /* Power down the XAUI until link is up in case of dual-media
8943 * and 1G
8944 */
8945 if (DUAL_MEDIA(params)) {
8946 bnx2x_cl45_read(bp, phy,
8947 MDIO_PMA_DEVAD,
8948 MDIO_PMA_REG_8727_PCS_GP, &val);
8949 val |= (3<<10);
8950 bnx2x_cl45_write(bp, phy,
8951 MDIO_PMA_DEVAD,
8952 MDIO_PMA_REG_8727_PCS_GP, val);
8953 }
8954 } else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
8955 ((phy->speed_cap_mask &
8956 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) &&
8957 ((phy->speed_cap_mask &
8958 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
8959 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
8960
8961 DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
8962 bnx2x_cl45_write(bp, phy,
8963 MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0);
8964 bnx2x_cl45_write(bp, phy,
8965 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
8966 } else {
8967 /* Since the 8727 has only single reset pin, need to set the 10G
8968 * registers although it is default
8969 */
8970 bnx2x_cl45_write(bp, phy,
8971 MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL,
8972 0x0020);
8973 bnx2x_cl45_write(bp, phy,
8974 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100);
8975 bnx2x_cl45_write(bp, phy,
8976 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
8977 bnx2x_cl45_write(bp, phy,
8978 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2,
8979 0x0008);
8980 }
8981
8982 /* Set 2-wire transfer rate of SFP+ module EEPROM 9122 /* Set 2-wire transfer rate of SFP+ module EEPROM
8983 * to 100Khz since some DACs(direct attached cables) do 9123 * to 100Khz since some DACs(direct attached cables) do
8984 * not work at 400Khz. 9124 * not work at 400Khz.
@@ -9105,6 +9245,9 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
9105 bnx2x_sfp_module_detection(phy, params); 9245 bnx2x_sfp_module_detection(phy, params);
9106 else 9246 else
9107 DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n"); 9247 DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
9248
9249 /* Reconfigure link speed based on module type limitations */
9250 bnx2x_8727_config_speed(phy, params);
9108 } 9251 }
9109 9252
9110 DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n", 9253 DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
@@ -9585,9 +9728,9 @@ static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
9585static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, 9728static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
9586 struct link_params *params, 9729 struct link_params *params,
9587 u16 fw_cmd, 9730 u16 fw_cmd,
9588 u16 cmd_args[]) 9731 u16 cmd_args[], int argc)
9589{ 9732{
9590 u32 idx; 9733 int idx;
9591 u16 val; 9734 u16 val;
9592 struct bnx2x *bp = params->bp; 9735 struct bnx2x *bp = params->bp;
9593 /* Write CMD_OPEN_OVERRIDE to STATUS reg */ 9736 /* Write CMD_OPEN_OVERRIDE to STATUS reg */
@@ -9599,7 +9742,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
9599 MDIO_84833_CMD_HDLR_STATUS, &val); 9742 MDIO_84833_CMD_HDLR_STATUS, &val);
9600 if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS) 9743 if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS)
9601 break; 9744 break;
9602 msleep(1); 9745 usleep_range(1000, 2000);
9603 } 9746 }
9604 if (idx >= PHY84833_CMDHDLR_WAIT) { 9747 if (idx >= PHY84833_CMDHDLR_WAIT) {
9605 DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n"); 9748 DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
@@ -9607,7 +9750,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
9607 } 9750 }
9608 9751
9609 /* Prepare argument(s) and issue command */ 9752 /* Prepare argument(s) and issue command */
9610 for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) { 9753 for (idx = 0; idx < argc; idx++) {
9611 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, 9754 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
9612 MDIO_84833_CMD_HDLR_DATA1 + idx, 9755 MDIO_84833_CMD_HDLR_DATA1 + idx,
9613 cmd_args[idx]); 9756 cmd_args[idx]);
@@ -9620,7 +9763,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
9620 if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) || 9763 if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) ||
9621 (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) 9764 (val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
9622 break; 9765 break;
9623 msleep(1); 9766 usleep_range(1000, 2000);
9624 } 9767 }
9625 if ((idx >= PHY84833_CMDHDLR_WAIT) || 9768 if ((idx >= PHY84833_CMDHDLR_WAIT) ||
9626 (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) { 9769 (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
@@ -9628,7 +9771,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
9628 return -EINVAL; 9771 return -EINVAL;
9629 } 9772 }
9630 /* Gather returning data */ 9773 /* Gather returning data */
9631 for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) { 9774 for (idx = 0; idx < argc; idx++) {
9632 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 9775 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
9633 MDIO_84833_CMD_HDLR_DATA1 + idx, 9776 MDIO_84833_CMD_HDLR_DATA1 + idx,
9634 &cmd_args[idx]); 9777 &cmd_args[idx]);
@@ -9662,7 +9805,7 @@ static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
9662 data[1] = (u16)pair_swap; 9805 data[1] = (u16)pair_swap;
9663 9806
9664 status = bnx2x_84833_cmd_hdlr(phy, params, 9807 status = bnx2x_84833_cmd_hdlr(phy, params,
9665 PHY84833_CMD_SET_PAIR_SWAP, data); 9808 PHY84833_CMD_SET_PAIR_SWAP, data, PHY84833_CMDHDLR_MAX_ARGS);
9666 if (status == 0) 9809 if (status == 0)
9667 DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]); 9810 DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]);
9668 9811
@@ -9740,6 +9883,95 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
9740 return 0; 9883 return 0;
9741} 9884}
9742 9885
9886static int bnx2x_8483x_eee_timers(struct link_params *params,
9887 struct link_vars *vars)
9888{
9889 u32 eee_idle = 0, eee_mode;
9890 struct bnx2x *bp = params->bp;
9891
9892 eee_idle = bnx2x_eee_calc_timer(params);
9893
9894 if (eee_idle) {
9895 REG_WR(bp, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2),
9896 eee_idle);
9897 } else if ((params->eee_mode & EEE_MODE_ENABLE_LPI) &&
9898 (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) &&
9899 (params->eee_mode & EEE_MODE_OUTPUT_TIME)) {
9900 DP(NETIF_MSG_LINK, "Error: Tx LPI is enabled with timer 0\n");
9901 return -EINVAL;
9902 }
9903
9904 vars->eee_status &= ~(SHMEM_EEE_TIMER_MASK | SHMEM_EEE_TIME_OUTPUT_BIT);
9905 if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
9906 /* eee_idle in 1u --> eee_status in 16u */
9907 eee_idle >>= 4;
9908 vars->eee_status |= (eee_idle & SHMEM_EEE_TIMER_MASK) |
9909 SHMEM_EEE_TIME_OUTPUT_BIT;
9910 } else {
9911 if (bnx2x_eee_time_to_nvram(eee_idle, &eee_mode))
9912 return -EINVAL;
9913 vars->eee_status |= eee_mode;
9914 }
9915
9916 return 0;
9917}
9918
9919static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
9920 struct link_params *params,
9921 struct link_vars *vars)
9922{
9923 int rc;
9924 struct bnx2x *bp = params->bp;
9925 u16 cmd_args = 0;
9926
9927 DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n");
9928
9929 /* Make Certain LPI is disabled */
9930 REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0);
9931 REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 0);
9932
9933 /* Prevent Phy from working in EEE and advertising it */
9934 rc = bnx2x_84833_cmd_hdlr(phy, params,
9935 PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
9936 if (rc) {
9937 DP(NETIF_MSG_LINK, "EEE disable failed.\n");
9938 return rc;
9939 }
9940
9941 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0);
9942 vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
9943
9944 return 0;
9945}
9946
9947static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
9948 struct link_params *params,
9949 struct link_vars *vars)
9950{
9951 int rc;
9952 struct bnx2x *bp = params->bp;
9953 u16 cmd_args = 1;
9954
9955 DP(NETIF_MSG_LINK, "Advertise 10GBase-T EEE\n");
9956
9957 rc = bnx2x_84833_cmd_hdlr(phy, params,
9958 PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
9959 if (rc) {
9960 DP(NETIF_MSG_LINK, "EEE enable failed.\n");
9961 return rc;
9962 }
9963
9964 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x8);
9965
9966 /* Mask events preventing LPI generation */
9967 REG_WR(bp, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20);
9968
9969 vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
9970 vars->eee_status |= (SHMEM_EEE_10G_ADV << SHMEM_EEE_ADV_STATUS_SHIFT);
9971
9972 return 0;
9973}
9974
9743#define PHY84833_CONSTANT_LATENCY 1193 9975#define PHY84833_CONSTANT_LATENCY 1193
9744static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, 9976static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
9745 struct link_params *params, 9977 struct link_params *params,
@@ -9752,7 +9984,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
9752 u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS]; 9984 u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS];
9753 int rc = 0; 9985 int rc = 0;
9754 9986
9755 msleep(1); 9987 usleep_range(1000, 2000);
9756 9988
9757 if (!(CHIP_IS_E1x(bp))) 9989 if (!(CHIP_IS_E1x(bp)))
9758 port = BP_PATH(bp); 9990 port = BP_PATH(bp);
@@ -9839,8 +10071,9 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
9839 cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1; 10071 cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1;
9840 cmd_args[3] = PHY84833_CONSTANT_LATENCY; 10072 cmd_args[3] = PHY84833_CONSTANT_LATENCY;
9841 rc = bnx2x_84833_cmd_hdlr(phy, params, 10073 rc = bnx2x_84833_cmd_hdlr(phy, params,
9842 PHY84833_CMD_SET_EEE_MODE, cmd_args); 10074 PHY84833_CMD_SET_EEE_MODE, cmd_args,
9843 if (rc != 0) 10075 PHY84833_CMDHDLR_MAX_ARGS);
10076 if (rc)
9844 DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n"); 10077 DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
9845 } 10078 }
9846 if (initialize) 10079 if (initialize)
@@ -9864,6 +10097,48 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
9864 MDIO_CTL_REG_84823_USER_CTRL_REG, val); 10097 MDIO_CTL_REG_84823_USER_CTRL_REG, val);
9865 } 10098 }
9866 10099
10100 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
10101 MDIO_84833_TOP_CFG_FW_REV, &val);
10102
10103 /* Configure EEE support */
10104 if ((val >= MDIO_84833_TOP_CFG_FW_EEE) && bnx2x_eee_has_cap(params)) {
10105 phy->flags |= FLAGS_EEE_10GBT;
10106 vars->eee_status |= SHMEM_EEE_10G_ADV <<
10107 SHMEM_EEE_SUPPORTED_SHIFT;
10108 /* Propogate params' bits --> vars (for migration exposure) */
10109 if (params->eee_mode & EEE_MODE_ENABLE_LPI)
10110 vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT;
10111 else
10112 vars->eee_status &= ~SHMEM_EEE_LPI_REQUESTED_BIT;
10113
10114 if (params->eee_mode & EEE_MODE_ADV_LPI)
10115 vars->eee_status |= SHMEM_EEE_REQUESTED_BIT;
10116 else
10117 vars->eee_status &= ~SHMEM_EEE_REQUESTED_BIT;
10118
10119 rc = bnx2x_8483x_eee_timers(params, vars);
10120 if (rc) {
10121 DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n");
10122 bnx2x_8483x_disable_eee(phy, params, vars);
10123 return rc;
10124 }
10125
10126 if ((params->req_duplex[actual_phy_selection] == DUPLEX_FULL) &&
10127 (params->eee_mode & EEE_MODE_ADV_LPI) &&
10128 (bnx2x_eee_calc_timer(params) ||
10129 !(params->eee_mode & EEE_MODE_ENABLE_LPI)))
10130 rc = bnx2x_8483x_enable_eee(phy, params, vars);
10131 else
10132 rc = bnx2x_8483x_disable_eee(phy, params, vars);
10133 if (rc) {
10134 DP(NETIF_MSG_LINK, "Failed to set EEE advertisment\n");
10135 return rc;
10136 }
10137 } else {
10138 phy->flags &= ~FLAGS_EEE_10GBT;
10139 vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
10140 }
10141
9867 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { 10142 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
9868 /* Bring PHY out of super isolate mode as the final step. */ 10143 /* Bring PHY out of super isolate mode as the final step. */
9869 bnx2x_cl45_read(bp, phy, 10144 bnx2x_cl45_read(bp, phy,
@@ -9918,17 +10193,19 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
9918 DP(NETIF_MSG_LINK, "Legacy speed status = 0x%x\n", 10193 DP(NETIF_MSG_LINK, "Legacy speed status = 0x%x\n",
9919 legacy_status); 10194 legacy_status);
9920 link_up = ((legacy_status & (1<<11)) == (1<<11)); 10195 link_up = ((legacy_status & (1<<11)) == (1<<11));
9921 if (link_up) { 10196 legacy_speed = (legacy_status & (3<<9));
9922 legacy_speed = (legacy_status & (3<<9)); 10197 if (legacy_speed == (0<<9))
9923 if (legacy_speed == (0<<9)) 10198 vars->line_speed = SPEED_10;
9924 vars->line_speed = SPEED_10; 10199 else if (legacy_speed == (1<<9))
9925 else if (legacy_speed == (1<<9)) 10200 vars->line_speed = SPEED_100;
9926 vars->line_speed = SPEED_100; 10201 else if (legacy_speed == (2<<9))
9927 else if (legacy_speed == (2<<9)) 10202 vars->line_speed = SPEED_1000;
9928 vars->line_speed = SPEED_1000; 10203 else { /* Should not happen: Treat as link down */
9929 else /* Should not happen */ 10204 vars->line_speed = 0;
9930 vars->line_speed = 0; 10205 link_up = 0;
10206 }
9931 10207
10208 if (link_up) {
9932 if (legacy_status & (1<<8)) 10209 if (legacy_status & (1<<8))
9933 vars->duplex = DUPLEX_FULL; 10210 vars->duplex = DUPLEX_FULL;
9934 else 10211 else
@@ -9956,7 +10233,7 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
9956 } 10233 }
9957 } 10234 }
9958 if (link_up) { 10235 if (link_up) {
9959 DP(NETIF_MSG_LINK, "BCM84823: link speed is %d\n", 10236 DP(NETIF_MSG_LINK, "BCM848x3: link speed is %d\n",
9960 vars->line_speed); 10237 vars->line_speed);
9961 bnx2x_ext_phy_resolve_fc(phy, params, vars); 10238 bnx2x_ext_phy_resolve_fc(phy, params, vars);
9962 10239
@@ -9995,6 +10272,31 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
9995 if (val & (1<<11)) 10272 if (val & (1<<11))
9996 vars->link_status |= 10273 vars->link_status |=
9997 LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; 10274 LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
10275
10276 /* Determine if EEE was negotiated */
10277 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
10278 u32 eee_shmem = 0;
10279
10280 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
10281 MDIO_AN_REG_EEE_ADV, &val1);
10282 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
10283 MDIO_AN_REG_LP_EEE_ADV, &val2);
10284 if ((val1 & val2) & 0x8) {
10285 DP(NETIF_MSG_LINK, "EEE negotiated\n");
10286 vars->eee_status |= SHMEM_EEE_ACTIVE_BIT;
10287 }
10288
10289 if (val2 & 0x12)
10290 eee_shmem |= SHMEM_EEE_100M_ADV;
10291 if (val2 & 0x4)
10292 eee_shmem |= SHMEM_EEE_1G_ADV;
10293 if (val2 & 0x68)
10294 eee_shmem |= SHMEM_EEE_10G_ADV;
10295
10296 vars->eee_status &= ~SHMEM_EEE_LP_ADV_STATUS_MASK;
10297 vars->eee_status |= (eee_shmem <<
10298 SHMEM_EEE_LP_ADV_STATUS_SHIFT);
10299 }
9998 } 10300 }
9999 10301
10000 return link_up; 10302 return link_up;
@@ -10273,7 +10575,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
10273 u32 cfg_pin; 10575 u32 cfg_pin;
10274 10576
10275 DP(NETIF_MSG_LINK, "54618SE cfg init\n"); 10577 DP(NETIF_MSG_LINK, "54618SE cfg init\n");
10276 usleep_range(1000, 1000); 10578 usleep_range(1000, 2000);
10277 10579
10278 /* This works with E3 only, no need to check the chip 10580 /* This works with E3 only, no need to check the chip
10279 * before determining the port. 10581 * before determining the port.
@@ -10342,7 +10644,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
10342 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) 10644 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
10343 fc_val |= MDIO_AN_REG_ADV_PAUSE_PAUSE; 10645 fc_val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
10344 10646
10345 /* read all advertisement */ 10647 /* Read all advertisement */
10346 bnx2x_cl22_read(bp, phy, 10648 bnx2x_cl22_read(bp, phy,
10347 0x09, 10649 0x09,
10348 &an_1000_val); 10650 &an_1000_val);
@@ -10379,7 +10681,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
10379 0x09, 10681 0x09,
10380 &an_1000_val); 10682 &an_1000_val);
10381 10683
10382 /* set 100 speed advertisement */ 10684 /* Set 100 speed advertisement */
10383 if (((phy->req_line_speed == SPEED_AUTO_NEG) && 10685 if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
10384 (phy->speed_cap_mask & 10686 (phy->speed_cap_mask &
10385 (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | 10687 (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
@@ -10393,7 +10695,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
10393 DP(NETIF_MSG_LINK, "Advertising 100M\n"); 10695 DP(NETIF_MSG_LINK, "Advertising 100M\n");
10394 } 10696 }
10395 10697
10396 /* set 10 speed advertisement */ 10698 /* Set 10 speed advertisement */
10397 if (((phy->req_line_speed == SPEED_AUTO_NEG) && 10699 if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
10398 (phy->speed_cap_mask & 10700 (phy->speed_cap_mask &
10399 (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL | 10701 (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
@@ -10532,7 +10834,7 @@ static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy,
10532 10834
10533 /* Get speed operation status */ 10835 /* Get speed operation status */
10534 bnx2x_cl22_read(bp, phy, 10836 bnx2x_cl22_read(bp, phy,
10535 0x19, 10837 MDIO_REG_GPHY_AUX_STATUS,
10536 &legacy_status); 10838 &legacy_status);
10537 DP(NETIF_MSG_LINK, "54618SE read_status: 0x%x\n", legacy_status); 10839 DP(NETIF_MSG_LINK, "54618SE read_status: 0x%x\n", legacy_status);
10538 10840
@@ -10759,7 +11061,7 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
10759 DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n", 11061 DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n",
10760 val2, val1); 11062 val2, val1);
10761 link_up = ((val1 & 4) == 4); 11063 link_up = ((val1 & 4) == 4);
10762 /* if link is up print the AN outcome of the SFX7101 PHY */ 11064 /* If link is up print the AN outcome of the SFX7101 PHY */
10763 if (link_up) { 11065 if (link_up) {
10764 bnx2x_cl45_read(bp, phy, 11066 bnx2x_cl45_read(bp, phy,
10765 MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS, 11067 MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
@@ -10771,7 +11073,7 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
10771 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); 11073 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
10772 bnx2x_ext_phy_resolve_fc(phy, params, vars); 11074 bnx2x_ext_phy_resolve_fc(phy, params, vars);
10773 11075
10774 /* read LP advertised speeds */ 11076 /* Read LP advertised speeds */
10775 if (val2 & (1<<11)) 11077 if (val2 & (1<<11))
10776 vars->link_status |= 11078 vars->link_status |=
10777 LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; 11079 LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
@@ -11090,7 +11392,7 @@ static struct bnx2x_phy phy_8706 = {
11090 SUPPORTED_FIBRE | 11392 SUPPORTED_FIBRE |
11091 SUPPORTED_Pause | 11393 SUPPORTED_Pause |
11092 SUPPORTED_Asym_Pause), 11394 SUPPORTED_Asym_Pause),
11093 .media_type = ETH_PHY_SFP_FIBER, 11395 .media_type = ETH_PHY_SFPP_10G_FIBER,
11094 .ver_addr = 0, 11396 .ver_addr = 0,
11095 .req_flow_ctrl = 0, 11397 .req_flow_ctrl = 0,
11096 .req_line_speed = 0, 11398 .req_line_speed = 0,
@@ -11249,7 +11551,8 @@ static struct bnx2x_phy phy_84833 = {
11249 .def_md_devad = 0, 11551 .def_md_devad = 0,
11250 .flags = (FLAGS_FAN_FAILURE_DET_REQ | 11552 .flags = (FLAGS_FAN_FAILURE_DET_REQ |
11251 FLAGS_REARM_LATCH_SIGNAL | 11553 FLAGS_REARM_LATCH_SIGNAL |
11252 FLAGS_TX_ERROR_CHECK), 11554 FLAGS_TX_ERROR_CHECK |
11555 FLAGS_EEE_10GBT),
11253 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11556 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11254 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11557 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11255 .mdio_ctrl = 0, 11558 .mdio_ctrl = 0,
@@ -11428,7 +11731,7 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
11428 SUPPORTED_FIBRE | 11731 SUPPORTED_FIBRE |
11429 SUPPORTED_Pause | 11732 SUPPORTED_Pause |
11430 SUPPORTED_Asym_Pause); 11733 SUPPORTED_Asym_Pause);
11431 phy->media_type = ETH_PHY_SFP_FIBER; 11734 phy->media_type = ETH_PHY_SFPP_10G_FIBER;
11432 break; 11735 break;
11433 case PORT_HW_CFG_NET_SERDES_IF_KR: 11736 case PORT_HW_CFG_NET_SERDES_IF_KR:
11434 phy->media_type = ETH_PHY_KR; 11737 phy->media_type = ETH_PHY_KR;
@@ -11968,7 +12271,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
11968 vars->mac_type = MAC_TYPE_NONE; 12271 vars->mac_type = MAC_TYPE_NONE;
11969 vars->phy_flags = 0; 12272 vars->phy_flags = 0;
11970 12273
11971 /* disable attentions */ 12274 /* Disable attentions */
11972 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4, 12275 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
11973 (NIG_MASK_XGXS0_LINK_STATUS | 12276 (NIG_MASK_XGXS0_LINK_STATUS |
11974 NIG_MASK_XGXS0_LINK10G | 12277 NIG_MASK_XGXS0_LINK10G |
@@ -12017,6 +12320,8 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
12017 break; 12320 break;
12018 } 12321 }
12019 bnx2x_update_mng(params, vars->link_status); 12322 bnx2x_update_mng(params, vars->link_status);
12323
12324 bnx2x_update_mng_eee(params, vars->eee_status);
12020 return 0; 12325 return 0;
12021} 12326}
12022 12327
@@ -12026,19 +12331,22 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
12026 struct bnx2x *bp = params->bp; 12331 struct bnx2x *bp = params->bp;
12027 u8 phy_index, port = params->port, clear_latch_ind = 0; 12332 u8 phy_index, port = params->port, clear_latch_ind = 0;
12028 DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port); 12333 DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
12029 /* disable attentions */ 12334 /* Disable attentions */
12030 vars->link_status = 0; 12335 vars->link_status = 0;
12031 bnx2x_update_mng(params, vars->link_status); 12336 bnx2x_update_mng(params, vars->link_status);
12337 vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK |
12338 SHMEM_EEE_ACTIVE_BIT);
12339 bnx2x_update_mng_eee(params, vars->eee_status);
12032 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 12340 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
12033 (NIG_MASK_XGXS0_LINK_STATUS | 12341 (NIG_MASK_XGXS0_LINK_STATUS |
12034 NIG_MASK_XGXS0_LINK10G | 12342 NIG_MASK_XGXS0_LINK10G |
12035 NIG_MASK_SERDES0_LINK_STATUS | 12343 NIG_MASK_SERDES0_LINK_STATUS |
12036 NIG_MASK_MI_INT)); 12344 NIG_MASK_MI_INT));
12037 12345
12038 /* activate nig drain */ 12346 /* Activate nig drain */
12039 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); 12347 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
12040 12348
12041 /* disable nig egress interface */ 12349 /* Disable nig egress interface */
12042 if (!CHIP_IS_E3(bp)) { 12350 if (!CHIP_IS_E3(bp)) {
12043 REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0); 12351 REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0);
12044 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0); 12352 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
@@ -12051,15 +12359,15 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
12051 bnx2x_xmac_disable(params); 12359 bnx2x_xmac_disable(params);
12052 bnx2x_umac_disable(params); 12360 bnx2x_umac_disable(params);
12053 } 12361 }
12054 /* disable emac */ 12362 /* Disable emac */
12055 if (!CHIP_IS_E3(bp)) 12363 if (!CHIP_IS_E3(bp))
12056 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); 12364 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
12057 12365
12058 msleep(10); 12366 usleep_range(10000, 20000);
12059 /* The PHY reset is controlled by GPIO 1 12367 /* The PHY reset is controlled by GPIO 1
12060 * Hold it as vars low 12368 * Hold it as vars low
12061 */ 12369 */
12062 /* clear link led */ 12370 /* Clear link led */
12063 bnx2x_set_mdio_clk(bp, params->chip_id, port); 12371 bnx2x_set_mdio_clk(bp, params->chip_id, port);
12064 bnx2x_set_led(params, vars, LED_MODE_OFF, 0); 12372 bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
12065 12373
@@ -12089,9 +12397,9 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
12089 params->phy[INT_PHY].link_reset( 12397 params->phy[INT_PHY].link_reset(
12090 &params->phy[INT_PHY], params); 12398 &params->phy[INT_PHY], params);
12091 12399
12092 /* disable nig ingress interface */ 12400 /* Disable nig ingress interface */
12093 if (!CHIP_IS_E3(bp)) { 12401 if (!CHIP_IS_E3(bp)) {
12094 /* reset BigMac */ 12402 /* Reset BigMac */
12095 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 12403 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
12096 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); 12404 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
12097 REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0); 12405 REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0);
@@ -12148,7 +12456,7 @@ static int bnx2x_8073_common_init_phy(struct bnx2x *bp,
12148 DP(NETIF_MSG_LINK, "populate_phy failed\n"); 12456 DP(NETIF_MSG_LINK, "populate_phy failed\n");
12149 return -EINVAL; 12457 return -EINVAL;
12150 } 12458 }
12151 /* disable attentions */ 12459 /* Disable attentions */
12152 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + 12460 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
12153 port_of_path*4, 12461 port_of_path*4,
12154 (NIG_MASK_XGXS0_LINK_STATUS | 12462 (NIG_MASK_XGXS0_LINK_STATUS |
@@ -12222,7 +12530,7 @@ static int bnx2x_8073_common_init_phy(struct bnx2x *bp,
12222 bnx2x_cl45_write(bp, phy_blk[port], 12530 bnx2x_cl45_write(bp, phy_blk[port],
12223 MDIO_PMA_DEVAD, 12531 MDIO_PMA_DEVAD,
12224 MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10)))); 12532 MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
12225 msleep(15); 12533 usleep_range(15000, 30000);
12226 12534
12227 /* Read modify write the SPI-ROM version select register */ 12535 /* Read modify write the SPI-ROM version select register */
12228 bnx2x_cl45_read(bp, phy_blk[port], 12536 bnx2x_cl45_read(bp, phy_blk[port],
@@ -12254,7 +12562,7 @@ static int bnx2x_8726_common_init_phy(struct bnx2x *bp,
12254 REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val); 12562 REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
12255 12563
12256 bnx2x_ext_phy_hw_reset(bp, 0); 12564 bnx2x_ext_phy_hw_reset(bp, 0);
12257 msleep(5); 12565 usleep_range(5000, 10000);
12258 for (port = 0; port < PORT_MAX; port++) { 12566 for (port = 0; port < PORT_MAX; port++) {
12259 u32 shmem_base, shmem2_base; 12567 u32 shmem_base, shmem2_base;
12260 12568
@@ -12361,11 +12669,11 @@ static int bnx2x_8727_common_init_phy(struct bnx2x *bp,
12361 /* Initiate PHY reset*/ 12669 /* Initiate PHY reset*/
12362 bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW, 12670 bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
12363 port); 12671 port);
12364 msleep(1); 12672 usleep_range(1000, 2000);
12365 bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH, 12673 bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
12366 port); 12674 port);
12367 12675
12368 msleep(5); 12676 usleep_range(5000, 10000);
12369 12677
12370 /* PART1 - Reset both phys */ 12678 /* PART1 - Reset both phys */
12371 for (port = PORT_MAX - 1; port >= PORT_0; port--) { 12679 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
@@ -12459,7 +12767,7 @@ static int bnx2x_84833_pre_init_phy(struct bnx2x *bp,
12459 MDIO_PMA_REG_CTRL, &val); 12767 MDIO_PMA_REG_CTRL, &val);
12460 if (!(val & (1<<15))) 12768 if (!(val & (1<<15)))
12461 break; 12769 break;
12462 msleep(1); 12770 usleep_range(1000, 2000);
12463 } 12771 }
12464 if (cnt >= 1500) { 12772 if (cnt >= 1500) {
12465 DP(NETIF_MSG_LINK, "84833 reset timeout\n"); 12773 DP(NETIF_MSG_LINK, "84833 reset timeout\n");
@@ -12549,7 +12857,7 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
12549 break; 12857 break;
12550 } 12858 }
12551 12859
12552 if (rc != 0) 12860 if (rc)
12553 netdev_err(bp->dev, "Warning: PHY was not initialized," 12861 netdev_err(bp->dev, "Warning: PHY was not initialized,"
12554 " Port %d\n", 12862 " Port %d\n",
12555 0); 12863 0);
@@ -12630,30 +12938,41 @@ static void bnx2x_check_over_curr(struct link_params *params,
12630 vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG; 12938 vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG;
12631} 12939}
12632 12940
12633static void bnx2x_analyze_link_error(struct link_params *params, 12941/* Returns 0 if no change occured since last check; 1 otherwise. */
12634 struct link_vars *vars, u32 lss_status, 12942static u8 bnx2x_analyze_link_error(struct link_params *params,
12635 u8 notify) 12943 struct link_vars *vars, u32 status,
12944 u32 phy_flag, u32 link_flag, u8 notify)
12636{ 12945{
12637 struct bnx2x *bp = params->bp; 12946 struct bnx2x *bp = params->bp;
12638 /* Compare new value with previous value */ 12947 /* Compare new value with previous value */
12639 u8 led_mode; 12948 u8 led_mode;
12640 u32 half_open_conn = (vars->phy_flags & PHY_HALF_OPEN_CONN_FLAG) > 0; 12949 u32 old_status = (vars->phy_flags & phy_flag) ? 1 : 0;
12641 12950
12642 if ((lss_status ^ half_open_conn) == 0) 12951 if ((status ^ old_status) == 0)
12643 return; 12952 return 0;
12644 12953
12645 /* If values differ */ 12954 /* If values differ */
12646 DP(NETIF_MSG_LINK, "Link changed:%x %x->%x\n", vars->link_up, 12955 switch (phy_flag) {
12647 half_open_conn, lss_status); 12956 case PHY_HALF_OPEN_CONN_FLAG:
12957 DP(NETIF_MSG_LINK, "Analyze Remote Fault\n");
12958 break;
12959 case PHY_SFP_TX_FAULT_FLAG:
12960 DP(NETIF_MSG_LINK, "Analyze TX Fault\n");
12961 break;
12962 default:
12963 DP(NETIF_MSG_LINK, "Analyze UNKOWN\n");
12964 }
12965 DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up,
12966 old_status, status);
12648 12967
12649 /* a. Update shmem->link_status accordingly 12968 /* a. Update shmem->link_status accordingly
12650 * b. Update link_vars->link_up 12969 * b. Update link_vars->link_up
12651 */ 12970 */
12652 if (lss_status) { 12971 if (status) {
12653 DP(NETIF_MSG_LINK, "Remote Fault detected !!!\n");
12654 vars->link_status &= ~LINK_STATUS_LINK_UP; 12972 vars->link_status &= ~LINK_STATUS_LINK_UP;
12973 vars->link_status |= link_flag;
12655 vars->link_up = 0; 12974 vars->link_up = 0;
12656 vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; 12975 vars->phy_flags |= phy_flag;
12657 12976
12658 /* activate nig drain */ 12977 /* activate nig drain */
12659 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1); 12978 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1);
@@ -12662,10 +12981,10 @@ static void bnx2x_analyze_link_error(struct link_params *params,
12662 */ 12981 */
12663 led_mode = LED_MODE_OFF; 12982 led_mode = LED_MODE_OFF;
12664 } else { 12983 } else {
12665 DP(NETIF_MSG_LINK, "Remote Fault cleared\n");
12666 vars->link_status |= LINK_STATUS_LINK_UP; 12984 vars->link_status |= LINK_STATUS_LINK_UP;
12985 vars->link_status &= ~link_flag;
12667 vars->link_up = 1; 12986 vars->link_up = 1;
12668 vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG; 12987 vars->phy_flags &= ~phy_flag;
12669 led_mode = LED_MODE_OPER; 12988 led_mode = LED_MODE_OPER;
12670 12989
12671 /* Clear nig drain */ 12990 /* Clear nig drain */
@@ -12682,6 +13001,8 @@ static void bnx2x_analyze_link_error(struct link_params *params,
12682 vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT; 13001 vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT;
12683 if (notify) 13002 if (notify)
12684 bnx2x_notify_link_changed(bp); 13003 bnx2x_notify_link_changed(bp);
13004
13005 return 1;
12685} 13006}
12686 13007
12687/****************************************************************************** 13008/******************************************************************************
@@ -12723,7 +13044,9 @@ int bnx2x_check_half_open_conn(struct link_params *params,
12723 if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS)) 13044 if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS))
12724 lss_status = 1; 13045 lss_status = 1;
12725 13046
12726 bnx2x_analyze_link_error(params, vars, lss_status, notify); 13047 bnx2x_analyze_link_error(params, vars, lss_status,
13048 PHY_HALF_OPEN_CONN_FLAG,
13049 LINK_STATUS_NONE, notify);
12727 } else if (REG_RD(bp, MISC_REG_RESET_REG_2) & 13050 } else if (REG_RD(bp, MISC_REG_RESET_REG_2) &
12728 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) { 13051 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) {
12729 /* Check E1X / E2 BMAC */ 13052 /* Check E1X / E2 BMAC */
@@ -12740,11 +13063,55 @@ int bnx2x_check_half_open_conn(struct link_params *params,
12740 REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2); 13063 REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2);
12741 lss_status = (wb_data[0] > 0); 13064 lss_status = (wb_data[0] > 0);
12742 13065
12743 bnx2x_analyze_link_error(params, vars, lss_status, notify); 13066 bnx2x_analyze_link_error(params, vars, lss_status,
13067 PHY_HALF_OPEN_CONN_FLAG,
13068 LINK_STATUS_NONE, notify);
12744 } 13069 }
12745 return 0; 13070 return 0;
12746} 13071}
13072static void bnx2x_sfp_tx_fault_detection(struct bnx2x_phy *phy,
13073 struct link_params *params,
13074 struct link_vars *vars)
13075{
13076 struct bnx2x *bp = params->bp;
13077 u32 cfg_pin, value = 0;
13078 u8 led_change, port = params->port;
12747 13079
13080 /* Get The SFP+ TX_Fault controlling pin ([eg]pio) */
13081 cfg_pin = (REG_RD(bp, params->shmem_base + offsetof(struct shmem_region,
13082 dev_info.port_hw_config[port].e3_cmn_pin_cfg)) &
13083 PORT_HW_CFG_E3_TX_FAULT_MASK) >>
13084 PORT_HW_CFG_E3_TX_FAULT_SHIFT;
13085
13086 if (bnx2x_get_cfg_pin(bp, cfg_pin, &value)) {
13087 DP(NETIF_MSG_LINK, "Failed to read pin 0x%02x\n", cfg_pin);
13088 return;
13089 }
13090
13091 led_change = bnx2x_analyze_link_error(params, vars, value,
13092 PHY_SFP_TX_FAULT_FLAG,
13093 LINK_STATUS_SFP_TX_FAULT, 1);
13094
13095 if (led_change) {
13096 /* Change TX_Fault led, set link status for further syncs */
13097 u8 led_mode;
13098
13099 if (vars->phy_flags & PHY_SFP_TX_FAULT_FLAG) {
13100 led_mode = MISC_REGISTERS_GPIO_HIGH;
13101 vars->link_status |= LINK_STATUS_SFP_TX_FAULT;
13102 } else {
13103 led_mode = MISC_REGISTERS_GPIO_LOW;
13104 vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT;
13105 }
13106
13107 /* If module is unapproved, led should be on regardless */
13108 if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) {
13109 DP(NETIF_MSG_LINK, "Change TX_Fault LED: ->%x\n",
13110 led_mode);
13111 bnx2x_set_e3_module_fault_led(params, led_mode);
13112 }
13113 }
13114}
12748void bnx2x_period_func(struct link_params *params, struct link_vars *vars) 13115void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
12749{ 13116{
12750 u16 phy_idx; 13117 u16 phy_idx;
@@ -12763,7 +13130,26 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
12763 struct bnx2x_phy *phy = &params->phy[INT_PHY]; 13130 struct bnx2x_phy *phy = &params->phy[INT_PHY];
12764 bnx2x_set_aer_mmd(params, phy); 13131 bnx2x_set_aer_mmd(params, phy);
12765 bnx2x_check_over_curr(params, vars); 13132 bnx2x_check_over_curr(params, vars);
12766 bnx2x_warpcore_config_runtime(phy, params, vars); 13133 if (vars->rx_tx_asic_rst)
13134 bnx2x_warpcore_config_runtime(phy, params, vars);
13135
13136 if ((REG_RD(bp, params->shmem_base +
13137 offsetof(struct shmem_region, dev_info.
13138 port_hw_config[params->port].default_cfg))
13139 & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
13140 PORT_HW_CFG_NET_SERDES_IF_SFI) {
13141 if (bnx2x_is_sfp_module_plugged(phy, params)) {
13142 bnx2x_sfp_tx_fault_detection(phy, params, vars);
13143 } else if (vars->link_status &
13144 LINK_STATUS_SFP_TX_FAULT) {
13145 /* Clean trail, interrupt corrects the leds */
13146 vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT;
13147 vars->phy_flags &= ~PHY_SFP_TX_FAULT_FLAG;
13148 /* Update link status in the shared memory */
13149 bnx2x_update_mng(params, vars->link_status);
13150 }
13151 }
13152
12767 } 13153 }
12768 13154
12769} 13155}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index ea4371f4335f..51cac8130051 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -41,6 +41,7 @@
41#define SPEED_AUTO_NEG 0 41#define SPEED_AUTO_NEG 0
42#define SPEED_20000 20000 42#define SPEED_20000 20000
43 43
44#define SFP_EEPROM_PAGE_SIZE 16
44#define SFP_EEPROM_VENDOR_NAME_ADDR 0x14 45#define SFP_EEPROM_VENDOR_NAME_ADDR 0x14
45#define SFP_EEPROM_VENDOR_NAME_SIZE 16 46#define SFP_EEPROM_VENDOR_NAME_SIZE 16
46#define SFP_EEPROM_VENDOR_OUI_ADDR 0x25 47#define SFP_EEPROM_VENDOR_OUI_ADDR 0x25
@@ -125,6 +126,11 @@ typedef void (*set_link_led_t)(struct bnx2x_phy *phy,
125 struct link_params *params, u8 mode); 126 struct link_params *params, u8 mode);
126typedef void (*phy_specific_func_t)(struct bnx2x_phy *phy, 127typedef void (*phy_specific_func_t)(struct bnx2x_phy *phy,
127 struct link_params *params, u32 action); 128 struct link_params *params, u32 action);
129struct bnx2x_reg_set {
130 u8 devad;
131 u16 reg;
132 u16 val;
133};
128 134
129struct bnx2x_phy { 135struct bnx2x_phy {
130 u32 type; 136 u32 type;
@@ -149,6 +155,7 @@ struct bnx2x_phy {
149#define FLAGS_DUMMY_READ (1<<9) 155#define FLAGS_DUMMY_READ (1<<9)
150#define FLAGS_MDC_MDIO_WA_B0 (1<<10) 156#define FLAGS_MDC_MDIO_WA_B0 (1<<10)
151#define FLAGS_TX_ERROR_CHECK (1<<12) 157#define FLAGS_TX_ERROR_CHECK (1<<12)
158#define FLAGS_EEE_10GBT (1<<13)
152 159
153 /* preemphasis values for the rx side */ 160 /* preemphasis values for the rx side */
154 u16 rx_preemphasis[4]; 161 u16 rx_preemphasis[4];
@@ -162,14 +169,15 @@ struct bnx2x_phy {
162 u32 supported; 169 u32 supported;
163 170
164 u32 media_type; 171 u32 media_type;
165#define ETH_PHY_UNSPECIFIED 0x0 172#define ETH_PHY_UNSPECIFIED 0x0
166#define ETH_PHY_SFP_FIBER 0x1 173#define ETH_PHY_SFPP_10G_FIBER 0x1
167#define ETH_PHY_XFP_FIBER 0x2 174#define ETH_PHY_XFP_FIBER 0x2
168#define ETH_PHY_DA_TWINAX 0x3 175#define ETH_PHY_DA_TWINAX 0x3
169#define ETH_PHY_BASE_T 0x4 176#define ETH_PHY_BASE_T 0x4
170#define ETH_PHY_KR 0xf0 177#define ETH_PHY_SFP_1G_FIBER 0x5
171#define ETH_PHY_CX4 0xf1 178#define ETH_PHY_KR 0xf0
172#define ETH_PHY_NOT_PRESENT 0xff 179#define ETH_PHY_CX4 0xf1
180#define ETH_PHY_NOT_PRESENT 0xff
173 181
174 /* The address in which version is located*/ 182 /* The address in which version is located*/
175 u32 ver_addr; 183 u32 ver_addr;
@@ -265,6 +273,30 @@ struct link_params {
265 u8 num_phys; 273 u8 num_phys;
266 274
267 u8 rsrv; 275 u8 rsrv;
276
277 /* Used to configure the EEE Tx LPI timer, has several modes of
278 * operation, according to bits 29:28 -
279 * 2'b00: Timer will be configured by nvram, output will be the value
280 * from nvram.
281 * 2'b01: Timer will be configured by nvram, output will be in
282 * microseconds.
283 * 2'b10: bits 1:0 contain an nvram value which will be used instead
284 * of the one located in the nvram. Output will be that value.
285 * 2'b11: bits 19:0 contain the idle timer in microseconds; output
286 * will be in microseconds.
287 * Bits 31:30 should be 2'b11 in order for EEE to be enabled.
288 */
289 u32 eee_mode;
290#define EEE_MODE_NVRAM_BALANCED_TIME (0xa00)
291#define EEE_MODE_NVRAM_AGGRESSIVE_TIME (0x100)
292#define EEE_MODE_NVRAM_LATENCY_TIME (0x6000)
293#define EEE_MODE_NVRAM_MASK (0x3)
294#define EEE_MODE_TIMER_MASK (0xfffff)
295#define EEE_MODE_OUTPUT_TIME (1<<28)
296#define EEE_MODE_OVERRIDE_NVRAM (1<<29)
297#define EEE_MODE_ENABLE_LPI (1<<30)
298#define EEE_MODE_ADV_LPI (1<<31)
299
268 u16 hw_led_mode; /* part of the hw_config read from the shmem */ 300 u16 hw_led_mode; /* part of the hw_config read from the shmem */
269 u32 multi_phy_config; 301 u32 multi_phy_config;
270 302
@@ -282,6 +314,7 @@ struct link_vars {
282#define PHY_PHYSICAL_LINK_FLAG (1<<2) 314#define PHY_PHYSICAL_LINK_FLAG (1<<2)
283#define PHY_HALF_OPEN_CONN_FLAG (1<<3) 315#define PHY_HALF_OPEN_CONN_FLAG (1<<3)
284#define PHY_OVER_CURRENT_FLAG (1<<4) 316#define PHY_OVER_CURRENT_FLAG (1<<4)
317#define PHY_SFP_TX_FAULT_FLAG (1<<5)
285 318
286 u8 mac_type; 319 u8 mac_type;
287#define MAC_TYPE_NONE 0 320#define MAC_TYPE_NONE 0
@@ -301,6 +334,7 @@ struct link_vars {
301 334
302 /* The same definitions as the shmem parameter */ 335 /* The same definitions as the shmem parameter */
303 u32 link_status; 336 u32 link_status;
337 u32 eee_status;
304 u8 fault_detected; 338 u8 fault_detected;
305 u8 rsrv1; 339 u8 rsrv1;
306 u16 periodic_flags; 340 u16 periodic_flags;
@@ -459,8 +493,7 @@ struct bnx2x_ets_params {
459 struct bnx2x_ets_cos_params cos[DCBX_MAX_NUM_COS]; 493 struct bnx2x_ets_cos_params cos[DCBX_MAX_NUM_COS];
460}; 494};
461 495
462/** 496/* Used to update the PFC attributes in EMAC, BMAC, NIG and BRB
463 * Used to update the PFC attributes in EMAC, BMAC, NIG and BRB
464 * when link is already up 497 * when link is already up
465 */ 498 */
466int bnx2x_update_pfc(struct link_params *params, 499int bnx2x_update_pfc(struct link_params *params,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index f755a665dab3..08eca3ff7db6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -74,6 +74,8 @@
74#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw" 74#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
75#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw" 75#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
76 76
77#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
78
77/* Time in jiffies before concluding the transmitter is hung */ 79/* Time in jiffies before concluding the transmitter is hung */
78#define TX_TIMEOUT (5*HZ) 80#define TX_TIMEOUT (5*HZ)
79 81
@@ -104,7 +106,7 @@ MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
104 106
105#define INT_MODE_INTx 1 107#define INT_MODE_INTx 1
106#define INT_MODE_MSI 2 108#define INT_MODE_MSI 2
107static int int_mode; 109int int_mode;
108module_param(int_mode, int, 0); 110module_param(int_mode, int, 0);
109MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X " 111MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
110 "(1 INT#x; 2 MSI)"); 112 "(1 INT#x; 2 MSI)");
@@ -758,7 +760,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
758 /* Tx */ 760 /* Tx */
759 for_each_cos_in_tx_queue(fp, cos) 761 for_each_cos_in_tx_queue(fp, cos)
760 { 762 {
761 txdata = fp->txdata[cos]; 763 txdata = *fp->txdata_ptr[cos];
762 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n", 764 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
763 i, txdata.tx_pkt_prod, 765 i, txdata.tx_pkt_prod,
764 txdata.tx_pkt_cons, txdata.tx_bd_prod, 766 txdata.tx_pkt_cons, txdata.tx_bd_prod,
@@ -876,7 +878,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
876 for_each_tx_queue(bp, i) { 878 for_each_tx_queue(bp, i) {
877 struct bnx2x_fastpath *fp = &bp->fp[i]; 879 struct bnx2x_fastpath *fp = &bp->fp[i];
878 for_each_cos_in_tx_queue(fp, cos) { 880 for_each_cos_in_tx_queue(fp, cos) {
879 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; 881 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
880 882
881 start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10); 883 start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
882 end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245); 884 end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
@@ -1583,7 +1585,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
1583 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); 1585 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1584 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); 1586 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1585 enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX; 1587 enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
1586 struct bnx2x_queue_sp_obj *q_obj = &fp->q_obj; 1588 struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
1587 1589
1588 DP(BNX2X_MSG_SP, 1590 DP(BNX2X_MSG_SP,
1589 "fp %d cid %d got ramrod #%d state is %x type is %d\n", 1591 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
@@ -1710,7 +1712,7 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1710 /* Handle Rx or Tx according to SB id */ 1712 /* Handle Rx or Tx according to SB id */
1711 prefetch(fp->rx_cons_sb); 1713 prefetch(fp->rx_cons_sb);
1712 for_each_cos_in_tx_queue(fp, cos) 1714 for_each_cos_in_tx_queue(fp, cos)
1713 prefetch(fp->txdata[cos].tx_cons_sb); 1715 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1714 prefetch(&fp->sb_running_index[SM_RX_ID]); 1716 prefetch(&fp->sb_running_index[SM_RX_ID]);
1715 napi_schedule(&bnx2x_fp(bp, fp->index, napi)); 1717 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1716 status &= ~mask; 1718 status &= ~mask;
@@ -2124,6 +2126,11 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2124 } 2126 }
2125 } 2127 }
2126 2128
2129 if (load_mode == LOAD_LOOPBACK_EXT) {
2130 struct link_params *lp = &bp->link_params;
2131 lp->loopback_mode = LOOPBACK_EXT;
2132 }
2133
2127 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); 2134 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2128 2135
2129 bnx2x_release_phy_lock(bp); 2136 bnx2x_release_phy_lock(bp);
@@ -2916,7 +2923,7 @@ static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
2916 struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init, 2923 struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
2917 u8 cos) 2924 u8 cos)
2918{ 2925{
2919 txq_init->dscr_map = fp->txdata[cos].tx_desc_mapping; 2926 txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
2920 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos; 2927 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
2921 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; 2928 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2922 txq_init->fw_sb_id = fp->fw_sb_id; 2929 txq_init->fw_sb_id = fp->fw_sb_id;
@@ -3030,9 +3037,9 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
3030 memcpy(ether_stat->version, DRV_MODULE_VERSION, 3037 memcpy(ether_stat->version, DRV_MODULE_VERSION,
3031 ETH_STAT_INFO_VERSION_LEN - 1); 3038 ETH_STAT_INFO_VERSION_LEN - 1);
3032 3039
3033 bp->fp[0].mac_obj.get_n_elements(bp, &bp->fp[0].mac_obj, 3040 bp->sp_objs[0].mac_obj.get_n_elements(bp, &bp->sp_objs[0].mac_obj,
3034 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, 3041 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
3035 ether_stat->mac_local); 3042 ether_stat->mac_local);
3036 3043
3037 ether_stat->mtu_size = bp->dev->mtu; 3044 ether_stat->mtu_size = bp->dev->mtu;
3038 3045
@@ -3055,7 +3062,8 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3055 struct fcoe_stats_info *fcoe_stat = 3062 struct fcoe_stats_info *fcoe_stat =
3056 &bp->slowpath->drv_info_to_mcp.fcoe_stat; 3063 &bp->slowpath->drv_info_to_mcp.fcoe_stat;
3057 3064
3058 memcpy(fcoe_stat->mac_local, bp->fip_mac, ETH_ALEN); 3065 memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT,
3066 bp->fip_mac, ETH_ALEN);
3059 3067
3060 fcoe_stat->qos_priority = 3068 fcoe_stat->qos_priority =
3061 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE]; 3069 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
@@ -3063,11 +3071,11 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3063 /* insert FCoE stats from ramrod response */ 3071 /* insert FCoE stats from ramrod response */
3064 if (!NO_FCOE(bp)) { 3072 if (!NO_FCOE(bp)) {
3065 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats = 3073 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
3066 &bp->fw_stats_data->queue_stats[FCOE_IDX]. 3074 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3067 tstorm_queue_statistics; 3075 tstorm_queue_statistics;
3068 3076
3069 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats = 3077 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
3070 &bp->fw_stats_data->queue_stats[FCOE_IDX]. 3078 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3071 xstorm_queue_statistics; 3079 xstorm_queue_statistics;
3072 3080
3073 struct fcoe_statistics_params *fw_fcoe_stat = 3081 struct fcoe_statistics_params *fw_fcoe_stat =
@@ -3146,7 +3154,8 @@ static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3146 struct iscsi_stats_info *iscsi_stat = 3154 struct iscsi_stats_info *iscsi_stat =
3147 &bp->slowpath->drv_info_to_mcp.iscsi_stat; 3155 &bp->slowpath->drv_info_to_mcp.iscsi_stat;
3148 3156
3149 memcpy(iscsi_stat->mac_local, bp->cnic_eth_dev.iscsi_mac, ETH_ALEN); 3157 memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT,
3158 bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
3150 3159
3151 iscsi_stat->qos_priority = 3160 iscsi_stat->qos_priority =
3152 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI]; 3161 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
@@ -3176,6 +3185,12 @@ static void bnx2x_set_mf_bw(struct bnx2x *bp)
3176 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0); 3185 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
3177} 3186}
3178 3187
3188static void bnx2x_handle_eee_event(struct bnx2x *bp)
3189{
3190 DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
3191 bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
3192}
3193
3179static void bnx2x_handle_drv_info_req(struct bnx2x *bp) 3194static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
3180{ 3195{
3181 enum drv_info_opcode op_code; 3196 enum drv_info_opcode op_code;
@@ -3742,6 +3757,8 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3742 if (val & DRV_STATUS_AFEX_EVENT_MASK) 3757 if (val & DRV_STATUS_AFEX_EVENT_MASK)
3743 bnx2x_handle_afex_cmd(bp, 3758 bnx2x_handle_afex_cmd(bp,
3744 val & DRV_STATUS_AFEX_EVENT_MASK); 3759 val & DRV_STATUS_AFEX_EVENT_MASK);
3760 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
3761 bnx2x_handle_eee_event(bp);
3745 if (bp->link_vars.periodic_flags & 3762 if (bp->link_vars.periodic_flags &
3746 PERIODIC_FLAGS_LINK_EVENT) { 3763 PERIODIC_FLAGS_LINK_EVENT) {
3747 /* sync with link */ 3764 /* sync with link */
@@ -4615,11 +4632,11 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
4615 case BNX2X_FILTER_MAC_PENDING: 4632 case BNX2X_FILTER_MAC_PENDING:
4616 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n"); 4633 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
4617#ifdef BCM_CNIC 4634#ifdef BCM_CNIC
4618 if (cid == BNX2X_ISCSI_ETH_CID) 4635 if (cid == BNX2X_ISCSI_ETH_CID(bp))
4619 vlan_mac_obj = &bp->iscsi_l2_mac_obj; 4636 vlan_mac_obj = &bp->iscsi_l2_mac_obj;
4620 else 4637 else
4621#endif 4638#endif
4622 vlan_mac_obj = &bp->fp[cid].mac_obj; 4639 vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
4623 4640
4624 break; 4641 break;
4625 case BNX2X_FILTER_MCAST_PENDING: 4642 case BNX2X_FILTER_MCAST_PENDING:
@@ -4717,7 +4734,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
4717 for_each_eth_queue(bp, q) { 4734 for_each_eth_queue(bp, q) {
4718 /* Set the appropriate Queue object */ 4735 /* Set the appropriate Queue object */
4719 fp = &bp->fp[q]; 4736 fp = &bp->fp[q];
4720 queue_params.q_obj = &fp->q_obj; 4737 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
4721 4738
4722 /* send the ramrod */ 4739 /* send the ramrod */
4723 rc = bnx2x_queue_state_change(bp, &queue_params); 4740 rc = bnx2x_queue_state_change(bp, &queue_params);
@@ -4728,8 +4745,8 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
4728 4745
4729#ifdef BCM_CNIC 4746#ifdef BCM_CNIC
4730 if (!NO_FCOE(bp)) { 4747 if (!NO_FCOE(bp)) {
4731 fp = &bp->fp[FCOE_IDX]; 4748 fp = &bp->fp[FCOE_IDX(bp)];
4732 queue_params.q_obj = &fp->q_obj; 4749 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
4733 4750
4734 /* clear pending completion bit */ 4751 /* clear pending completion bit */
4735 __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags); 4752 __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
@@ -4761,11 +4778,11 @@ static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
4761{ 4778{
4762 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid); 4779 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
4763#ifdef BCM_CNIC 4780#ifdef BCM_CNIC
4764 if (cid == BNX2X_FCOE_ETH_CID) 4781 if (cid == BNX2X_FCOE_ETH_CID(bp))
4765 return &bnx2x_fcoe(bp, q_obj); 4782 return &bnx2x_fcoe_sp_obj(bp, q_obj);
4766 else 4783 else
4767#endif 4784#endif
4768 return &bnx2x_fp(bp, CID_TO_FP(cid), q_obj); 4785 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
4769} 4786}
4770 4787
4771static void bnx2x_eq_int(struct bnx2x *bp) 4788static void bnx2x_eq_int(struct bnx2x *bp)
@@ -5647,15 +5664,15 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
5647 5664
5648 /* init tx data */ 5665 /* init tx data */
5649 for_each_cos_in_tx_queue(fp, cos) { 5666 for_each_cos_in_tx_queue(fp, cos) {
5650 bnx2x_init_txdata(bp, &fp->txdata[cos], 5667 bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
5651 CID_COS_TO_TX_ONLY_CID(fp->cid, cos), 5668 CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
5652 FP_COS_TO_TXQ(fp, cos), 5669 FP_COS_TO_TXQ(fp, cos, bp),
5653 BNX2X_TX_SB_INDEX_BASE + cos); 5670 BNX2X_TX_SB_INDEX_BASE + cos, fp);
5654 cids[cos] = fp->txdata[cos].cid; 5671 cids[cos] = fp->txdata_ptr[cos]->cid;
5655 } 5672 }
5656 5673
5657 bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, fp->max_cos, 5674 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
5658 BP_FUNC(bp), bnx2x_sp(bp, q_rdata), 5675 fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
5659 bnx2x_sp_mapping(bp, q_rdata), q_type); 5676 bnx2x_sp_mapping(bp, q_rdata), q_type);
5660 5677
5661 /** 5678 /**
@@ -5706,7 +5723,7 @@ static void bnx2x_init_tx_rings(struct bnx2x *bp)
5706 5723
5707 for_each_tx_queue(bp, i) 5724 for_each_tx_queue(bp, i)
5708 for_each_cos_in_tx_queue(&bp->fp[i], cos) 5725 for_each_cos_in_tx_queue(&bp->fp[i], cos)
5709 bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]); 5726 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
5710} 5727}
5711 5728
5712void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) 5729void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
@@ -7055,12 +7072,10 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
7055 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; 7072 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
7056 7073
7057 for (i = 0; i < L2_ILT_LINES(bp); i++) { 7074 for (i = 0; i < L2_ILT_LINES(bp); i++) {
7058 ilt->lines[cdu_ilt_start + i].page = 7075 ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
7059 bp->context.vcxt + (ILT_PAGE_CIDS * i);
7060 ilt->lines[cdu_ilt_start + i].page_mapping = 7076 ilt->lines[cdu_ilt_start + i].page_mapping =
7061 bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i); 7077 bp->context[i].cxt_mapping;
7062 /* cdu ilt pages are allocated manually so there's no need to 7078 ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
7063 set the size */
7064 } 7079 }
7065 bnx2x_ilt_init_op(bp, INITOP_SET); 7080 bnx2x_ilt_init_op(bp, INITOP_SET);
7066 7081
@@ -7327,6 +7342,8 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
7327 7342
7328void bnx2x_free_mem(struct bnx2x *bp) 7343void bnx2x_free_mem(struct bnx2x *bp)
7329{ 7344{
7345 int i;
7346
7330 /* fastpath */ 7347 /* fastpath */
7331 bnx2x_free_fp_mem(bp); 7348 bnx2x_free_fp_mem(bp);
7332 /* end of fastpath */ 7349 /* end of fastpath */
@@ -7340,9 +7357,9 @@ void bnx2x_free_mem(struct bnx2x *bp)
7340 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, 7357 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
7341 sizeof(struct bnx2x_slowpath)); 7358 sizeof(struct bnx2x_slowpath));
7342 7359
7343 BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping, 7360 for (i = 0; i < L2_ILT_LINES(bp); i++)
7344 bp->context.size); 7361 BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
7345 7362 bp->context[i].size);
7346 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE); 7363 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
7347 7364
7348 BNX2X_FREE(bp->ilt->lines); 7365 BNX2X_FREE(bp->ilt->lines);
@@ -7428,6 +7445,8 @@ alloc_mem_err:
7428 7445
7429int bnx2x_alloc_mem(struct bnx2x *bp) 7446int bnx2x_alloc_mem(struct bnx2x *bp)
7430{ 7447{
7448 int i, allocated, context_size;
7449
7431#ifdef BCM_CNIC 7450#ifdef BCM_CNIC
7432 if (!CHIP_IS_E1x(bp)) 7451 if (!CHIP_IS_E1x(bp))
7433 /* size = the status block + ramrod buffers */ 7452 /* size = the status block + ramrod buffers */
@@ -7457,11 +7476,29 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
7457 if (bnx2x_alloc_fw_stats_mem(bp)) 7476 if (bnx2x_alloc_fw_stats_mem(bp))
7458 goto alloc_mem_err; 7477 goto alloc_mem_err;
7459 7478
7460 bp->context.size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp); 7479 /* Allocate memory for CDU context:
7461 7480 * This memory is allocated separately and not in the generic ILT
7462 BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping, 7481 * functions because CDU differs in few aspects:
7463 bp->context.size); 7482 * 1. There are multiple entities allocating memory for context -
7483 * 'regular' driver, CNIC and SRIOV driver. Each separately controls
7484 * its own ILT lines.
7485 * 2. Since CDU page-size is not a single 4KB page (which is the case
7486 * for the other ILT clients), to be efficient we want to support
7487 * allocation of sub-page-size in the last entry.
7488 * 3. Context pointers are used by the driver to pass to FW / update
7489 * the context (for the other ILT clients the pointers are used just to
7490 * free the memory during unload).
7491 */
7492 context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
7464 7493
7494 for (i = 0, allocated = 0; allocated < context_size; i++) {
7495 bp->context[i].size = min(CDU_ILT_PAGE_SZ,
7496 (context_size - allocated));
7497 BNX2X_PCI_ALLOC(bp->context[i].vcxt,
7498 &bp->context[i].cxt_mapping,
7499 bp->context[i].size);
7500 allocated += bp->context[i].size;
7501 }
7465 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES); 7502 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
7466 7503
7467 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC)) 7504 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
@@ -7563,8 +7600,8 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
7563 7600
7564 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 7601 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
7565 /* Eth MAC is set on RSS leading client (fp[0]) */ 7602 /* Eth MAC is set on RSS leading client (fp[0]) */
7566 return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj, set, 7603 return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->sp_objs->mac_obj,
7567 BNX2X_ETH_MAC, &ramrod_flags); 7604 set, BNX2X_ETH_MAC, &ramrod_flags);
7568} 7605}
7569 7606
7570int bnx2x_setup_leading(struct bnx2x *bp) 7607int bnx2x_setup_leading(struct bnx2x *bp)
@@ -7579,7 +7616,7 @@ int bnx2x_setup_leading(struct bnx2x *bp)
7579 * 7616 *
7580 * In case of MSI-X it will also try to enable MSI-X. 7617 * In case of MSI-X it will also try to enable MSI-X.
7581 */ 7618 */
7582static void __devinit bnx2x_set_int_mode(struct bnx2x *bp) 7619void bnx2x_set_int_mode(struct bnx2x *bp)
7583{ 7620{
7584 switch (int_mode) { 7621 switch (int_mode) {
7585 case INT_MODE_MSI: 7622 case INT_MODE_MSI:
@@ -7590,11 +7627,6 @@ static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
7590 BNX2X_DEV_INFO("set number of queues to 1\n"); 7627 BNX2X_DEV_INFO("set number of queues to 1\n");
7591 break; 7628 break;
7592 default: 7629 default:
7593 /* Set number of queues for MSI-X mode */
7594 bnx2x_set_num_queues(bp);
7595
7596 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
7597
7598 /* if we can't use MSI-X we only need one fp, 7630 /* if we can't use MSI-X we only need one fp,
7599 * so try to enable MSI-X with the requested number of fp's 7631 * so try to enable MSI-X with the requested number of fp's
7600 * and fallback to MSI or legacy INTx with one fp 7632 * and fallback to MSI or legacy INTx with one fp
@@ -7735,6 +7767,8 @@ static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
7735{ 7767{
7736 7768
7737 u8 cos; 7769 u8 cos;
7770 int cxt_index, cxt_offset;
7771
7738 /* FCoE Queue uses Default SB, thus has no HC capabilities */ 7772 /* FCoE Queue uses Default SB, thus has no HC capabilities */
7739 if (!IS_FCOE_FP(fp)) { 7773 if (!IS_FCOE_FP(fp)) {
7740 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags); 7774 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
@@ -7771,9 +7805,13 @@ static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
7771 fp->index, init_params->max_cos); 7805 fp->index, init_params->max_cos);
7772 7806
7773 /* set the context pointers queue object */ 7807 /* set the context pointers queue object */
7774 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) 7808 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
7809 cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS;
7810 cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index *
7811 ILT_PAGE_CIDS);
7775 init_params->cxts[cos] = 7812 init_params->cxts[cos] =
7776 &bp->context.vcxt[fp->txdata[cos].cid].eth; 7813 &bp->context[cxt_index].vcxt[cxt_offset].eth;
7814 }
7777} 7815}
7778 7816
7779int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp, 7817int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
@@ -7838,7 +7876,7 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
7838 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, 7876 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
7839 IGU_INT_ENABLE, 0); 7877 IGU_INT_ENABLE, 0);
7840 7878
7841 q_params.q_obj = &fp->q_obj; 7879 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
7842 /* We want to wait for completion in this context */ 7880 /* We want to wait for completion in this context */
7843 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 7881 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
7844 7882
@@ -7911,7 +7949,7 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index)
7911 7949
7912 DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid); 7950 DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
7913 7951
7914 q_params.q_obj = &fp->q_obj; 7952 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
7915 /* We want to wait for completion in this context */ 7953 /* We want to wait for completion in this context */
7916 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 7954 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
7917 7955
@@ -7922,7 +7960,7 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index)
7922 tx_index++){ 7960 tx_index++){
7923 7961
7924 /* ascertain this is a normal queue*/ 7962 /* ascertain this is a normal queue*/
7925 txdata = &fp->txdata[tx_index]; 7963 txdata = fp->txdata_ptr[tx_index];
7926 7964
7927 DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n", 7965 DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
7928 txdata->txq_index); 7966 txdata->txq_index);
@@ -8289,7 +8327,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
8289 struct bnx2x_fastpath *fp = &bp->fp[i]; 8327 struct bnx2x_fastpath *fp = &bp->fp[i];
8290 8328
8291 for_each_cos_in_tx_queue(fp, cos) 8329 for_each_cos_in_tx_queue(fp, cos)
8292 rc = bnx2x_clean_tx_queue(bp, &fp->txdata[cos]); 8330 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
8293#ifdef BNX2X_STOP_ON_ERROR 8331#ifdef BNX2X_STOP_ON_ERROR
8294 if (rc) 8332 if (rc)
8295 return; 8333 return;
@@ -8300,12 +8338,13 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
8300 usleep_range(1000, 1000); 8338 usleep_range(1000, 1000);
8301 8339
8302 /* Clean all ETH MACs */ 8340 /* Clean all ETH MACs */
8303 rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_ETH_MAC, false); 8341 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
8342 false);
8304 if (rc < 0) 8343 if (rc < 0)
8305 BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc); 8344 BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
8306 8345
8307 /* Clean up UC list */ 8346 /* Clean up UC list */
8308 rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_UC_LIST_MAC, 8347 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
8309 true); 8348 true);
8310 if (rc < 0) 8349 if (rc < 0)
8311 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n", 8350 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
@@ -9697,6 +9736,11 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
9697 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ? 9736 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
9698 BC_SUPPORTS_PFC_STATS : 0; 9737 BC_SUPPORTS_PFC_STATS : 0;
9699 9738
9739 bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ?
9740 BC_SUPPORTS_FCOE_FEATURES : 0;
9741
9742 bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
9743 BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
9700 boot_mode = SHMEM_RD(bp, 9744 boot_mode = SHMEM_RD(bp,
9701 dev_info.port_feature_config[BP_PORT(bp)].mba_config) & 9745 dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
9702 PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK; 9746 PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
@@ -10082,7 +10126,7 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
10082{ 10126{
10083 int port = BP_PORT(bp); 10127 int port = BP_PORT(bp);
10084 u32 config; 10128 u32 config;
10085 u32 ext_phy_type, ext_phy_config; 10129 u32 ext_phy_type, ext_phy_config, eee_mode;
10086 10130
10087 bp->link_params.bp = bp; 10131 bp->link_params.bp = bp;
10088 bp->link_params.port = port; 10132 bp->link_params.port = port;
@@ -10149,6 +10193,19 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
10149 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp, 10193 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
10150 bp->common.shmem_base, 10194 bp->common.shmem_base,
10151 bp->common.shmem2_base); 10195 bp->common.shmem2_base);
10196
10197 /* Configure link feature according to nvram value */
10198 eee_mode = (((SHMEM_RD(bp, dev_info.
10199 port_feature_config[port].eee_power_mode)) &
10200 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
10201 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
10202 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
10203 bp->link_params.eee_mode = EEE_MODE_ADV_LPI |
10204 EEE_MODE_ENABLE_LPI |
10205 EEE_MODE_OUTPUT_TIME;
10206 } else {
10207 bp->link_params.eee_mode = 0;
10208 }
10152} 10209}
10153 10210
10154void bnx2x_get_iscsi_info(struct bnx2x *bp) 10211void bnx2x_get_iscsi_info(struct bnx2x *bp)
@@ -10997,7 +11054,7 @@ static int bnx2x_set_uc_list(struct bnx2x *bp)
10997 int rc; 11054 int rc;
10998 struct net_device *dev = bp->dev; 11055 struct net_device *dev = bp->dev;
10999 struct netdev_hw_addr *ha; 11056 struct netdev_hw_addr *ha;
11000 struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj; 11057 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
11001 unsigned long ramrod_flags = 0; 11058 unsigned long ramrod_flags = 0;
11002 11059
11003 /* First schedule a cleanup up of old configuration */ 11060 /* First schedule a cleanup up of old configuration */
@@ -11503,8 +11560,7 @@ static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11503 } 11560 }
11504} 11561}
11505 11562
11506/** 11563/* IRO array is stored in the following format:
11507 * IRO array is stored in the following format:
11508 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) } 11564 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
11509 */ 11565 */
11510static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n) 11566static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
@@ -11672,7 +11728,7 @@ void bnx2x__init_func_obj(struct bnx2x *bp)
11672/* must be called after sriov-enable */ 11728/* must be called after sriov-enable */
11673static int bnx2x_set_qm_cid_count(struct bnx2x *bp) 11729static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
11674{ 11730{
11675 int cid_count = BNX2X_L2_CID_COUNT(bp); 11731 int cid_count = BNX2X_L2_MAX_CID(bp);
11676 11732
11677#ifdef BCM_CNIC 11733#ifdef BCM_CNIC
11678 cid_count += CNIC_CID_MAX; 11734 cid_count += CNIC_CID_MAX;
@@ -11717,7 +11773,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11717 struct bnx2x *bp; 11773 struct bnx2x *bp;
11718 int pcie_width, pcie_speed; 11774 int pcie_width, pcie_speed;
11719 int rc, max_non_def_sbs; 11775 int rc, max_non_def_sbs;
11720 int rx_count, tx_count, rss_count; 11776 int rx_count, tx_count, rss_count, doorbell_size;
11721 /* 11777 /*
11722 * An estimated maximum supported CoS number according to the chip 11778 * An estimated maximum supported CoS number according to the chip
11723 * version. 11779 * version.
@@ -11760,13 +11816,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11760 11816
11761 max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev); 11817 max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev);
11762 11818
11763 /* !!! FIXME !!!
11764 * Do not allow the maximum SB count to grow above 16
11765 * since Special CIDs starts from 16*BNX2X_MULTI_TX_COS=48.
11766 * We will use the FP_SB_MAX_E1x macro for this matter.
11767 */
11768 max_non_def_sbs = min_t(int, FP_SB_MAX_E1x, max_non_def_sbs);
11769
11770 WARN_ON(!max_non_def_sbs); 11819 WARN_ON(!max_non_def_sbs);
11771 11820
11772 /* Maximum number of RSS queues: one IGU SB goes to CNIC */ 11821 /* Maximum number of RSS queues: one IGU SB goes to CNIC */
@@ -11777,9 +11826,9 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11777 11826
11778 /* 11827 /*
11779 * Maximum number of netdev Tx queues: 11828 * Maximum number of netdev Tx queues:
11780 * Maximum TSS queues * Maximum supported number of CoS + FCoE L2 11829 * Maximum TSS queues * Maximum supported number of CoS + FCoE L2
11781 */ 11830 */
11782 tx_count = MAX_TXQS_PER_COS * max_cos_est + FCOE_PRESENT; 11831 tx_count = rss_count * max_cos_est + FCOE_PRESENT;
11783 11832
11784 /* dev zeroed in init_etherdev */ 11833 /* dev zeroed in init_etherdev */
11785 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count); 11834 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
@@ -11788,9 +11837,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11788 11837
11789 bp = netdev_priv(dev); 11838 bp = netdev_priv(dev);
11790 11839
11791 BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
11792 tx_count, rx_count);
11793
11794 bp->igu_sb_cnt = max_non_def_sbs; 11840 bp->igu_sb_cnt = max_non_def_sbs;
11795 bp->msg_enable = debug; 11841 bp->msg_enable = debug;
11796 pci_set_drvdata(pdev, dev); 11842 pci_set_drvdata(pdev, dev);
@@ -11803,6 +11849,9 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11803 11849
11804 BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs); 11850 BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs);
11805 11851
11852 BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
11853 tx_count, rx_count);
11854
11806 rc = bnx2x_init_bp(bp); 11855 rc = bnx2x_init_bp(bp);
11807 if (rc) 11856 if (rc)
11808 goto init_one_exit; 11857 goto init_one_exit;
@@ -11811,9 +11860,15 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11811 * Map doorbels here as we need the real value of bp->max_cos which 11860 * Map doorbels here as we need the real value of bp->max_cos which
11812 * is initialized in bnx2x_init_bp(). 11861 * is initialized in bnx2x_init_bp().
11813 */ 11862 */
11863 doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
11864 if (doorbell_size > pci_resource_len(pdev, 2)) {
11865 dev_err(&bp->pdev->dev,
11866 "Cannot map doorbells, bar size too small, aborting\n");
11867 rc = -ENOMEM;
11868 goto init_one_exit;
11869 }
11814 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), 11870 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11815 min_t(u64, BNX2X_DB_SIZE(bp), 11871 doorbell_size);
11816 pci_resource_len(pdev, 2)));
11817 if (!bp->doorbells) { 11872 if (!bp->doorbells) {
11818 dev_err(&bp->pdev->dev, 11873 dev_err(&bp->pdev->dev,
11819 "Cannot map doorbell space, aborting\n"); 11874 "Cannot map doorbell space, aborting\n");
@@ -11831,8 +11886,12 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11831 11886
11832#endif 11887#endif
11833 11888
11889
11890 /* Set bp->num_queues for MSI-X mode*/
11891 bnx2x_set_num_queues(bp);
11892
11834 /* Configure interrupt mode: try to enable MSI-X/MSI if 11893 /* Configure interrupt mode: try to enable MSI-X/MSI if
11835 * needed, set bp->num_queues appropriately. 11894 * needed.
11836 */ 11895 */
11837 bnx2x_set_int_mode(bp); 11896 bnx2x_set_int_mode(bp);
11838 11897
@@ -12176,6 +12235,7 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
12176static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count) 12235static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
12177{ 12236{
12178 struct eth_spe *spe; 12237 struct eth_spe *spe;
12238 int cxt_index, cxt_offset;
12179 12239
12180#ifdef BNX2X_STOP_ON_ERROR 12240#ifdef BNX2X_STOP_ON_ERROR
12181 if (unlikely(bp->panic)) 12241 if (unlikely(bp->panic))
@@ -12198,10 +12258,16 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
12198 * ramrod 12258 * ramrod
12199 */ 12259 */
12200 if (type == ETH_CONNECTION_TYPE) { 12260 if (type == ETH_CONNECTION_TYPE) {
12201 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) 12261 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
12202 bnx2x_set_ctx_validation(bp, &bp->context. 12262 cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
12203 vcxt[BNX2X_ISCSI_ETH_CID].eth, 12263 ILT_PAGE_CIDS;
12204 BNX2X_ISCSI_ETH_CID); 12264 cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
12265 (cxt_index * ILT_PAGE_CIDS);
12266 bnx2x_set_ctx_validation(bp,
12267 &bp->context[cxt_index].
12268 vcxt[cxt_offset].eth,
12269 BNX2X_ISCSI_ETH_CID(bp));
12270 }
12205 } 12271 }
12206 12272
12207 /* 12273 /*
@@ -12488,21 +12554,45 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
12488 break; 12554 break;
12489 } 12555 }
12490 case DRV_CTL_ULP_REGISTER_CMD: { 12556 case DRV_CTL_ULP_REGISTER_CMD: {
12491 int ulp_type = ctl->data.ulp_type; 12557 int ulp_type = ctl->data.register_data.ulp_type;
12492 12558
12493 if (CHIP_IS_E3(bp)) { 12559 if (CHIP_IS_E3(bp)) {
12494 int idx = BP_FW_MB_IDX(bp); 12560 int idx = BP_FW_MB_IDX(bp);
12495 u32 cap; 12561 u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
12562 int path = BP_PATH(bp);
12563 int port = BP_PORT(bp);
12564 int i;
12565 u32 scratch_offset;
12566 u32 *host_addr;
12496 12567
12497 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]); 12568 /* first write capability to shmem2 */
12498 if (ulp_type == CNIC_ULP_ISCSI) 12569 if (ulp_type == CNIC_ULP_ISCSI)
12499 cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI; 12570 cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
12500 else if (ulp_type == CNIC_ULP_FCOE) 12571 else if (ulp_type == CNIC_ULP_FCOE)
12501 cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE; 12572 cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
12502 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap); 12573 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
12574
12575 if ((ulp_type != CNIC_ULP_FCOE) ||
12576 (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) ||
12577 (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES)))
12578 break;
12579
12580 /* if reached here - should write fcoe capabilities */
12581 scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr);
12582 if (!scratch_offset)
12583 break;
12584 scratch_offset += offsetof(struct glob_ncsi_oem_data,
12585 fcoe_features[path][port]);
12586 host_addr = (u32 *) &(ctl->data.register_data.
12587 fcoe_features);
12588 for (i = 0; i < sizeof(struct fcoe_capabilities);
12589 i += 4)
12590 REG_WR(bp, scratch_offset + i,
12591 *(host_addr + i/4));
12503 } 12592 }
12504 break; 12593 break;
12505 } 12594 }
12595
12506 case DRV_CTL_ULP_UNREGISTER_CMD: { 12596 case DRV_CTL_ULP_UNREGISTER_CMD: {
12507 int ulp_type = ctl->data.ulp_type; 12597 int ulp_type = ctl->data.ulp_type;
12508 12598
@@ -12554,6 +12644,21 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
12554 cp->num_irq = 2; 12644 cp->num_irq = 2;
12555} 12645}
12556 12646
12647void bnx2x_setup_cnic_info(struct bnx2x *bp)
12648{
12649 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12650
12651
12652 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
12653 bnx2x_cid_ilt_lines(bp);
12654 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
12655 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
12656 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
12657
12658 if (NO_ISCSI_OOO(bp))
12659 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
12660}
12661
12557static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops, 12662static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
12558 void *data) 12663 void *data)
12559{ 12664{
@@ -12632,10 +12737,10 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
12632 cp->drv_ctl = bnx2x_drv_ctl; 12737 cp->drv_ctl = bnx2x_drv_ctl;
12633 cp->drv_register_cnic = bnx2x_register_cnic; 12738 cp->drv_register_cnic = bnx2x_register_cnic;
12634 cp->drv_unregister_cnic = bnx2x_unregister_cnic; 12739 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
12635 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID; 12740 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
12636 cp->iscsi_l2_client_id = 12741 cp->iscsi_l2_client_id =
12637 bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX); 12742 bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
12638 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID; 12743 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
12639 12744
12640 if (NO_ISCSI_OOO(bp)) 12745 if (NO_ISCSI_OOO(bp))
12641 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO; 12746 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
new file mode 100644
index 000000000000..ddd5106ad2f9
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
@@ -0,0 +1,168 @@
1/* bnx2x_mfw_req.h: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2012 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 */
9
10#ifndef BNX2X_MFW_REQ_H
11#define BNX2X_MFW_REQ_H
12
13#define PORT_0 0
14#define PORT_1 1
15#define PORT_MAX 2
16#define NVM_PATH_MAX 2
17
18/* FCoE capabilities required from the driver */
19struct fcoe_capabilities {
20 u32 capability1;
21 /* Maximum number of I/Os per connection */
22 #define FCOE_IOS_PER_CONNECTION_MASK 0x0000ffff
23 #define FCOE_IOS_PER_CONNECTION_SHIFT 0
24 /* Maximum number of Logins per port */
25 #define FCOE_LOGINS_PER_PORT_MASK 0xffff0000
26 #define FCOE_LOGINS_PER_PORT_SHIFT 16
27
28 u32 capability2;
29 /* Maximum number of exchanges */
30 #define FCOE_NUMBER_OF_EXCHANGES_MASK 0x0000ffff
31 #define FCOE_NUMBER_OF_EXCHANGES_SHIFT 0
32 /* Maximum NPIV WWN per port */
33 #define FCOE_NPIV_WWN_PER_PORT_MASK 0xffff0000
34 #define FCOE_NPIV_WWN_PER_PORT_SHIFT 16
35
36 u32 capability3;
37 /* Maximum number of targets supported */
38 #define FCOE_TARGETS_SUPPORTED_MASK 0x0000ffff
39 #define FCOE_TARGETS_SUPPORTED_SHIFT 0
40 /* Maximum number of outstanding commands across all connections */
41 #define FCOE_OUTSTANDING_COMMANDS_MASK 0xffff0000
42 #define FCOE_OUTSTANDING_COMMANDS_SHIFT 16
43
44 u32 capability4;
45 #define FCOE_CAPABILITY4_STATEFUL 0x00000001
46 #define FCOE_CAPABILITY4_STATELESS 0x00000002
47 #define FCOE_CAPABILITY4_CAPABILITIES_REPORTED_VALID 0x00000004
48};
49
50struct glob_ncsi_oem_data {
51 u32 driver_version;
52 u32 unused[3];
53 struct fcoe_capabilities fcoe_features[NVM_PATH_MAX][PORT_MAX];
54};
55
56/* current drv_info version */
57#define DRV_INFO_CUR_VER 2
58
59/* drv_info op codes supported */
60enum drv_info_opcode {
61 ETH_STATS_OPCODE,
62 FCOE_STATS_OPCODE,
63 ISCSI_STATS_OPCODE
64};
65
66#define ETH_STAT_INFO_VERSION_LEN 12
67/* Per PCI Function Ethernet Statistics required from the driver */
68struct eth_stats_info {
69 /* Function's Driver Version. padded to 12 */
70 u8 version[ETH_STAT_INFO_VERSION_LEN];
71 /* Locally Admin Addr. BigEndian EIU48. Actual size is 6 bytes */
72 u8 mac_local[8];
73 u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
74 u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
75 u32 mtu_size; /* MTU Size. Note : Negotiated MTU */
76 u32 feature_flags; /* Feature_Flags. */
77#define FEATURE_ETH_CHKSUM_OFFLOAD_MASK 0x01
78#define FEATURE_ETH_LSO_MASK 0x02
79#define FEATURE_ETH_BOOTMODE_MASK 0x1C
80#define FEATURE_ETH_BOOTMODE_SHIFT 2
81#define FEATURE_ETH_BOOTMODE_NONE (0x0 << 2)
82#define FEATURE_ETH_BOOTMODE_PXE (0x1 << 2)
83#define FEATURE_ETH_BOOTMODE_ISCSI (0x2 << 2)
84#define FEATURE_ETH_BOOTMODE_FCOE (0x3 << 2)
85#define FEATURE_ETH_TOE_MASK 0x20
86 u32 lso_max_size; /* LSO MaxOffloadSize. */
87 u32 lso_min_seg_cnt; /* LSO MinSegmentCount. */
88 /* Num Offloaded Connections TCP_IPv4. */
89 u32 ipv4_ofld_cnt;
90 /* Num Offloaded Connections TCP_IPv6. */
91 u32 ipv6_ofld_cnt;
92 u32 promiscuous_mode; /* Promiscuous Mode. non-zero true */
93 u32 txq_size; /* TX Descriptors Queue Size */
94 u32 rxq_size; /* RX Descriptors Queue Size */
95 /* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */
96 u32 txq_avg_depth;
97 /* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */
98 u32 rxq_avg_depth;
99 /* IOV_Offload. 0=none; 1=MultiQueue, 2=VEB 3= VEPA*/
100 u32 iov_offload;
101 /* Number of NetQueue/VMQ Config'd. */
102 u32 netq_cnt;
103 u32 vf_cnt; /* Num VF assigned to this PF. */
104};
105
106/* Per PCI Function FCOE Statistics required from the driver */
107struct fcoe_stats_info {
108 u8 version[12]; /* Function's Driver Version. */
109 u8 mac_local[8]; /* Locally Admin Addr. */
110 u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
111 u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
112 /* QoS Priority (per 802.1p). 0-7255 */
113 u32 qos_priority;
114 u32 txq_size; /* FCoE TX Descriptors Queue Size. */
115 u32 rxq_size; /* FCoE RX Descriptors Queue Size. */
116 /* FCoE TX Descriptor Queue Avg Depth. */
117 u32 txq_avg_depth;
118 /* FCoE RX Descriptors Queue Avg Depth. */
119 u32 rxq_avg_depth;
120 u32 rx_frames_lo; /* FCoE RX Frames received. */
121 u32 rx_frames_hi; /* FCoE RX Frames received. */
122 u32 rx_bytes_lo; /* FCoE RX Bytes received. */
123 u32 rx_bytes_hi; /* FCoE RX Bytes received. */
124 u32 tx_frames_lo; /* FCoE TX Frames sent. */
125 u32 tx_frames_hi; /* FCoE TX Frames sent. */
126 u32 tx_bytes_lo; /* FCoE TX Bytes sent. */
127 u32 tx_bytes_hi; /* FCoE TX Bytes sent. */
128};
129
130/* Per PCI Function iSCSI Statistics required from the driver*/
131struct iscsi_stats_info {
132 u8 version[12]; /* Function's Driver Version. */
133 u8 mac_local[8]; /* Locally Admin iSCSI MAC Addr. */
134 u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
135 /* QoS Priority (per 802.1p). 0-7255 */
136 u32 qos_priority;
137 u8 initiator_name[64]; /* iSCSI Boot Initiator Node name. */
138 u8 ww_port_name[64]; /* iSCSI World wide port name */
139 u8 boot_target_name[64];/* iSCSI Boot Target Name. */
140 u8 boot_target_ip[16]; /* iSCSI Boot Target IP. */
141 u32 boot_target_portal; /* iSCSI Boot Target Portal. */
142 u8 boot_init_ip[16]; /* iSCSI Boot Initiator IP Address. */
143 u32 max_frame_size; /* Max Frame Size. bytes */
144 u32 txq_size; /* PDU TX Descriptors Queue Size. */
145 u32 rxq_size; /* PDU RX Descriptors Queue Size. */
146 u32 txq_avg_depth; /* PDU TX Descriptor Queue Avg Depth. */
147 u32 rxq_avg_depth; /* PDU RX Descriptors Queue Avg Depth. */
148 u32 rx_pdus_lo; /* iSCSI PDUs received. */
149 u32 rx_pdus_hi; /* iSCSI PDUs received. */
150 u32 rx_bytes_lo; /* iSCSI RX Bytes received. */
151 u32 rx_bytes_hi; /* iSCSI RX Bytes received. */
152 u32 tx_pdus_lo; /* iSCSI PDUs sent. */
153 u32 tx_pdus_hi; /* iSCSI PDUs sent. */
154 u32 tx_bytes_lo; /* iSCSI PDU TX Bytes sent. */
155 u32 tx_bytes_hi; /* iSCSI PDU TX Bytes sent. */
156 u32 pcp_prior_map_tbl; /* C-PCP to S-PCP Priority MapTable.
157 * 9 nibbles, the position of each nibble
158 * represents the C-PCP value, the value
159 * of the nibble = S-PCP value.
160 */
161};
162
163union drv_info_to_mcp {
164 struct eth_stats_info ether_stat;
165 struct fcoe_stats_info fcoe_stat;
166 struct iscsi_stats_info iscsi_stat;
167};
168#endif /* BNX2X_MFW_REQ_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index bbd387492a80..ec62a5c8bd37 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -1488,6 +1488,121 @@
1488 * 2:1 - otp_misc_do[51:50]; 0 - otp_misc_do[1]. */ 1488 * 2:1 - otp_misc_do[51:50]; 0 - otp_misc_do[1]. */
1489#define MISC_REG_CHIP_TYPE 0xac60 1489#define MISC_REG_CHIP_TYPE 0xac60
1490#define MISC_REG_CHIP_TYPE_57811_MASK (1<<1) 1490#define MISC_REG_CHIP_TYPE_57811_MASK (1<<1)
1491#define MISC_REG_CPMU_LP_DR_ENABLE 0xa858
1492/* [RW 1] FW EEE LPI Enable. When 1 indicates that EEE LPI mode is enabled
1493 * by FW. When 0 indicates that the EEE LPI mode is disabled by FW. Clk
1494 * 25MHz. Reset on hard reset. */
1495#define MISC_REG_CPMU_LP_FW_ENABLE_P0 0xa84c
1496/* [RW 32] EEE LPI Idle Threshold. The threshold value for the idle EEE LPI
1497 * counter. Timer tick is 1 us. Clock 25MHz. Reset on hard reset. */
1498#define MISC_REG_CPMU_LP_IDLE_THR_P0 0xa8a0
1499/* [RW 18] LPI entry events mask. [0] - Vmain SM Mask. When 1 indicates that
1500 * the Vmain SM end state is disabled. When 0 indicates that the Vmain SM
1501 * end state is enabled. [1] - FW Queues Empty Mask. When 1 indicates that
1502 * the FW command that all Queues are empty is disabled. When 0 indicates
1503 * that the FW command that all Queues are empty is enabled. [2] - FW Early
1504 * Exit Mask / Reserved (Entry mask). When 1 indicates that the FW Early
1505 * Exit command is disabled. When 0 indicates that the FW Early Exit command
1506 * is enabled. This bit applicable only in the EXIT Events Mask registers.
1507 * [3] - PBF Request Mask. When 1 indicates that the PBF Request indication
1508 * is disabled. When 0 indicates that the PBF Request indication is enabled.
1509 * [4] - Tx Request Mask. When =1 indicates that the Tx other Than PBF
1510 * Request indication is disabled. When 0 indicates that the Tx Other Than
1511 * PBF Request indication is enabled. [5] - Rx EEE LPI Status Mask. When 1
1512 * indicates that the RX EEE LPI Status indication is disabled. When 0
1513 * indicates that the RX EEE LPI Status indication is enabled. In the EXIT
1514 * Events Masks registers; this bit masks the falling edge detect of the LPI
1515 * Status (Rx LPI is on - off). [6] - Tx Pause Mask. When 1 indicates that
1516 * the Tx Pause indication is disabled. When 0 indicates that the Tx Pause
1517 * indication is enabled. [7] - BRB1 Empty Mask. When 1 indicates that the
1518 * BRB1 EMPTY indication is disabled. When 0 indicates that the BRB1 EMPTY
1519 * indication is enabled. [8] - QM Idle Mask. When 1 indicates that the QM
1520 * IDLE indication is disabled. When 0 indicates that the QM IDLE indication
1521 * is enabled. (One bit for both VOQ0 and VOQ1). [9] - QM LB Idle Mask. When
1522 * 1 indicates that the QM IDLE indication for LOOPBACK is disabled. When 0
1523 * indicates that the QM IDLE indication for LOOPBACK is enabled. [10] - L1
1524 * Status Mask. When 1 indicates that the L1 Status indication from the PCIE
1525 * CORE is disabled. When 0 indicates that the RX EEE LPI Status indication
1526 * from the PCIE CORE is enabled. In the EXIT Events Masks registers; this
1527 * bit masks the falling edge detect of the L1 status (L1 is on - off). [11]
1528 * - P0 E0 EEE EEE LPI REQ Mask. When =1 indicates that the P0 E0 EEE EEE
1529 * LPI REQ indication is disabled. When =0 indicates that the P0 E0 EEE LPI
1530 * REQ indication is enabled. [12] - P1 E0 EEE LPI REQ Mask. When =1
1531 * indicates that the P0 EEE LPI REQ indication is disabled. When =0
1532 * indicates that the P0 EEE LPI REQ indication is enabled. [13] - P0 E1 EEE
1533 * LPI REQ Mask. When =1 indicates that the P0 EEE LPI REQ indication is
1534 * disabled. When =0 indicates that the P0 EEE LPI REQ indication is
1535 * enabled. [14] - P1 E1 EEE LPI REQ Mask. When =1 indicates that the P0 EEE
1536 * LPI REQ indication is disabled. When =0 indicates that the P0 EEE LPI REQ
1537 * indication is enabled. [15] - L1 REQ Mask. When =1 indicates that the L1
1538 * REQ indication is disabled. When =0 indicates that the L1 indication is
1539 * enabled. [16] - Rx EEE LPI Status Edge Detect Mask. When =1 indicates
1540 * that the RX EEE LPI Status Falling Edge Detect indication is disabled (Rx
1541 * EEE LPI is on - off). When =0 indicates that the RX EEE LPI Status
1542 * Falling Edge Detec indication is enabled (Rx EEE LPI is on - off). This
1543 * bit is applicable only in the EXIT Events Masks registers. [17] - L1
1544 * Status Edge Detect Mask. When =1 indicates that the L1 Status Falling
1545 * Edge Detect indication from the PCIE CORE is disabled (L1 is on - off).
1546 * When =0 indicates that the L1 Status Falling Edge Detect indication from
1547 * the PCIE CORE is enabled (L1 is on - off). This bit is applicable only in
1548 * the EXIT Events Masks registers. Clock 25MHz. Reset on hard reset. */
1549#define MISC_REG_CPMU_LP_MASK_ENT_P0 0xa880
1550/* [RW 18] EEE LPI exit events mask. [0] - Vmain SM Mask. When 1 indicates
1551 * that the Vmain SM end state is disabled. When 0 indicates that the Vmain
1552 * SM end state is enabled. [1] - FW Queues Empty Mask. When 1 indicates
1553 * that the FW command that all Queues are empty is disabled. When 0
1554 * indicates that the FW command that all Queues are empty is enabled. [2] -
1555 * FW Early Exit Mask / Reserved (Entry mask). When 1 indicates that the FW
1556 * Early Exit command is disabled. When 0 indicates that the FW Early Exit
1557 * command is enabled. This bit applicable only in the EXIT Events Mask
1558 * registers. [3] - PBF Request Mask. When 1 indicates that the PBF Request
1559 * indication is disabled. When 0 indicates that the PBF Request indication
1560 * is enabled. [4] - Tx Request Mask. When =1 indicates that the Tx other
1561 * Than PBF Request indication is disabled. When 0 indicates that the Tx
1562 * Other Than PBF Request indication is enabled. [5] - Rx EEE LPI Status
1563 * Mask. When 1 indicates that the RX EEE LPI Status indication is disabled.
1564 * When 0 indicates that the RX LPI Status indication is enabled. In the
1565 * EXIT Events Masks registers; this bit masks the falling edge detect of
1566 * the EEE LPI Status (Rx EEE LPI is on - off). [6] - Tx Pause Mask. When 1
1567 * indicates that the Tx Pause indication is disabled. When 0 indicates that
1568 * the Tx Pause indication is enabled. [7] - BRB1 Empty Mask. When 1
1569 * indicates that the BRB1 EMPTY indication is disabled. When 0 indicates
1570 * that the BRB1 EMPTY indication is enabled. [8] - QM Idle Mask. When 1
1571 * indicates that the QM IDLE indication is disabled. When 0 indicates that
1572 * the QM IDLE indication is enabled. (One bit for both VOQ0 and VOQ1). [9]
1573 * - QM LB Idle Mask. When 1 indicates that the QM IDLE indication for
1574 * LOOPBACK is disabled. When 0 indicates that the QM IDLE indication for
1575 * LOOPBACK is enabled. [10] - L1 Status Mask. When 1 indicates that the L1
1576 * Status indication from the PCIE CORE is disabled. When 0 indicates that
1577 * the RX EEE LPI Status indication from the PCIE CORE is enabled. In the
1578 * EXIT Events Masks registers; this bit masks the falling edge detect of
1579 * the L1 status (L1 is on - off). [11] - P0 E0 EEE EEE LPI REQ Mask. When
1580 * =1 indicates that the P0 E0 EEE EEE LPI REQ indication is disabled. When
1581 * =0 indicates that the P0 E0 EEE LPI REQ indication is enabled. [12] - P1
1582 * E0 EEE LPI REQ Mask. When =1 indicates that the P0 EEE LPI REQ indication
1583 * is disabled. When =0 indicates that the P0 EEE LPI REQ indication is
1584 * enabled. [13] - P0 E1 EEE LPI REQ Mask. When =1 indicates that the P0 EEE
1585 * LPI REQ indication is disabled. When =0 indicates that the P0 EEE LPI REQ
1586 * indication is enabled. [14] - P1 E1 EEE LPI REQ Mask. When =1 indicates
1587 * that the P0 EEE LPI REQ indication is disabled. When =0 indicates that
1588 * the P0 EEE LPI REQ indication is enabled. [15] - L1 REQ Mask. When =1
1589 * indicates that the L1 REQ indication is disabled. When =0 indicates that
1590 * the L1 indication is enabled. [16] - Rx EEE LPI Status Edge Detect Mask.
1591 * When =1 indicates that the RX EEE LPI Status Falling Edge Detect
1592 * indication is disabled (Rx EEE LPI is on - off). When =0 indicates that
1593 * the RX EEE LPI Status Falling Edge Detec indication is enabled (Rx EEE
1594 * LPI is on - off). This bit is applicable only in the EXIT Events Masks
1595 * registers. [17] - L1 Status Edge Detect Mask. When =1 indicates that the
1596 * L1 Status Falling Edge Detect indication from the PCIE CORE is disabled
1597 * (L1 is on - off). When =0 indicates that the L1 Status Falling Edge
1598 * Detect indication from the PCIE CORE is enabled (L1 is on - off). This
1599 * bit is applicable only in the EXIT Events Masks registers.Clock 25MHz.
1600 * Reset on hard reset. */
1601#define MISC_REG_CPMU_LP_MASK_EXT_P0 0xa888
1602/* [RW 16] EEE LPI Entry Events Counter. A statistic counter with the number
1603 * of counts that the SM entered the EEE LPI state. Clock 25MHz. Read only
1604 * register. Reset on hard reset. */
1605#define MISC_REG_CPMU_LP_SM_ENT_CNT_P0 0xa8b8
1491/* [RW 32] The following driver registers(1...16) represent 16 drivers and 1606/* [RW 32] The following driver registers(1...16) represent 16 drivers and
1492 32 clients. Each client can be controlled by one driver only. One in each 1607 32 clients. Each client can be controlled by one driver only. One in each
1493 bit represent that this driver control the appropriate client (Ex: bit 5 1608 bit represent that this driver control the appropriate client (Ex: bit 5
@@ -5372,6 +5487,8 @@
5372/* [RW 32] Lower 48 bits of ctrl_sa register. Used as the SA in PAUSE/PFC 5487/* [RW 32] Lower 48 bits of ctrl_sa register. Used as the SA in PAUSE/PFC
5373 * packets transmitted by the MAC */ 5488 * packets transmitted by the MAC */
5374#define XMAC_REG_CTRL_SA_LO 0x28 5489#define XMAC_REG_CTRL_SA_LO 0x28
5490#define XMAC_REG_EEE_CTRL 0xd8
5491#define XMAC_REG_EEE_TIMERS_HI 0xe4
5375#define XMAC_REG_PAUSE_CTRL 0x68 5492#define XMAC_REG_PAUSE_CTRL 0x68
5376#define XMAC_REG_PFC_CTRL 0x70 5493#define XMAC_REG_PFC_CTRL 0x70
5377#define XMAC_REG_PFC_CTRL_HI 0x74 5494#define XMAC_REG_PFC_CTRL_HI 0x74
@@ -5796,6 +5913,7 @@
5796#define MISC_REGISTERS_SPIO_OUTPUT_LOW 0 5913#define MISC_REGISTERS_SPIO_OUTPUT_LOW 0
5797#define MISC_REGISTERS_SPIO_SET_POS 8 5914#define MISC_REGISTERS_SPIO_SET_POS 8
5798#define HW_LOCK_MAX_RESOURCE_VALUE 31 5915#define HW_LOCK_MAX_RESOURCE_VALUE 31
5916#define HW_LOCK_RESOURCE_DCBX_ADMIN_MIB 13
5799#define HW_LOCK_RESOURCE_DRV_FLAGS 10 5917#define HW_LOCK_RESOURCE_DRV_FLAGS 10
5800#define HW_LOCK_RESOURCE_GPIO 1 5918#define HW_LOCK_RESOURCE_GPIO 1
5801#define HW_LOCK_RESOURCE_MDIO 0 5919#define HW_LOCK_RESOURCE_MDIO 0
@@ -6813,6 +6931,8 @@ Theotherbitsarereservedandshouldbezero*/
6813#define MDIO_AN_REG_LP_AUTO_NEG 0x0013 6931#define MDIO_AN_REG_LP_AUTO_NEG 0x0013
6814#define MDIO_AN_REG_LP_AUTO_NEG2 0x0014 6932#define MDIO_AN_REG_LP_AUTO_NEG2 0x0014
6815#define MDIO_AN_REG_MASTER_STATUS 0x0021 6933#define MDIO_AN_REG_MASTER_STATUS 0x0021
6934#define MDIO_AN_REG_EEE_ADV 0x003c
6935#define MDIO_AN_REG_LP_EEE_ADV 0x003d
6816/*bcm*/ 6936/*bcm*/
6817#define MDIO_AN_REG_LINK_STATUS 0x8304 6937#define MDIO_AN_REG_LINK_STATUS 0x8304
6818#define MDIO_AN_REG_CL37_CL73 0x8370 6938#define MDIO_AN_REG_CL37_CL73 0x8370
@@ -6866,6 +6986,8 @@ Theotherbitsarereservedandshouldbezero*/
6866#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080 6986#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
6867 6987
6868/* BCM84833 only */ 6988/* BCM84833 only */
6989#define MDIO_84833_TOP_CFG_FW_REV 0x400f
6990#define MDIO_84833_TOP_CFG_FW_EEE 0x10b1
6869#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a 6991#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a
6870#define MDIO_84833_SUPER_ISOLATE 0x8000 6992#define MDIO_84833_SUPER_ISOLATE 0x8000
6871/* These are mailbox register set used by 84833. */ 6993/* These are mailbox register set used by 84833. */
@@ -6993,11 +7115,13 @@ Theotherbitsarereservedandshouldbezero*/
6993#define MDIO_WC_REG_DIGITAL3_UP1 0x8329 7115#define MDIO_WC_REG_DIGITAL3_UP1 0x8329
6994#define MDIO_WC_REG_DIGITAL3_LP_UP1 0x832c 7116#define MDIO_WC_REG_DIGITAL3_LP_UP1 0x832c
6995#define MDIO_WC_REG_DIGITAL4_MISC3 0x833c 7117#define MDIO_WC_REG_DIGITAL4_MISC3 0x833c
7118#define MDIO_WC_REG_DIGITAL4_MISC5 0x833e
6996#define MDIO_WC_REG_DIGITAL5_MISC6 0x8345 7119#define MDIO_WC_REG_DIGITAL5_MISC6 0x8345
6997#define MDIO_WC_REG_DIGITAL5_MISC7 0x8349 7120#define MDIO_WC_REG_DIGITAL5_MISC7 0x8349
6998#define MDIO_WC_REG_DIGITAL5_ACTUAL_SPEED 0x834e 7121#define MDIO_WC_REG_DIGITAL5_ACTUAL_SPEED 0x834e
6999#define MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL 0x8350 7122#define MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL 0x8350
7000#define MDIO_WC_REG_CL49_USERB0_CTRL 0x8368 7123#define MDIO_WC_REG_CL49_USERB0_CTRL 0x8368
7124#define MDIO_WC_REG_EEE_COMBO_CONTROL0 0x8390
7001#define MDIO_WC_REG_TX66_CONTROL 0x83b0 7125#define MDIO_WC_REG_TX66_CONTROL 0x83b0
7002#define MDIO_WC_REG_RX66_CONTROL 0x83c0 7126#define MDIO_WC_REG_RX66_CONTROL 0x83c0
7003#define MDIO_WC_REG_RX66_SCW0 0x83c2 7127#define MDIO_WC_REG_RX66_SCW0 0x83c2
@@ -7036,6 +7160,7 @@ Theotherbitsarereservedandshouldbezero*/
7036#define MDIO_REG_GPHY_EEE_1G (0x1 << 2) 7160#define MDIO_REG_GPHY_EEE_1G (0x1 << 2)
7037#define MDIO_REG_GPHY_EEE_100 (0x1 << 1) 7161#define MDIO_REG_GPHY_EEE_100 (0x1 << 1)
7038#define MDIO_REG_GPHY_EEE_RESOLVED 0x803e 7162#define MDIO_REG_GPHY_EEE_RESOLVED 0x803e
7163#define MDIO_REG_GPHY_AUX_STATUS 0x19
7039#define MDIO_REG_INTR_STATUS 0x1a 7164#define MDIO_REG_INTR_STATUS 0x1a
7040#define MDIO_REG_INTR_MASK 0x1b 7165#define MDIO_REG_INTR_MASK 0x1b
7041#define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1) 7166#define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1)
@@ -7150,8 +7275,7 @@ Theotherbitsarereservedandshouldbezero*/
7150#define CDU_REGION_NUMBER_UCM_AG 4 7275#define CDU_REGION_NUMBER_UCM_AG 4
7151 7276
7152 7277
7153/** 7278/* String-to-compress [31:8] = CID (all 24 bits)
7154 * String-to-compress [31:8] = CID (all 24 bits)
7155 * String-to-compress [7:4] = Region 7279 * String-to-compress [7:4] = Region
7156 * String-to-compress [3:0] = Type 7280 * String-to-compress [3:0] = Type
7157 */ 7281 */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 6c14b4a4e82c..734fd87cd990 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -4107,6 +4107,10 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
4107 data->capabilities |= 4107 data->capabilities |=
4108 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY; 4108 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4109 4109
4110 if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
4111 data->capabilities |=
4112 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
4113
4110 if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags)) 4114 if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
4111 data->capabilities |= 4115 data->capabilities |=
4112 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY; 4116 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
@@ -4115,6 +4119,10 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
4115 data->capabilities |= 4119 data->capabilities |=
4116 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY; 4120 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4117 4121
4122 if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
4123 data->capabilities |=
4124 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
4125
4118 /* Hashing mask */ 4126 /* Hashing mask */
4119 data->rss_result_mask = p->rss_result_mask; 4127 data->rss_result_mask = p->rss_result_mask;
4120 4128
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index efd80bdd0dfe..f83e033da6da 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -167,9 +167,8 @@ typedef int (*exe_q_remove)(struct bnx2x *bp,
167 union bnx2x_qable_obj *o, 167 union bnx2x_qable_obj *o,
168 struct bnx2x_exeq_elem *elem); 168 struct bnx2x_exeq_elem *elem);
169 169
170/** 170/* Return positive if entry was optimized, 0 - if not, negative
171 * @return positive is entry was optimized, 0 - if not, negative 171 * in case of an error.
172 * in case of an error.
173 */ 172 */
174typedef int (*exe_q_optimize)(struct bnx2x *bp, 173typedef int (*exe_q_optimize)(struct bnx2x *bp,
175 union bnx2x_qable_obj *o, 174 union bnx2x_qable_obj *o,
@@ -694,8 +693,10 @@ enum {
694 693
695 BNX2X_RSS_IPV4, 694 BNX2X_RSS_IPV4,
696 BNX2X_RSS_IPV4_TCP, 695 BNX2X_RSS_IPV4_TCP,
696 BNX2X_RSS_IPV4_UDP,
697 BNX2X_RSS_IPV6, 697 BNX2X_RSS_IPV6,
698 BNX2X_RSS_IPV6_TCP, 698 BNX2X_RSS_IPV6_TCP,
699 BNX2X_RSS_IPV6_UDP,
699}; 700};
700 701
701struct bnx2x_config_rss_params { 702struct bnx2x_config_rss_params {
@@ -729,6 +730,10 @@ struct bnx2x_rss_config_obj {
729 /* Last configured indirection table */ 730 /* Last configured indirection table */
730 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE]; 731 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
731 732
733 /* flags for enabling 4-tupple hash on UDP */
734 u8 udp_rss_v4;
735 u8 udp_rss_v6;
736
732 int (*config_rss)(struct bnx2x *bp, 737 int (*config_rss)(struct bnx2x *bp,
733 struct bnx2x_config_rss_params *p); 738 struct bnx2x_config_rss_params *p);
734}; 739};
@@ -1280,12 +1285,11 @@ void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
1280 struct bnx2x_rx_mode_obj *o); 1285 struct bnx2x_rx_mode_obj *o);
1281 1286
1282/** 1287/**
1283 * Send and RX_MODE ramrod according to the provided parameters. 1288 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
1284 * 1289 *
1285 * @param bp 1290 * @p: Command parameters
1286 * @param p Command parameters
1287 * 1291 *
1288 * @return 0 - if operation was successfull and there is no pending completions, 1292 * Return: 0 - if operation was successfull and there is no pending completions,
1289 * positive number - if there are pending completions, 1293 * positive number - if there are pending completions,
1290 * negative - if there were errors 1294 * negative - if there were errors
1291 */ 1295 */
@@ -1302,7 +1306,11 @@ void bnx2x_init_mcast_obj(struct bnx2x *bp,
1302 bnx2x_obj_type type); 1306 bnx2x_obj_type type);
1303 1307
1304/** 1308/**
1305 * Configure multicast MACs list. May configure a new list 1309 * bnx2x_config_mcast - Configure multicast MACs list.
1310 *
1311 * @cmd: command to execute: BNX2X_MCAST_CMD_X
1312 *
1313 * May configure a new list
1306 * provided in p->mcast_list (BNX2X_MCAST_CMD_ADD), clean up 1314 * provided in p->mcast_list (BNX2X_MCAST_CMD_ADD), clean up
1307 * (BNX2X_MCAST_CMD_DEL) or restore (BNX2X_MCAST_CMD_RESTORE) a current 1315 * (BNX2X_MCAST_CMD_DEL) or restore (BNX2X_MCAST_CMD_RESTORE) a current
1308 * configuration, continue to execute the pending commands 1316 * configuration, continue to execute the pending commands
@@ -1313,11 +1321,7 @@ void bnx2x_init_mcast_obj(struct bnx2x *bp,
1313 * the current command will be enqueued to the tail of the 1321 * the current command will be enqueued to the tail of the
1314 * pending commands list. 1322 * pending commands list.
1315 * 1323 *
1316 * @param bp 1324 * Return: 0 is operation was sucessfull and there are no pending completions,
1317 * @param p
1318 * @param command to execute: BNX2X_MCAST_CMD_X
1319 *
1320 * @return 0 is operation was sucessfull and there are no pending completions,
1321 * negative if there were errors, positive if there are pending 1325 * negative if there were errors, positive if there are pending
1322 * completions. 1326 * completions.
1323 */ 1327 */
@@ -1342,21 +1346,17 @@ void bnx2x_init_rss_config_obj(struct bnx2x *bp,
1342 bnx2x_obj_type type); 1346 bnx2x_obj_type type);
1343 1347
1344/** 1348/**
1345 * Updates RSS configuration according to provided parameters. 1349 * bnx2x_config_rss - Updates RSS configuration according to provided parameters
1346 *
1347 * @param bp
1348 * @param p
1349 * 1350 *
1350 * @return 0 in case of success 1351 * Return: 0 in case of success
1351 */ 1352 */
1352int bnx2x_config_rss(struct bnx2x *bp, 1353int bnx2x_config_rss(struct bnx2x *bp,
1353 struct bnx2x_config_rss_params *p); 1354 struct bnx2x_config_rss_params *p);
1354 1355
1355/** 1356/**
1356 * Return the current ind_table configuration. 1357 * bnx2x_get_rss_ind_table - Return the current ind_table configuration.
1357 * 1358 *
1358 * @param bp 1359 * @ind_table: buffer to fill with the current indirection
1359 * @param ind_table buffer to fill with the current indirection
1360 * table content. Should be at least 1360 * table content. Should be at least
1361 * T_ETH_INDIRECTION_TABLE_SIZE bytes long. 1361 * T_ETH_INDIRECTION_TABLE_SIZE bytes long.
1362 */ 1362 */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 1e2785cd11d0..667d89042d35 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -785,6 +785,10 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
785 785
786 pstats->host_port_stats_counter++; 786 pstats->host_port_stats_counter++;
787 787
788 if (CHIP_IS_E3(bp))
789 estats->eee_tx_lpi += REG_RD(bp,
790 MISC_REG_CPMU_LP_SM_ENT_CNT_P0);
791
788 if (!BP_NOMCP(bp)) { 792 if (!BP_NOMCP(bp)) {
789 u32 nig_timer_max = 793 u32 nig_timer_max =
790 SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer); 794 SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
@@ -855,17 +859,22 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
855 struct tstorm_per_queue_stats *tclient = 859 struct tstorm_per_queue_stats *tclient =
856 &bp->fw_stats_data->queue_stats[i]. 860 &bp->fw_stats_data->queue_stats[i].
857 tstorm_queue_statistics; 861 tstorm_queue_statistics;
858 struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient; 862 struct tstorm_per_queue_stats *old_tclient =
863 &bnx2x_fp_stats(bp, fp)->old_tclient;
859 struct ustorm_per_queue_stats *uclient = 864 struct ustorm_per_queue_stats *uclient =
860 &bp->fw_stats_data->queue_stats[i]. 865 &bp->fw_stats_data->queue_stats[i].
861 ustorm_queue_statistics; 866 ustorm_queue_statistics;
862 struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient; 867 struct ustorm_per_queue_stats *old_uclient =
868 &bnx2x_fp_stats(bp, fp)->old_uclient;
863 struct xstorm_per_queue_stats *xclient = 869 struct xstorm_per_queue_stats *xclient =
864 &bp->fw_stats_data->queue_stats[i]. 870 &bp->fw_stats_data->queue_stats[i].
865 xstorm_queue_statistics; 871 xstorm_queue_statistics;
866 struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient; 872 struct xstorm_per_queue_stats *old_xclient =
867 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; 873 &bnx2x_fp_stats(bp, fp)->old_xclient;
868 struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old; 874 struct bnx2x_eth_q_stats *qstats =
875 &bnx2x_fp_stats(bp, fp)->eth_q_stats;
876 struct bnx2x_eth_q_stats_old *qstats_old =
877 &bnx2x_fp_stats(bp, fp)->eth_q_stats_old;
869 878
870 u32 diff; 879 u32 diff;
871 880
@@ -1048,8 +1057,11 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
1048 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi); 1057 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
1049 1058
1050 tmp = estats->mac_discard; 1059 tmp = estats->mac_discard;
1051 for_each_rx_queue(bp, i) 1060 for_each_rx_queue(bp, i) {
1052 tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard); 1061 struct tstorm_per_queue_stats *old_tclient =
1062 &bp->fp_stats[i].old_tclient;
1063 tmp += le32_to_cpu(old_tclient->checksum_discard);
1064 }
1053 nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped; 1065 nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped;
1054 1066
1055 nstats->tx_dropped = 0; 1067 nstats->tx_dropped = 0;
@@ -1099,9 +1111,9 @@ static void bnx2x_drv_stats_update(struct bnx2x *bp)
1099 int i; 1111 int i;
1100 1112
1101 for_each_queue(bp, i) { 1113 for_each_queue(bp, i) {
1102 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats; 1114 struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
1103 struct bnx2x_eth_q_stats_old *qstats_old = 1115 struct bnx2x_eth_q_stats_old *qstats_old =
1104 &bp->fp[i].eth_q_stats_old; 1116 &bp->fp_stats[i].eth_q_stats_old;
1105 1117
1106 UPDATE_ESTAT_QSTAT(driver_xoff); 1118 UPDATE_ESTAT_QSTAT(driver_xoff);
1107 UPDATE_ESTAT_QSTAT(rx_err_discard_pkt); 1119 UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);
@@ -1309,12 +1321,9 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
1309 bnx2x_stats_comp(bp); 1321 bnx2x_stats_comp(bp);
1310} 1322}
1311 1323
1312/** 1324/* This function will prepare the statistics ramrod data the way
1313 * This function will prepare the statistics ramrod data the way
1314 * we will only have to increment the statistics counter and 1325 * we will only have to increment the statistics counter and
1315 * send the ramrod each time we have to. 1326 * send the ramrod each time we have to.
1316 *
1317 * @param bp
1318 */ 1327 */
1319static void bnx2x_prep_fw_stats_req(struct bnx2x *bp) 1328static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
1320{ 1329{
@@ -1428,7 +1437,7 @@ static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
1428 query[first_queue_query_index + i]; 1437 query[first_queue_query_index + i];
1429 1438
1430 cur_query_entry->kind = STATS_TYPE_QUEUE; 1439 cur_query_entry->kind = STATS_TYPE_QUEUE;
1431 cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX]); 1440 cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX(bp)]);
1432 cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); 1441 cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
1433 cur_query_entry->address.hi = 1442 cur_query_entry->address.hi =
1434 cpu_to_le32(U64_HI(cur_data_offset)); 1443 cpu_to_le32(U64_HI(cur_data_offset));
@@ -1479,15 +1488,19 @@ void bnx2x_stats_init(struct bnx2x *bp)
1479 1488
1480 /* function stats */ 1489 /* function stats */
1481 for_each_queue(bp, i) { 1490 for_each_queue(bp, i) {
1482 struct bnx2x_fastpath *fp = &bp->fp[i]; 1491 struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];
1483 1492
1484 memset(&fp->old_tclient, 0, sizeof(fp->old_tclient)); 1493 memset(&fp_stats->old_tclient, 0,
1485 memset(&fp->old_uclient, 0, sizeof(fp->old_uclient)); 1494 sizeof(fp_stats->old_tclient));
1486 memset(&fp->old_xclient, 0, sizeof(fp->old_xclient)); 1495 memset(&fp_stats->old_uclient, 0,
1496 sizeof(fp_stats->old_uclient));
1497 memset(&fp_stats->old_xclient, 0,
1498 sizeof(fp_stats->old_xclient));
1487 if (bp->stats_init) { 1499 if (bp->stats_init) {
1488 memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats)); 1500 memset(&fp_stats->eth_q_stats, 0,
1489 memset(&fp->eth_q_stats_old, 0, 1501 sizeof(fp_stats->eth_q_stats));
1490 sizeof(fp->eth_q_stats_old)); 1502 memset(&fp_stats->eth_q_stats_old, 0,
1503 sizeof(fp_stats->eth_q_stats_old));
1491 } 1504 }
1492 } 1505 }
1493 1506
@@ -1529,8 +1542,10 @@ void bnx2x_save_statistics(struct bnx2x *bp)
1529 /* save queue statistics */ 1542 /* save queue statistics */
1530 for_each_eth_queue(bp, i) { 1543 for_each_eth_queue(bp, i) {
1531 struct bnx2x_fastpath *fp = &bp->fp[i]; 1544 struct bnx2x_fastpath *fp = &bp->fp[i];
1532 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; 1545 struct bnx2x_eth_q_stats *qstats =
1533 struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old; 1546 &bnx2x_fp_stats(bp, fp)->eth_q_stats;
1547 struct bnx2x_eth_q_stats_old *qstats_old =
1548 &bnx2x_fp_stats(bp, fp)->eth_q_stats_old;
1534 1549
1535 UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi); 1550 UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
1536 UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo); 1551 UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
@@ -1569,7 +1584,7 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
1569 struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats; 1584 struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
1570 struct bnx2x_eth_stats *estats = &bp->eth_stats; 1585 struct bnx2x_eth_stats *estats = &bp->eth_stats;
1571 struct per_queue_stats *fcoe_q_stats = 1586 struct per_queue_stats *fcoe_q_stats =
1572 &bp->fw_stats_data->queue_stats[FCOE_IDX]; 1587 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)];
1573 1588
1574 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats = 1589 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
1575 &fcoe_q_stats->tstorm_queue_statistics; 1590 &fcoe_q_stats->tstorm_queue_statistics;
@@ -1586,8 +1601,7 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
1586 memset(afex_stats, 0, sizeof(struct afex_stats)); 1601 memset(afex_stats, 0, sizeof(struct afex_stats));
1587 1602
1588 for_each_eth_queue(bp, i) { 1603 for_each_eth_queue(bp, i) {
1589 struct bnx2x_fastpath *fp = &bp->fp[i]; 1604 struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
1590 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
1591 1605
1592 ADD_64(afex_stats->rx_unicast_bytes_hi, 1606 ADD_64(afex_stats->rx_unicast_bytes_hi,
1593 qstats->total_unicast_bytes_received_hi, 1607 qstats->total_unicast_bytes_received_hi,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 93e689fdfeda..24b8e505b60c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -203,6 +203,8 @@ struct bnx2x_eth_stats {
203 /* Recovery */ 203 /* Recovery */
204 u32 recoverable_error; 204 u32 recoverable_error;
205 u32 unrecoverable_error; 205 u32 unrecoverable_error;
206 /* src: Clear-on-Read register; Will not survive PMF Migration */
207 u32 eee_tx_lpi;
206}; 208};
207 209
208 210
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 2c89d17cbb29..3b4fc61f24cf 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -256,11 +256,16 @@ static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
256 struct cnic_local *cp = dev->cnic_priv; 256 struct cnic_local *cp = dev->cnic_priv;
257 struct cnic_eth_dev *ethdev = cp->ethdev; 257 struct cnic_eth_dev *ethdev = cp->ethdev;
258 struct drv_ctl_info info; 258 struct drv_ctl_info info;
259 struct fcoe_capabilities *fcoe_cap =
260 &info.data.register_data.fcoe_features;
259 261
260 if (reg) 262 if (reg) {
261 info.cmd = DRV_CTL_ULP_REGISTER_CMD; 263 info.cmd = DRV_CTL_ULP_REGISTER_CMD;
262 else 264 if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
265 memcpy(fcoe_cap, dev->fcoe_cap, sizeof(*fcoe_cap));
266 } else {
263 info.cmd = DRV_CTL_ULP_UNREGISTER_CMD; 267 info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
268 }
264 269
265 info.data.ulp_type = ulp_type; 270 info.data.ulp_type = ulp_type;
266 ethdev->drv_ctl(dev->netdev, &info); 271 ethdev->drv_ctl(dev->netdev, &info);
@@ -286,6 +291,9 @@ static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
286{ 291{
287 u32 i; 292 u32 i;
288 293
294 if (!cp->ctx_tbl)
295 return -EINVAL;
296
289 for (i = 0; i < cp->max_cid_space; i++) { 297 for (i = 0; i < cp->max_cid_space; i++) {
290 if (cp->ctx_tbl[i].cid == cid) { 298 if (cp->ctx_tbl[i].cid == cid) {
291 *l5_cid = i; 299 *l5_cid = i;
@@ -612,6 +620,8 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
612 620
613 if (ulp_type == CNIC_ULP_ISCSI) 621 if (ulp_type == CNIC_ULP_ISCSI)
614 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); 622 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
623 else if (ulp_type == CNIC_ULP_FCOE)
624 dev->fcoe_cap = NULL;
615 625
616 synchronize_rcu(); 626 synchronize_rcu();
617 627
@@ -2589,7 +2599,7 @@ static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
2589 return; 2599 return;
2590 } 2600 }
2591 2601
2592 cqes[0] = (struct kcqe *) &kcqe; 2602 cqes[0] = &kcqe;
2593 cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1); 2603 cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
2594} 2604}
2595 2605
@@ -3217,6 +3227,9 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info)
3217 u32 l5_cid; 3227 u32 l5_cid;
3218 struct cnic_local *cp = dev->cnic_priv; 3228 struct cnic_local *cp = dev->cnic_priv;
3219 3229
3230 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
3231 break;
3232
3220 if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) { 3233 if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
3221 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 3234 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3222 3235
@@ -3947,6 +3960,15 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3947 cnic_cm_upcall(cp, csk, opcode); 3960 cnic_cm_upcall(cp, csk, opcode);
3948 break; 3961 break;
3949 3962
3963 case L5CM_RAMROD_CMD_ID_CLOSE:
3964 if (l4kcqe->status != 0) {
3965 netdev_warn(dev->netdev, "RAMROD CLOSE compl with "
3966 "status 0x%x\n", l4kcqe->status);
3967 opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
3968 /* Fall through */
3969 } else {
3970 break;
3971 }
3950 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: 3972 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
3951 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: 3973 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
3952 case L4_KCQE_OPCODE_VALUE_RESET_COMP: 3974 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
@@ -4250,8 +4272,6 @@ static int cnic_cm_shutdown(struct cnic_dev *dev)
4250 struct cnic_local *cp = dev->cnic_priv; 4272 struct cnic_local *cp = dev->cnic_priv;
4251 int i; 4273 int i;
4252 4274
4253 cp->stop_cm(dev);
4254
4255 if (!cp->csk_tbl) 4275 if (!cp->csk_tbl)
4256 return 0; 4276 return 0;
4257 4277
@@ -4669,9 +4689,9 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
4669 4689
4670 cp->kcq1.sw_prod_idx = 0; 4690 cp->kcq1.sw_prod_idx = 0;
4671 cp->kcq1.hw_prod_idx_ptr = 4691 cp->kcq1.hw_prod_idx_ptr =
4672 (u16 *) &sblk->status_completion_producer_index; 4692 &sblk->status_completion_producer_index;
4673 4693
4674 cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx; 4694 cp->kcq1.status_idx_ptr = &sblk->status_idx;
4675 4695
4676 /* Initialize the kernel complete queue context. */ 4696 /* Initialize the kernel complete queue context. */
4677 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | 4697 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
@@ -4697,9 +4717,9 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
4697 u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id); 4717 u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
4698 4718
4699 cp->kcq1.hw_prod_idx_ptr = 4719 cp->kcq1.hw_prod_idx_ptr =
4700 (u16 *) &msblk->status_completion_producer_index; 4720 &msblk->status_completion_producer_index;
4701 cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx; 4721 cp->kcq1.status_idx_ptr = &msblk->status_idx;
4702 cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index; 4722 cp->kwq_con_idx_ptr = &msblk->status_cmd_consumer_index;
4703 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT; 4723 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
4704 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb); 4724 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4705 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb); 4725 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
@@ -4981,8 +5001,14 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
4981 cp->port_mode = CHIP_PORT_MODE_NONE; 5001 cp->port_mode = CHIP_PORT_MODE_NONE;
4982 5002
4983 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { 5003 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
4984 u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR); 5004 u32 val;
4985 5005
5006 pci_read_config_dword(dev->pcidev, PCICFG_ME_REGISTER, &val);
5007 cp->func = (u8) ((val & ME_REG_ABS_PF_NUM) >>
5008 ME_REG_ABS_PF_NUM_SHIFT);
5009 func = CNIC_FUNC(cp);
5010
5011 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
4986 if (!(val & 1)) 5012 if (!(val & 1))
4987 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN); 5013 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN);
4988 else 5014 else
@@ -5287,6 +5313,7 @@ static void cnic_stop_hw(struct cnic_dev *dev)
5287 i++; 5313 i++;
5288 } 5314 }
5289 cnic_shutdown_rings(dev); 5315 cnic_shutdown_rings(dev);
5316 cp->stop_cm(dev);
5290 clear_bit(CNIC_F_CNIC_UP, &dev->flags); 5317 clear_bit(CNIC_F_CNIC_UP, &dev->flags);
5291 RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL); 5318 RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
5292 synchronize_rcu(); 5319 synchronize_rcu();
@@ -5516,9 +5543,7 @@ static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
5516 rcu_read_unlock(); 5543 rcu_read_unlock();
5517} 5544}
5518 5545
5519/** 5546/* netdev event handler */
5520 * netdev event handler
5521 */
5522static int cnic_netdev_event(struct notifier_block *this, unsigned long event, 5547static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
5523 void *ptr) 5548 void *ptr)
5524{ 5549{
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 289274e546be..5cb88881bba1 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -12,8 +12,10 @@
12#ifndef CNIC_IF_H 12#ifndef CNIC_IF_H
13#define CNIC_IF_H 13#define CNIC_IF_H
14 14
15#define CNIC_MODULE_VERSION "2.5.10" 15#include "bnx2x/bnx2x_mfw_req.h"
16#define CNIC_MODULE_RELDATE "March 21, 2012" 16
17#define CNIC_MODULE_VERSION "2.5.12"
18#define CNIC_MODULE_RELDATE "June 29, 2012"
17 19
18#define CNIC_ULP_RDMA 0 20#define CNIC_ULP_RDMA 0
19#define CNIC_ULP_ISCSI 1 21#define CNIC_ULP_ISCSI 1
@@ -131,6 +133,11 @@ struct drv_ctl_l2_ring {
131 u32 cid; 133 u32 cid;
132}; 134};
133 135
136struct drv_ctl_register_data {
137 int ulp_type;
138 struct fcoe_capabilities fcoe_features;
139};
140
134struct drv_ctl_info { 141struct drv_ctl_info {
135 int cmd; 142 int cmd;
136 union { 143 union {
@@ -138,6 +145,7 @@ struct drv_ctl_info {
138 struct drv_ctl_io io; 145 struct drv_ctl_io io;
139 struct drv_ctl_l2_ring ring; 146 struct drv_ctl_l2_ring ring;
140 int ulp_type; 147 int ulp_type;
148 struct drv_ctl_register_data register_data;
141 char bytes[MAX_DRV_CTL_DATA]; 149 char bytes[MAX_DRV_CTL_DATA];
142 } data; 150 } data;
143}; 151};
@@ -305,6 +313,7 @@ struct cnic_dev {
305 int max_rdma_conn; 313 int max_rdma_conn;
306 314
307 union drv_info_to_mcp *stats_addr; 315 union drv_info_to_mcp *stats_addr;
316 struct fcoe_capabilities *fcoe_cap;
308 317
309 void *cnic_priv; 318 void *cnic_priv;
310}; 319};
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index e47ff8be1d7b..fce4c1e4dd3f 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -44,6 +44,10 @@
44#include <linux/prefetch.h> 44#include <linux/prefetch.h>
45#include <linux/dma-mapping.h> 45#include <linux/dma-mapping.h>
46#include <linux/firmware.h> 46#include <linux/firmware.h>
47#if IS_ENABLED(CONFIG_HWMON)
48#include <linux/hwmon.h>
49#include <linux/hwmon-sysfs.h>
50#endif
47 51
48#include <net/checksum.h> 52#include <net/checksum.h>
49#include <net/ip.h> 53#include <net/ip.h>
@@ -298,6 +302,7 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)}, 302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)}, 303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)}, 304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
305 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
301 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, 306 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
302 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, 307 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
303 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, 308 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -730,44 +735,131 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
730 tg3_ape_write32(tp, gnt + 4 * locknum, bit); 735 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
731} 736}
732 737
733static void tg3_ape_send_event(struct tg3 *tp, u32 event) 738static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
734{ 739{
735 int i;
736 u32 apedata; 740 u32 apedata;
737 741
738 /* NCSI does not support APE events */ 742 while (timeout_us) {
739 if (tg3_flag(tp, APE_HAS_NCSI)) 743 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
740 return; 744 return -EBUSY;
745
746 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
747 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
748 break;
749
750 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
751
752 udelay(10);
753 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
754 }
755
756 return timeout_us ? 0 : -EBUSY;
757}
758
759static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
760{
761 u32 i, apedata;
762
763 for (i = 0; i < timeout_us / 10; i++) {
764 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
765
766 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
767 break;
768
769 udelay(10);
770 }
771
772 return i == timeout_us / 10;
773}
774
775int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off, u32 len)
776{
777 int err;
778 u32 i, bufoff, msgoff, maxlen, apedata;
779
780 if (!tg3_flag(tp, APE_HAS_NCSI))
781 return 0;
741 782
742 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); 783 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
743 if (apedata != APE_SEG_SIG_MAGIC) 784 if (apedata != APE_SEG_SIG_MAGIC)
744 return; 785 return -ENODEV;
745 786
746 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); 787 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
747 if (!(apedata & APE_FW_STATUS_READY)) 788 if (!(apedata & APE_FW_STATUS_READY))
748 return; 789 return -EAGAIN;
749 790
750 /* Wait for up to 1 millisecond for APE to service previous event. */ 791 bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
751 for (i = 0; i < 10; i++) { 792 TG3_APE_SHMEM_BASE;
752 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) 793 msgoff = bufoff + 2 * sizeof(u32);
753 return; 794 maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
754 795
755 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS); 796 while (len) {
797 u32 length;
756 798
757 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) 799 /* Cap xfer sizes to scratchpad limits. */
758 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, 800 length = (len > maxlen) ? maxlen : len;
759 event | APE_EVENT_STATUS_EVENT_PENDING); 801 len -= length;
802
803 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
804 if (!(apedata & APE_FW_STATUS_READY))
805 return -EAGAIN;
806
807 /* Wait for up to 1 msec for APE to service previous event. */
808 err = tg3_ape_event_lock(tp, 1000);
809 if (err)
810 return err;
811
812 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
813 APE_EVENT_STATUS_SCRTCHPD_READ |
814 APE_EVENT_STATUS_EVENT_PENDING;
815 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
816
817 tg3_ape_write32(tp, bufoff, base_off);
818 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
760 819
761 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM); 820 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
821 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
762 822
763 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) 823 base_off += length;
764 break;
765 824
766 udelay(100); 825 if (tg3_ape_wait_for_event(tp, 30000))
826 return -EAGAIN;
827
828 for (i = 0; length; i += 4, length -= 4) {
829 u32 val = tg3_ape_read32(tp, msgoff + i);
830 memcpy(data, &val, sizeof(u32));
831 data++;
832 }
767 } 833 }
768 834
769 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) 835 return 0;
770 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1); 836}
837
838static int tg3_ape_send_event(struct tg3 *tp, u32 event)
839{
840 int err;
841 u32 apedata;
842
843 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
844 if (apedata != APE_SEG_SIG_MAGIC)
845 return -EAGAIN;
846
847 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
848 if (!(apedata & APE_FW_STATUS_READY))
849 return -EAGAIN;
850
851 /* Wait for up to 1 millisecond for APE to service previous event. */
852 err = tg3_ape_event_lock(tp, 1000);
853 if (err)
854 return err;
855
856 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
857 event | APE_EVENT_STATUS_EVENT_PENDING);
858
859 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
860 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
861
862 return 0;
771} 863}
772 864
773static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) 865static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
@@ -9393,6 +9485,110 @@ static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9393 return tg3_reset_hw(tp, reset_phy); 9485 return tg3_reset_hw(tp, reset_phy);
9394} 9486}
9395 9487
9488#if IS_ENABLED(CONFIG_HWMON)
9489static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9490{
9491 int i;
9492
9493 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9494 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9495
9496 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9497 off += len;
9498
9499 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9500 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9501 memset(ocir, 0, TG3_OCIR_LEN);
9502 }
9503}
9504
9505/* sysfs attributes for hwmon */
9506static ssize_t tg3_show_temp(struct device *dev,
9507 struct device_attribute *devattr, char *buf)
9508{
9509 struct pci_dev *pdev = to_pci_dev(dev);
9510 struct net_device *netdev = pci_get_drvdata(pdev);
9511 struct tg3 *tp = netdev_priv(netdev);
9512 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
9513 u32 temperature;
9514
9515 spin_lock_bh(&tp->lock);
9516 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
9517 sizeof(temperature));
9518 spin_unlock_bh(&tp->lock);
9519 return sprintf(buf, "%u\n", temperature);
9520}
9521
9522
9523static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
9524 TG3_TEMP_SENSOR_OFFSET);
9525static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
9526 TG3_TEMP_CAUTION_OFFSET);
9527static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
9528 TG3_TEMP_MAX_OFFSET);
9529
9530static struct attribute *tg3_attributes[] = {
9531 &sensor_dev_attr_temp1_input.dev_attr.attr,
9532 &sensor_dev_attr_temp1_crit.dev_attr.attr,
9533 &sensor_dev_attr_temp1_max.dev_attr.attr,
9534 NULL
9535};
9536
9537static const struct attribute_group tg3_group = {
9538 .attrs = tg3_attributes,
9539};
9540
9541#endif
9542
9543static void tg3_hwmon_close(struct tg3 *tp)
9544{
9545#if IS_ENABLED(CONFIG_HWMON)
9546 if (tp->hwmon_dev) {
9547 hwmon_device_unregister(tp->hwmon_dev);
9548 tp->hwmon_dev = NULL;
9549 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
9550 }
9551#endif
9552}
9553
9554static void tg3_hwmon_open(struct tg3 *tp)
9555{
9556#if IS_ENABLED(CONFIG_HWMON)
9557 int i, err;
9558 u32 size = 0;
9559 struct pci_dev *pdev = tp->pdev;
9560 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
9561
9562 tg3_sd_scan_scratchpad(tp, ocirs);
9563
9564 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
9565 if (!ocirs[i].src_data_length)
9566 continue;
9567
9568 size += ocirs[i].src_hdr_length;
9569 size += ocirs[i].src_data_length;
9570 }
9571
9572 if (!size)
9573 return;
9574
9575 /* Register hwmon sysfs hooks */
9576 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
9577 if (err) {
9578 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
9579 return;
9580 }
9581
9582 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
9583 if (IS_ERR(tp->hwmon_dev)) {
9584 tp->hwmon_dev = NULL;
9585 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
9586 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
9587 }
9588#endif
9589}
9590
9591
9396#define TG3_STAT_ADD32(PSTAT, REG) \ 9592#define TG3_STAT_ADD32(PSTAT, REG) \
9397do { u32 __val = tr32(REG); \ 9593do { u32 __val = tr32(REG); \
9398 (PSTAT)->low += __val; \ 9594 (PSTAT)->low += __val; \
@@ -9908,7 +10104,7 @@ static bool tg3_enable_msix(struct tg3 *tp)
9908 int i, rc; 10104 int i, rc;
9909 struct msix_entry msix_ent[tp->irq_max]; 10105 struct msix_entry msix_ent[tp->irq_max];
9910 10106
9911 tp->irq_cnt = num_online_cpus(); 10107 tp->irq_cnt = netif_get_num_default_rss_queues();
9912 if (tp->irq_cnt > 1) { 10108 if (tp->irq_cnt > 1) {
9913 /* We want as many rx rings enabled as there are cpus. 10109 /* We want as many rx rings enabled as there are cpus.
9914 * In multiqueue MSI-X mode, the first MSI-X vector 10110 * In multiqueue MSI-X mode, the first MSI-X vector
@@ -10101,6 +10297,8 @@ static int tg3_open(struct net_device *dev)
10101 10297
10102 tg3_phy_start(tp); 10298 tg3_phy_start(tp);
10103 10299
10300 tg3_hwmon_open(tp);
10301
10104 tg3_full_lock(tp, 0); 10302 tg3_full_lock(tp, 0);
10105 10303
10106 tg3_timer_start(tp); 10304 tg3_timer_start(tp);
@@ -10150,6 +10348,8 @@ static int tg3_close(struct net_device *dev)
10150 10348
10151 tg3_timer_stop(tp); 10349 tg3_timer_stop(tp);
10152 10350
10351 tg3_hwmon_close(tp);
10352
10153 tg3_phy_stop(tp); 10353 tg3_phy_stop(tp);
10154 10354
10155 tg3_full_lock(tp, 1); 10355 tg3_full_lock(tp, 1);
@@ -13857,14 +14057,9 @@ static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13857 } 14057 }
13858} 14058}
13859 14059
13860static void __devinit tg3_read_dash_ver(struct tg3 *tp) 14060static void __devinit tg3_probe_ncsi(struct tg3 *tp)
13861{ 14061{
13862 int vlen;
13863 u32 apedata; 14062 u32 apedata;
13864 char *fwtype;
13865
13866 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13867 return;
13868 14063
13869 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); 14064 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13870 if (apedata != APE_SEG_SIG_MAGIC) 14065 if (apedata != APE_SEG_SIG_MAGIC)
@@ -13874,14 +14069,22 @@ static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13874 if (!(apedata & APE_FW_STATUS_READY)) 14069 if (!(apedata & APE_FW_STATUS_READY))
13875 return; 14070 return;
13876 14071
14072 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14073 tg3_flag_set(tp, APE_HAS_NCSI);
14074}
14075
14076static void __devinit tg3_read_dash_ver(struct tg3 *tp)
14077{
14078 int vlen;
14079 u32 apedata;
14080 char *fwtype;
14081
13877 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION); 14082 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13878 14083
13879 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) { 14084 if (tg3_flag(tp, APE_HAS_NCSI))
13880 tg3_flag_set(tp, APE_HAS_NCSI);
13881 fwtype = "NCSI"; 14085 fwtype = "NCSI";
13882 } else { 14086 else
13883 fwtype = "DASH"; 14087 fwtype = "DASH";
13884 }
13885 14088
13886 vlen = strlen(tp->fw_ver); 14089 vlen = strlen(tp->fw_ver);
13887 14090
@@ -13915,20 +14118,17 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13915 tg3_read_sb_ver(tp, val); 14118 tg3_read_sb_ver(tp, val);
13916 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) 14119 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13917 tg3_read_hwsb_ver(tp); 14120 tg3_read_hwsb_ver(tp);
13918 else
13919 return;
13920
13921 if (vpd_vers)
13922 goto done;
13923 14121
13924 if (tg3_flag(tp, ENABLE_APE)) { 14122 if (tg3_flag(tp, ENABLE_ASF)) {
13925 if (tg3_flag(tp, ENABLE_ASF)) 14123 if (tg3_flag(tp, ENABLE_APE)) {
13926 tg3_read_dash_ver(tp); 14124 tg3_probe_ncsi(tp);
13927 } else if (tg3_flag(tp, ENABLE_ASF)) { 14125 if (!vpd_vers)
13928 tg3_read_mgmtfw_ver(tp); 14126 tg3_read_dash_ver(tp);
14127 } else if (!vpd_vers) {
14128 tg3_read_mgmtfw_ver(tp);
14129 }
13929 } 14130 }
13930 14131
13931done:
13932 tp->fw_ver[TG3_VER_SIZE - 1] = 0; 14132 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13933} 14133}
13934 14134
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 93865f899a4f..a1b75cd67b9d 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2311,10 +2311,11 @@
2311#define APE_LOCK_REQ_DRIVER 0x00001000 2311#define APE_LOCK_REQ_DRIVER 0x00001000
2312#define TG3_APE_LOCK_GRANT 0x004c 2312#define TG3_APE_LOCK_GRANT 0x004c
2313#define APE_LOCK_GRANT_DRIVER 0x00001000 2313#define APE_LOCK_GRANT_DRIVER 0x00001000
2314#define TG3_APE_SEG_SIG 0x4000
2315#define APE_SEG_SIG_MAGIC 0x41504521
2316 2314
2317/* APE shared memory. Accessible through BAR1 */ 2315/* APE shared memory. Accessible through BAR1 */
2316#define TG3_APE_SHMEM_BASE 0x4000
2317#define TG3_APE_SEG_SIG 0x4000
2318#define APE_SEG_SIG_MAGIC 0x41504521
2318#define TG3_APE_FW_STATUS 0x400c 2319#define TG3_APE_FW_STATUS 0x400c
2319#define APE_FW_STATUS_READY 0x00000100 2320#define APE_FW_STATUS_READY 0x00000100
2320#define TG3_APE_FW_FEATURES 0x4010 2321#define TG3_APE_FW_FEATURES 0x4010
@@ -2327,6 +2328,8 @@
2327#define APE_FW_VERSION_REVMSK 0x0000ff00 2328#define APE_FW_VERSION_REVMSK 0x0000ff00
2328#define APE_FW_VERSION_REVSFT 8 2329#define APE_FW_VERSION_REVSFT 8
2329#define APE_FW_VERSION_BLDMSK 0x000000ff 2330#define APE_FW_VERSION_BLDMSK 0x000000ff
2331#define TG3_APE_SEG_MSG_BUF_OFF 0x401c
2332#define TG3_APE_SEG_MSG_BUF_LEN 0x4020
2330#define TG3_APE_HOST_SEG_SIG 0x4200 2333#define TG3_APE_HOST_SEG_SIG 0x4200
2331#define APE_HOST_SEG_SIG_MAGIC 0x484f5354 2334#define APE_HOST_SEG_SIG_MAGIC 0x484f5354
2332#define TG3_APE_HOST_SEG_LEN 0x4204 2335#define TG3_APE_HOST_SEG_LEN 0x4204
@@ -2353,6 +2356,8 @@
2353 2356
2354#define APE_EVENT_STATUS_DRIVER_EVNT 0x00000010 2357#define APE_EVENT_STATUS_DRIVER_EVNT 0x00000010
2355#define APE_EVENT_STATUS_STATE_CHNGE 0x00000500 2358#define APE_EVENT_STATUS_STATE_CHNGE 0x00000500
2359#define APE_EVENT_STATUS_SCRTCHPD_READ 0x00001600
2360#define APE_EVENT_STATUS_SCRTCHPD_WRITE 0x00001700
2356#define APE_EVENT_STATUS_STATE_START 0x00010000 2361#define APE_EVENT_STATUS_STATE_START 0x00010000
2357#define APE_EVENT_STATUS_STATE_UNLOAD 0x00020000 2362#define APE_EVENT_STATUS_STATE_UNLOAD 0x00020000
2358#define APE_EVENT_STATUS_STATE_WOL 0x00030000 2363#define APE_EVENT_STATUS_STATE_WOL 0x00030000
@@ -2671,6 +2676,40 @@ struct tg3_hw_stats {
2671 u8 __reserved4[0xb00-0x9c8]; 2676 u8 __reserved4[0xb00-0x9c8];
2672}; 2677};
2673 2678
2679#define TG3_SD_NUM_RECS 3
2680#define TG3_OCIR_LEN (sizeof(struct tg3_ocir))
2681#define TG3_OCIR_SIG_MAGIC 0x5253434f
2682#define TG3_OCIR_FLAG_ACTIVE 0x00000001
2683
2684#define TG3_TEMP_CAUTION_OFFSET 0xc8
2685#define TG3_TEMP_MAX_OFFSET 0xcc
2686#define TG3_TEMP_SENSOR_OFFSET 0xd4
2687
2688
2689struct tg3_ocir {
2690 u32 signature;
2691 u16 version_flags;
2692 u16 refresh_int;
2693 u32 refresh_tmr;
2694 u32 update_tmr;
2695 u32 dst_base_addr;
2696 u16 src_hdr_offset;
2697 u16 src_hdr_length;
2698 u16 src_data_offset;
2699 u16 src_data_length;
2700 u16 dst_hdr_offset;
2701 u16 dst_data_offset;
2702 u16 dst_reg_upd_offset;
2703 u16 dst_sem_offset;
2704 u32 reserved1[2];
2705 u32 port0_flags;
2706 u32 port1_flags;
2707 u32 port2_flags;
2708 u32 port3_flags;
2709 u32 reserved2[1];
2710};
2711
2712
2674/* 'mapping' is superfluous as the chip does not write into 2713/* 'mapping' is superfluous as the chip does not write into
2675 * the tx/rx post rings so we could just fetch it from there. 2714 * the tx/rx post rings so we could just fetch it from there.
2676 * But the cache behavior is better how we are doing it now. 2715 * But the cache behavior is better how we are doing it now.
@@ -3206,6 +3245,10 @@ struct tg3 {
3206 const char *fw_needed; 3245 const char *fw_needed;
3207 const struct firmware *fw; 3246 const struct firmware *fw;
3208 u32 fw_len; /* includes BSS */ 3247 u32 fw_len; /* includes BSS */
3248
3249#if IS_ENABLED(CONFIG_HWMON)
3250 struct device *hwmon_dev;
3251#endif
3209}; 3252};
3210 3253
3211#endif /* !(_T3_H) */ 3254#endif /* !(_T3_H) */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c
index 689e5e19cc0b..550d2521ba76 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cee.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c
@@ -52,13 +52,7 @@ bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg *lldp_cfg)
52} 52}
53 53
54/** 54/**
55 * bfa_cee_attr_meminfo() 55 * bfa_cee_attr_meminfo - Returns the size of the DMA memory needed by CEE attributes
56 *
57 * @brief Returns the size of the DMA memory needed by CEE attributes
58 *
59 * @param[in] void
60 *
61 * @return Size of DMA region
62 */ 56 */
63static u32 57static u32
64bfa_cee_attr_meminfo(void) 58bfa_cee_attr_meminfo(void)
@@ -66,13 +60,7 @@ bfa_cee_attr_meminfo(void)
66 return roundup(sizeof(struct bfa_cee_attr), BFA_DMA_ALIGN_SZ); 60 return roundup(sizeof(struct bfa_cee_attr), BFA_DMA_ALIGN_SZ);
67} 61}
68/** 62/**
69 * bfa_cee_stats_meminfo() 63 * bfa_cee_stats_meminfo - Returns the size of the DMA memory needed by CEE stats
70 *
71 * @brief Returns the size of the DMA memory needed by CEE stats
72 *
73 * @param[in] void
74 *
75 * @return Size of DMA region
76 */ 64 */
77static u32 65static u32
78bfa_cee_stats_meminfo(void) 66bfa_cee_stats_meminfo(void)
@@ -81,14 +69,10 @@ bfa_cee_stats_meminfo(void)
81} 69}
82 70
83/** 71/**
84 * bfa_cee_get_attr_isr() 72 * bfa_cee_get_attr_isr - CEE ISR for get-attributes responses from f/w
85 *
86 * @brief CEE ISR for get-attributes responses from f/w
87 *
88 * @param[in] cee - Pointer to the CEE module
89 * status - Return status from the f/w
90 * 73 *
91 * @return void 74 * @cee: Pointer to the CEE module
75 * @status: Return status from the f/w
92 */ 76 */
93static void 77static void
94bfa_cee_get_attr_isr(struct bfa_cee *cee, enum bfa_status status) 78bfa_cee_get_attr_isr(struct bfa_cee *cee, enum bfa_status status)
@@ -105,14 +89,10 @@ bfa_cee_get_attr_isr(struct bfa_cee *cee, enum bfa_status status)
105} 89}
106 90
107/** 91/**
108 * bfa_cee_get_attr_isr() 92 * bfa_cee_get_attr_isr - CEE ISR for get-stats responses from f/w
109 *
110 * @brief CEE ISR for get-stats responses from f/w
111 * 93 *
112 * @param[in] cee - Pointer to the CEE module 94 * @cee: Pointer to the CEE module
113 * status - Return status from the f/w 95 * @status: Return status from the f/w
114 *
115 * @return void
116 */ 96 */
117static void 97static void
118bfa_cee_get_stats_isr(struct bfa_cee *cee, enum bfa_status status) 98bfa_cee_get_stats_isr(struct bfa_cee *cee, enum bfa_status status)
@@ -147,13 +127,7 @@ bfa_cee_reset_stats_isr(struct bfa_cee *cee, enum bfa_status status)
147 cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status); 127 cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status);
148} 128}
149/** 129/**
150 * bfa_nw_cee_meminfo() 130 * bfa_nw_cee_meminfo - Returns the size of the DMA memory needed by CEE module
151 *
152 * @brief Returns the size of the DMA memory needed by CEE module
153 *
154 * @param[in] void
155 *
156 * @return Size of DMA region
157 */ 131 */
158u32 132u32
159bfa_nw_cee_meminfo(void) 133bfa_nw_cee_meminfo(void)
@@ -162,15 +136,11 @@ bfa_nw_cee_meminfo(void)
162} 136}
163 137
164/** 138/**
165 * bfa_nw_cee_mem_claim() 139 * bfa_nw_cee_mem_claim - Initialized CEE DMA Memory
166 *
167 * @brief Initialized CEE DMA Memory
168 *
169 * @param[in] cee CEE module pointer
170 * dma_kva Kernel Virtual Address of CEE DMA Memory
171 * dma_pa Physical Address of CEE DMA Memory
172 * 140 *
173 * @return void 141 * @cee: CEE module pointer
142 * @dma_kva: Kernel Virtual Address of CEE DMA Memory
143 * @dma_pa: Physical Address of CEE DMA Memory
174 */ 144 */
175void 145void
176bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa) 146bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
@@ -185,13 +155,11 @@ bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
185} 155}
186 156
187/** 157/**
188 * bfa_cee_get_attr() 158 * bfa_cee_get_attr - Send the request to the f/w to fetch CEE attributes.
189 *
190 * @brief Send the request to the f/w to fetch CEE attributes.
191 * 159 *
192 * @param[in] Pointer to the CEE module data structure. 160 * @cee: Pointer to the CEE module data structure.
193 * 161 *
194 * @return Status 162 * Return: status
195 */ 163 */
196enum bfa_status 164enum bfa_status
197bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr, 165bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr,
@@ -220,13 +188,7 @@ bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr,
220} 188}
221 189
222/** 190/**
223 * bfa_cee_isrs() 191 * bfa_cee_isrs - Handles Mail-box interrupts for CEE module.
224 *
225 * @brief Handles Mail-box interrupts for CEE module.
226 *
227 * @param[in] Pointer to the CEE module data structure.
228 *
229 * @return void
230 */ 192 */
231 193
232static void 194static void
@@ -253,14 +215,9 @@ bfa_cee_isr(void *cbarg, struct bfi_mbmsg *m)
253} 215}
254 216
255/** 217/**
256 * bfa_cee_notify() 218 * bfa_cee_notify - CEE module heart-beat failure handler.
257 *
258 * @brief CEE module heart-beat failure handler.
259 * @brief CEE module IOC event handler.
260 *
261 * @param[in] IOC event type
262 * 219 *
263 * @return void 220 * @event: IOC event type
264 */ 221 */
265 222
266static void 223static void
@@ -307,17 +264,13 @@ bfa_cee_notify(void *arg, enum bfa_ioc_event event)
307} 264}
308 265
309/** 266/**
310 * bfa_nw_cee_attach() 267 * bfa_nw_cee_attach - CEE module-attach API
311 *
312 * @brief CEE module-attach API
313 * 268 *
314 * @param[in] cee - Pointer to the CEE module data structure 269 * @cee: Pointer to the CEE module data structure
315 * ioc - Pointer to the ioc module data structure 270 * @ioc: Pointer to the ioc module data structure
316 * dev - Pointer to the device driver module data structure 271 * @dev: Pointer to the device driver module data structure.
317 * The device driver specific mbox ISR functions have 272 * The device driver specific mbox ISR functions have
318 * this pointer as one of the parameters. 273 * this pointer as one of the parameters.
319 *
320 * @return void
321 */ 274 */
322void 275void
323bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, 276bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc,
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cs.h b/drivers/net/ethernet/brocade/bna/bfa_cs.h
index 3da1a946ccdd..ad004a4c3897 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_cs.h
@@ -16,23 +16,18 @@
16 * www.brocade.com 16 * www.brocade.com
17 */ 17 */
18 18
19/** 19/* BFA common services */
20 * @file bfa_cs.h BFA common services
21 */
22 20
23#ifndef __BFA_CS_H__ 21#ifndef __BFA_CS_H__
24#define __BFA_CS_H__ 22#define __BFA_CS_H__
25 23
26#include "cna.h" 24#include "cna.h"
27 25
28/** 26/* BFA state machine interfaces */
29 * @ BFA state machine interfaces
30 */
31 27
32typedef void (*bfa_sm_t)(void *sm, int event); 28typedef void (*bfa_sm_t)(void *sm, int event);
33 29
34/** 30/* oc - object class eg. bfa_ioc
35 * oc - object class eg. bfa_ioc
36 * st - state, eg. reset 31 * st - state, eg. reset
37 * otype - object type, eg. struct bfa_ioc 32 * otype - object type, eg. struct bfa_ioc
38 * etype - object type, eg. enum ioc_event 33 * etype - object type, eg. enum ioc_event
@@ -45,9 +40,7 @@ typedef void (*bfa_sm_t)(void *sm, int event);
45#define bfa_sm_get_state(_sm) ((_sm)->sm) 40#define bfa_sm_get_state(_sm) ((_sm)->sm)
46#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state)) 41#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state))
47 42
48/** 43/* For converting from state machine function to state encoding. */
49 * For converting from state machine function to state encoding.
50 */
51struct bfa_sm_table { 44struct bfa_sm_table {
52 bfa_sm_t sm; /*!< state machine function */ 45 bfa_sm_t sm; /*!< state machine function */
53 int state; /*!< state machine encoding */ 46 int state; /*!< state machine encoding */
@@ -55,13 +48,10 @@ struct bfa_sm_table {
55}; 48};
56#define BFA_SM(_sm) ((bfa_sm_t)(_sm)) 49#define BFA_SM(_sm) ((bfa_sm_t)(_sm))
57 50
58/** 51/* State machine with entry actions. */
59 * State machine with entry actions.
60 */
61typedef void (*bfa_fsm_t)(void *fsm, int event); 52typedef void (*bfa_fsm_t)(void *fsm, int event);
62 53
63/** 54/* oc - object class eg. bfa_ioc
64 * oc - object class eg. bfa_ioc
65 * st - state, eg. reset 55 * st - state, eg. reset
66 * otype - object type, eg. struct bfa_ioc 56 * otype - object type, eg. struct bfa_ioc
67 * etype - object type, eg. enum ioc_event 57 * etype - object type, eg. enum ioc_event
@@ -90,9 +80,7 @@ bfa_sm_to_state(const struct bfa_sm_table *smt, bfa_sm_t sm)
90 return smt[i].state; 80 return smt[i].state;
91} 81}
92 82
93/** 83/* Generic wait counter. */
94 * @ Generic wait counter.
95 */
96 84
97typedef void (*bfa_wc_resume_t) (void *cbarg); 85typedef void (*bfa_wc_resume_t) (void *cbarg);
98 86
@@ -116,9 +104,7 @@ bfa_wc_down(struct bfa_wc *wc)
116 wc->wc_resume(wc->wc_cbarg); 104 wc->wc_resume(wc->wc_cbarg);
117} 105}
118 106
119/** 107/* Initialize a waiting counter. */
120 * Initialize a waiting counter.
121 */
122static inline void 108static inline void
123bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg) 109bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
124{ 110{
@@ -128,9 +114,7 @@ bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
128 bfa_wc_up(wc); 114 bfa_wc_up(wc);
129} 115}
130 116
131/** 117/* Wait for counter to reach zero */
132 * Wait for counter to reach zero
133 */
134static inline void 118static inline void
135bfa_wc_wait(struct bfa_wc *wc) 119bfa_wc_wait(struct bfa_wc *wc)
136{ 120{
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h
index 48f877337390..e423f82da490 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h
@@ -26,13 +26,9 @@
26#define BFA_STRING_32 32 26#define BFA_STRING_32 32
27#define BFA_VERSION_LEN 64 27#define BFA_VERSION_LEN 64
28 28
29/** 29/* ---------------------- adapter definitions ------------ */
30 * ---------------------- adapter definitions ------------
31 */
32 30
33/** 31/* BFA adapter level attributes. */
34 * BFA adapter level attributes.
35 */
36enum { 32enum {
37 BFA_ADAPTER_SERIAL_NUM_LEN = STRSZ(BFA_MFG_SERIALNUM_SIZE), 33 BFA_ADAPTER_SERIAL_NUM_LEN = STRSZ(BFA_MFG_SERIALNUM_SIZE),
38 /* 34 /*
@@ -74,18 +70,14 @@ struct bfa_adapter_attr {
74 u8 trunk_capable; 70 u8 trunk_capable;
75}; 71};
76 72
77/** 73/* ---------------------- IOC definitions ------------ */
78 * ---------------------- IOC definitions ------------
79 */
80 74
81enum { 75enum {
82 BFA_IOC_DRIVER_LEN = 16, 76 BFA_IOC_DRIVER_LEN = 16,
83 BFA_IOC_CHIP_REV_LEN = 8, 77 BFA_IOC_CHIP_REV_LEN = 8,
84}; 78};
85 79
86/** 80/* Driver and firmware versions. */
87 * Driver and firmware versions.
88 */
89struct bfa_ioc_driver_attr { 81struct bfa_ioc_driver_attr {
90 char driver[BFA_IOC_DRIVER_LEN]; /*!< driver name */ 82 char driver[BFA_IOC_DRIVER_LEN]; /*!< driver name */
91 char driver_ver[BFA_VERSION_LEN]; /*!< driver version */ 83 char driver_ver[BFA_VERSION_LEN]; /*!< driver version */
@@ -95,9 +87,7 @@ struct bfa_ioc_driver_attr {
95 char ob_ver[BFA_VERSION_LEN]; /*!< openboot version */ 87 char ob_ver[BFA_VERSION_LEN]; /*!< openboot version */
96}; 88};
97 89
98/** 90/* IOC PCI device attributes */
99 * IOC PCI device attributes
100 */
101struct bfa_ioc_pci_attr { 91struct bfa_ioc_pci_attr {
102 u16 vendor_id; /*!< PCI vendor ID */ 92 u16 vendor_id; /*!< PCI vendor ID */
103 u16 device_id; /*!< PCI device ID */ 93 u16 device_id; /*!< PCI device ID */
@@ -108,9 +98,7 @@ struct bfa_ioc_pci_attr {
108 char chip_rev[BFA_IOC_CHIP_REV_LEN]; /*!< chip revision */ 98 char chip_rev[BFA_IOC_CHIP_REV_LEN]; /*!< chip revision */
109}; 99};
110 100
111/** 101/* IOC states */
112 * IOC states
113 */
114enum bfa_ioc_state { 102enum bfa_ioc_state {
115 BFA_IOC_UNINIT = 1, /*!< IOC is in uninit state */ 103 BFA_IOC_UNINIT = 1, /*!< IOC is in uninit state */
116 BFA_IOC_RESET = 2, /*!< IOC is in reset state */ 104 BFA_IOC_RESET = 2, /*!< IOC is in reset state */
@@ -127,9 +115,7 @@ enum bfa_ioc_state {
127 BFA_IOC_HWFAIL = 13, /*!< PCI mapping doesn't exist */ 115 BFA_IOC_HWFAIL = 13, /*!< PCI mapping doesn't exist */
128}; 116};
129 117
130/** 118/* IOC firmware stats */
131 * IOC firmware stats
132 */
133struct bfa_fw_ioc_stats { 119struct bfa_fw_ioc_stats {
134 u32 enable_reqs; 120 u32 enable_reqs;
135 u32 disable_reqs; 121 u32 disable_reqs;
@@ -139,9 +125,7 @@ struct bfa_fw_ioc_stats {
139 u32 unknown_reqs; 125 u32 unknown_reqs;
140}; 126};
141 127
142/** 128/* IOC driver stats */
143 * IOC driver stats
144 */
145struct bfa_ioc_drv_stats { 129struct bfa_ioc_drv_stats {
146 u32 ioc_isrs; 130 u32 ioc_isrs;
147 u32 ioc_enables; 131 u32 ioc_enables;
@@ -157,9 +141,7 @@ struct bfa_ioc_drv_stats {
157 u32 rsvd; 141 u32 rsvd;
158}; 142};
159 143
160/** 144/* IOC statistics */
161 * IOC statistics
162 */
163struct bfa_ioc_stats { 145struct bfa_ioc_stats {
164 struct bfa_ioc_drv_stats drv_stats; /*!< driver IOC stats */ 146 struct bfa_ioc_drv_stats drv_stats; /*!< driver IOC stats */
165 struct bfa_fw_ioc_stats fw_stats; /*!< firmware IOC stats */ 147 struct bfa_fw_ioc_stats fw_stats; /*!< firmware IOC stats */
@@ -171,9 +153,7 @@ enum bfa_ioc_type {
171 BFA_IOC_TYPE_LL = 3, 153 BFA_IOC_TYPE_LL = 3,
172}; 154};
173 155
174/** 156/* IOC attributes returned in queries */
175 * IOC attributes returned in queries
176 */
177struct bfa_ioc_attr { 157struct bfa_ioc_attr {
178 enum bfa_ioc_type ioc_type; 158 enum bfa_ioc_type ioc_type;
179 enum bfa_ioc_state state; /*!< IOC state */ 159 enum bfa_ioc_state state; /*!< IOC state */
@@ -187,22 +167,16 @@ struct bfa_ioc_attr {
187 u8 rsvd[4]; /*!< 64bit align */ 167 u8 rsvd[4]; /*!< 64bit align */
188}; 168};
189 169
190/** 170/* Adapter capability mask definition */
191 * Adapter capability mask definition
192 */
193enum { 171enum {
194 BFA_CM_HBA = 0x01, 172 BFA_CM_HBA = 0x01,
195 BFA_CM_CNA = 0x02, 173 BFA_CM_CNA = 0x02,
196 BFA_CM_NIC = 0x04, 174 BFA_CM_NIC = 0x04,
197}; 175};
198 176
199/** 177/* ---------------------- mfg definitions ------------ */
200 * ---------------------- mfg definitions ------------
201 */
202 178
203/** 179/* Checksum size */
204 * Checksum size
205 */
206#define BFA_MFG_CHKSUM_SIZE 16 180#define BFA_MFG_CHKSUM_SIZE 16
207 181
208#define BFA_MFG_PARTNUM_SIZE 14 182#define BFA_MFG_PARTNUM_SIZE 14
@@ -213,8 +187,7 @@ enum {
213 187
214#pragma pack(1) 188#pragma pack(1)
215 189
216/** 190/* BFA adapter manufacturing block definition.
217 * @brief BFA adapter manufacturing block definition.
218 * 191 *
219 * All numerical fields are in big-endian format. 192 * All numerical fields are in big-endian format.
220 */ 193 */
@@ -256,9 +229,7 @@ struct bfa_mfg_block {
256 229
257#pragma pack() 230#pragma pack()
258 231
259/** 232/* ---------------------- pci definitions ------------ */
260 * ---------------------- pci definitions ------------
261 */
262 233
263/* 234/*
264 * PCI device ID information 235 * PCI device ID information
@@ -275,9 +246,7 @@ enum {
275#define bfa_asic_id_ctc(device) \ 246#define bfa_asic_id_ctc(device) \
276 (bfa_asic_id_ct(device) || bfa_asic_id_ct2(device)) 247 (bfa_asic_id_ct(device) || bfa_asic_id_ct2(device))
277 248
278/** 249/* PCI sub-system device and vendor ID information */
279 * PCI sub-system device and vendor ID information
280 */
281enum { 250enum {
282 BFA_PCI_FCOE_SSDEVICE_ID = 0x14, 251 BFA_PCI_FCOE_SSDEVICE_ID = 0x14,
283 BFA_PCI_CT2_SSID_FCoE = 0x22, 252 BFA_PCI_CT2_SSID_FCoE = 0x22,
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
index 8ab33ee2c2bc..b39c5f23974b 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
@@ -20,10 +20,7 @@
20 20
21#include "bfa_defs.h" 21#include "bfa_defs.h"
22 22
23/** 23/* FC physical port statistics. */
24 * @brief
25 * FC physical port statistics.
26 */
27struct bfa_port_fc_stats { 24struct bfa_port_fc_stats {
28 u64 secs_reset; /*!< Seconds since stats is reset */ 25 u64 secs_reset; /*!< Seconds since stats is reset */
29 u64 tx_frames; /*!< Tx frames */ 26 u64 tx_frames; /*!< Tx frames */
@@ -59,10 +56,7 @@ struct bfa_port_fc_stats {
59 u64 bbsc_link_resets; /*!< Credit Recovery-Link Resets */ 56 u64 bbsc_link_resets; /*!< Credit Recovery-Link Resets */
60}; 57};
61 58
62/** 59/* Eth Physical Port statistics. */
63 * @brief
64 * Eth Physical Port statistics.
65 */
66struct bfa_port_eth_stats { 60struct bfa_port_eth_stats {
67 u64 secs_reset; /*!< Seconds since stats is reset */ 61 u64 secs_reset; /*!< Seconds since stats is reset */
68 u64 frame_64; /*!< Frames 64 bytes */ 62 u64 frame_64; /*!< Frames 64 bytes */
@@ -108,10 +102,7 @@ struct bfa_port_eth_stats {
108 u64 tx_iscsi_zero_pause; /*!< Tx iSCSI zero pause */ 102 u64 tx_iscsi_zero_pause; /*!< Tx iSCSI zero pause */
109}; 103};
110 104
111/** 105/* Port statistics. */
112 * @brief
113 * Port statistics.
114 */
115union bfa_port_stats_u { 106union bfa_port_stats_u {
116 struct bfa_port_fc_stats fc; 107 struct bfa_port_fc_stats fc;
117 struct bfa_port_eth_stats eth; 108 struct bfa_port_eth_stats eth;
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
index 6681fe87c1e1..7fb396fe679d 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
@@ -20,33 +20,23 @@
20 20
21#include "bfa_defs.h" 21#include "bfa_defs.h"
22 22
23/** 23/* Manufacturing block version */
24 * Manufacturing block version
25 */
26#define BFA_MFG_VERSION 3 24#define BFA_MFG_VERSION 3
27#define BFA_MFG_VERSION_UNINIT 0xFF 25#define BFA_MFG_VERSION_UNINIT 0xFF
28 26
29/** 27/* Manufacturing block encrypted version */
30 * Manufacturing block encrypted version
31 */
32#define BFA_MFG_ENC_VER 2 28#define BFA_MFG_ENC_VER 2
33 29
34/** 30/* Manufacturing block version 1 length */
35 * Manufacturing block version 1 length
36 */
37#define BFA_MFG_VER1_LEN 128 31#define BFA_MFG_VER1_LEN 128
38 32
39/** 33/* Manufacturing block header length */
40 * Manufacturing block header length
41 */
42#define BFA_MFG_HDR_LEN 4 34#define BFA_MFG_HDR_LEN 4
43 35
44#define BFA_MFG_SERIALNUM_SIZE 11 36#define BFA_MFG_SERIALNUM_SIZE 11
45#define STRSZ(_n) (((_n) + 4) & ~3) 37#define STRSZ(_n) (((_n) + 4) & ~3)
46 38
47/** 39/* Manufacturing card type */
48 * Manufacturing card type
49 */
50enum { 40enum {
51 BFA_MFG_TYPE_CB_MAX = 825, /*!< Crossbow card type max */ 41 BFA_MFG_TYPE_CB_MAX = 825, /*!< Crossbow card type max */
52 BFA_MFG_TYPE_FC8P2 = 825, /*!< 8G 2port FC card */ 42 BFA_MFG_TYPE_FC8P2 = 825, /*!< 8G 2port FC card */
@@ -70,9 +60,7 @@ enum {
70 60
71#pragma pack(1) 61#pragma pack(1)
72 62
73/** 63/* Check if Mezz card */
74 * Check if Mezz card
75 */
76#define bfa_mfg_is_mezz(type) (( \ 64#define bfa_mfg_is_mezz(type) (( \
77 (type) == BFA_MFG_TYPE_JAYHAWK || \ 65 (type) == BFA_MFG_TYPE_JAYHAWK || \
78 (type) == BFA_MFG_TYPE_WANCHESE || \ 66 (type) == BFA_MFG_TYPE_WANCHESE || \
@@ -127,9 +115,7 @@ do { \
127 } \ 115 } \
128} while (0) 116} while (0)
129 117
130/** 118/* VPD data length */
131 * VPD data length
132 */
133#define BFA_MFG_VPD_LEN 512 119#define BFA_MFG_VPD_LEN 512
134#define BFA_MFG_VPD_LEN_INVALID 0 120#define BFA_MFG_VPD_LEN_INVALID 0
135 121
@@ -137,9 +123,7 @@ do { \
137#define BFA_MFG_VPD_PCI_VER_MASK 0x07 /*!< version mask 3 bits */ 123#define BFA_MFG_VPD_PCI_VER_MASK 0x07 /*!< version mask 3 bits */
138#define BFA_MFG_VPD_PCI_VDR_MASK 0xf8 /*!< vendor mask 5 bits */ 124#define BFA_MFG_VPD_PCI_VDR_MASK 0xf8 /*!< vendor mask 5 bits */
139 125
140/** 126/* VPD vendor tag */
141 * VPD vendor tag
142 */
143enum { 127enum {
144 BFA_MFG_VPD_UNKNOWN = 0, /*!< vendor unknown */ 128 BFA_MFG_VPD_UNKNOWN = 0, /*!< vendor unknown */
145 BFA_MFG_VPD_IBM = 1, /*!< vendor IBM */ 129 BFA_MFG_VPD_IBM = 1, /*!< vendor IBM */
@@ -151,8 +135,7 @@ enum {
151 BFA_MFG_VPD_PCI_BRCD = 0xf8, /*!< PCI VPD Brocade */ 135 BFA_MFG_VPD_PCI_BRCD = 0xf8, /*!< PCI VPD Brocade */
152}; 136};
153 137
154/** 138/* BFA adapter flash vpd data definition.
155 * @brief BFA adapter flash vpd data definition.
156 * 139 *
157 * All numerical fields are in big-endian format. 140 * All numerical fields are in big-endian format.
158 */ 141 */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_status.h b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h
index 7c5fe6c2e80e..ea9af9ae754d 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs_status.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h
@@ -18,8 +18,7 @@
18#ifndef __BFA_DEFS_STATUS_H__ 18#ifndef __BFA_DEFS_STATUS_H__
19#define __BFA_DEFS_STATUS_H__ 19#define __BFA_DEFS_STATUS_H__
20 20
21/** 21/* API status return values
22 * API status return values
23 * 22 *
24 * NOTE: The error msgs are auto generated from the comments. Only singe line 23 * NOTE: The error msgs are auto generated from the comments. Only singe line
25 * comments are supported 24 * comments are supported
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 0b640fafbda3..959c58ef972a 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -20,13 +20,9 @@
20#include "bfi_reg.h" 20#include "bfi_reg.h"
21#include "bfa_defs.h" 21#include "bfa_defs.h"
22 22
23/** 23/* IOC local definitions */
24 * IOC local definitions
25 */
26 24
27/** 25/* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */
28 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
29 */
30 26
31#define bfa_ioc_firmware_lock(__ioc) \ 27#define bfa_ioc_firmware_lock(__ioc) \
32 ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc)) 28 ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
@@ -96,9 +92,7 @@ static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
96static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model); 92static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
97static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc); 93static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
98 94
99/** 95/* IOC state machine definitions/declarations */
100 * IOC state machine definitions/declarations
101 */
102enum ioc_event { 96enum ioc_event {
103 IOC_E_RESET = 1, /*!< IOC reset request */ 97 IOC_E_RESET = 1, /*!< IOC reset request */
104 IOC_E_ENABLE = 2, /*!< IOC enable request */ 98 IOC_E_ENABLE = 2, /*!< IOC enable request */
@@ -148,9 +142,7 @@ static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
148static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc); 142static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
149static void bfa_iocpf_stop(struct bfa_ioc *ioc); 143static void bfa_iocpf_stop(struct bfa_ioc *ioc);
150 144
151/** 145/* IOCPF state machine events */
152 * IOCPF state machine events
153 */
154enum iocpf_event { 146enum iocpf_event {
155 IOCPF_E_ENABLE = 1, /*!< IOCPF enable request */ 147 IOCPF_E_ENABLE = 1, /*!< IOCPF enable request */
156 IOCPF_E_DISABLE = 2, /*!< IOCPF disable request */ 148 IOCPF_E_DISABLE = 2, /*!< IOCPF disable request */
@@ -166,9 +158,7 @@ enum iocpf_event {
166 IOCPF_E_SEM_ERROR = 12, /*!< h/w sem mapping error */ 158 IOCPF_E_SEM_ERROR = 12, /*!< h/w sem mapping error */
167}; 159};
168 160
169/** 161/* IOCPF states */
170 * IOCPF states
171 */
172enum bfa_iocpf_state { 162enum bfa_iocpf_state {
173 BFA_IOCPF_RESET = 1, /*!< IOC is in reset state */ 163 BFA_IOCPF_RESET = 1, /*!< IOC is in reset state */
174 BFA_IOCPF_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */ 164 BFA_IOCPF_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */
@@ -215,21 +205,15 @@ static struct bfa_sm_table iocpf_sm_table[] = {
215 {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED}, 205 {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
216}; 206};
217 207
218/** 208/* IOC State Machine */
219 * IOC State Machine
220 */
221 209
222/** 210/* Beginning state. IOC uninit state. */
223 * Beginning state. IOC uninit state.
224 */
225static void 211static void
226bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc) 212bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
227{ 213{
228} 214}
229 215
230/** 216/* IOC is in uninit state. */
231 * IOC is in uninit state.
232 */
233static void 217static void
234bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event) 218bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
235{ 219{
@@ -243,18 +227,14 @@ bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
243 } 227 }
244} 228}
245 229
246/** 230/* Reset entry actions -- initialize state machine */
247 * Reset entry actions -- initialize state machine
248 */
249static void 231static void
250bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc) 232bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
251{ 233{
252 bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset); 234 bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
253} 235}
254 236
255/** 237/* IOC is in reset state. */
256 * IOC is in reset state.
257 */
258static void 238static void
259bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event) 239bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
260{ 240{
@@ -282,8 +262,7 @@ bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
282 bfa_iocpf_enable(ioc); 262 bfa_iocpf_enable(ioc);
283} 263}
284 264
285/** 265/* Host IOC function is being enabled, awaiting response from firmware.
286 * Host IOC function is being enabled, awaiting response from firmware.
287 * Semaphore is acquired. 266 * Semaphore is acquired.
288 */ 267 */
289static void 268static void
@@ -325,9 +304,7 @@ bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
325 } 304 }
326} 305}
327 306
328/** 307/* Semaphore should be acquired for version check. */
329 * Semaphore should be acquired for version check.
330 */
331static void 308static void
332bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc) 309bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
333{ 310{
@@ -336,9 +313,7 @@ bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
336 bfa_ioc_send_getattr(ioc); 313 bfa_ioc_send_getattr(ioc);
337} 314}
338 315
339/** 316/* IOC configuration in progress. Timer is active. */
340 * IOC configuration in progress. Timer is active.
341 */
342static void 317static void
343bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event) 318bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
344{ 319{
@@ -419,9 +394,7 @@ bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
419 bfa_iocpf_disable(ioc); 394 bfa_iocpf_disable(ioc);
420} 395}
421 396
422/** 397/* IOC is being disabled */
423 * IOC is being disabled
424 */
425static void 398static void
426bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event) 399bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
427{ 400{
@@ -449,9 +422,7 @@ bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
449 } 422 }
450} 423}
451 424
452/** 425/* IOC disable completion entry. */
453 * IOC disable completion entry.
454 */
455static void 426static void
456bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc) 427bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
457{ 428{
@@ -485,9 +456,7 @@ bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
485{ 456{
486} 457}
487 458
488/** 459/* Hardware initialization retry. */
489 * Hardware initialization retry.
490 */
491static void 460static void
492bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event) 461bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
493{ 462{
@@ -534,9 +503,7 @@ bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
534{ 503{
535} 504}
536 505
537/** 506/* IOC failure. */
538 * IOC failure.
539 */
540static void 507static void
541bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event) 508bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
542{ 509{
@@ -568,9 +535,7 @@ bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc)
568{ 535{
569} 536}
570 537
571/** 538/* IOC failure. */
572 * IOC failure.
573 */
574static void 539static void
575bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event) 540bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
576{ 541{
@@ -593,13 +558,9 @@ bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
593 } 558 }
594} 559}
595 560
596/** 561/* IOCPF State Machine */
597 * IOCPF State Machine
598 */
599 562
600/** 563/* Reset entry actions -- initialize state machine */
601 * Reset entry actions -- initialize state machine
602 */
603static void 564static void
604bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf) 565bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
605{ 566{
@@ -607,9 +568,7 @@ bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
607 iocpf->auto_recover = bfa_nw_auto_recover; 568 iocpf->auto_recover = bfa_nw_auto_recover;
608} 569}
609 570
610/** 571/* Beginning state. IOC is in reset state. */
611 * Beginning state. IOC is in reset state.
612 */
613static void 572static void
614bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event) 573bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
615{ 574{
@@ -626,9 +585,7 @@ bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
626 } 585 }
627} 586}
628 587
629/** 588/* Semaphore should be acquired for version check. */
630 * Semaphore should be acquired for version check.
631 */
632static void 589static void
633bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf) 590bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
634{ 591{
@@ -636,9 +593,7 @@ bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
636 bfa_ioc_hw_sem_get(iocpf->ioc); 593 bfa_ioc_hw_sem_get(iocpf->ioc);
637} 594}
638 595
639/** 596/* Awaiting h/w semaphore to continue with version check. */
640 * Awaiting h/w semaphore to continue with version check.
641 */
642static void 597static void
643bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event) 598bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
644{ 599{
@@ -683,9 +638,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
683 } 638 }
684} 639}
685 640
686/** 641/* Notify enable completion callback */
687 * Notify enable completion callback
688 */
689static void 642static void
690bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf) 643bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
691{ 644{
@@ -698,9 +651,7 @@ bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
698 msecs_to_jiffies(BFA_IOC_TOV)); 651 msecs_to_jiffies(BFA_IOC_TOV));
699} 652}
700 653
701/** 654/* Awaiting firmware version match. */
702 * Awaiting firmware version match.
703 */
704static void 655static void
705bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event) 656bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
706{ 657{
@@ -727,18 +678,14 @@ bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
727 } 678 }
728} 679}
729 680
730/** 681/* Request for semaphore. */
731 * Request for semaphore.
732 */
733static void 682static void
734bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf) 683bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
735{ 684{
736 bfa_ioc_hw_sem_get(iocpf->ioc); 685 bfa_ioc_hw_sem_get(iocpf->ioc);
737} 686}
738 687
739/** 688/* Awaiting semaphore for h/w initialzation. */
740 * Awaiting semaphore for h/w initialzation.
741 */
742static void 689static void
743bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event) 690bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
744{ 691{
@@ -778,8 +725,7 @@ bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
778 bfa_ioc_reset(iocpf->ioc, false); 725 bfa_ioc_reset(iocpf->ioc, false);
779} 726}
780 727
781/** 728/* Hardware is being initialized. Interrupts are enabled.
782 * Hardware is being initialized. Interrupts are enabled.
783 * Holding hardware semaphore lock. 729 * Holding hardware semaphore lock.
784 */ 730 */
785static void 731static void
@@ -822,8 +768,7 @@ bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
822 bfa_ioc_send_enable(iocpf->ioc); 768 bfa_ioc_send_enable(iocpf->ioc);
823} 769}
824 770
825/** 771/* Host IOC function is being enabled, awaiting response from firmware.
826 * Host IOC function is being enabled, awaiting response from firmware.
827 * Semaphore is acquired. 772 * Semaphore is acquired.
828 */ 773 */
829static void 774static void
@@ -896,9 +841,7 @@ bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
896 bfa_ioc_send_disable(iocpf->ioc); 841 bfa_ioc_send_disable(iocpf->ioc);
897} 842}
898 843
899/** 844/* IOC is being disabled */
900 * IOC is being disabled
901 */
902static void 845static void
903bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event) 846bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
904{ 847{
@@ -935,9 +878,7 @@ bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
935 bfa_ioc_hw_sem_get(iocpf->ioc); 878 bfa_ioc_hw_sem_get(iocpf->ioc);
936} 879}
937 880
938/** 881/* IOC hb ack request is being removed. */
939 * IOC hb ack request is being removed.
940 */
941static void 882static void
942bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) 883bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
943{ 884{
@@ -963,9 +904,7 @@ bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
963 } 904 }
964} 905}
965 906
966/** 907/* IOC disable completion entry. */
967 * IOC disable completion entry.
968 */
969static void 908static void
970bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf) 909bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
971{ 910{
@@ -1000,9 +939,7 @@ bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
1000 bfa_ioc_hw_sem_get(iocpf->ioc); 939 bfa_ioc_hw_sem_get(iocpf->ioc);
1001} 940}
1002 941
1003/** 942/* Hardware initialization failed. */
1004 * Hardware initialization failed.
1005 */
1006static void 943static void
1007bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) 944bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
1008{ 945{
@@ -1046,9 +983,7 @@ bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
1046{ 983{
1047} 984}
1048 985
1049/** 986/* Hardware initialization failed. */
1050 * Hardware initialization failed.
1051 */
1052static void 987static void
1053bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event) 988bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
1054{ 989{
@@ -1084,9 +1019,7 @@ bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
1084 bfa_ioc_hw_sem_get(iocpf->ioc); 1019 bfa_ioc_hw_sem_get(iocpf->ioc);
1085} 1020}
1086 1021
1087/** 1022/* IOC is in failed state. */
1088 * IOC is in failed state.
1089 */
1090static void 1023static void
1091bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) 1024bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
1092{ 1025{
@@ -1134,10 +1067,7 @@ bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
1134{ 1067{
1135} 1068}
1136 1069
1137/** 1070/* IOC is in failed state. */
1138 * @brief
1139 * IOC is in failed state.
1140 */
1141static void 1071static void
1142bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event) 1072bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
1143{ 1073{
@@ -1151,13 +1081,9 @@ bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
1151 } 1081 }
1152} 1082}
1153 1083
1154/** 1084/* BFA IOC private functions */
1155 * BFA IOC private functions
1156 */
1157 1085
1158/** 1086/* Notify common modules registered for notification. */
1159 * Notify common modules registered for notification.
1160 */
1161static void 1087static void
1162bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event) 1088bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
1163{ 1089{
@@ -1298,10 +1224,7 @@ bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
1298 del_timer(&ioc->sem_timer); 1224 del_timer(&ioc->sem_timer);
1299} 1225}
1300 1226
1301/** 1227/* Initialize LPU local memory (aka secondary memory / SRAM) */
1302 * @brief
1303 * Initialize LPU local memory (aka secondary memory / SRAM)
1304 */
1305static void 1228static void
1306bfa_ioc_lmem_init(struct bfa_ioc *ioc) 1229bfa_ioc_lmem_init(struct bfa_ioc *ioc)
1307{ 1230{
@@ -1366,9 +1289,7 @@ bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
1366 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); 1289 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1367} 1290}
1368 1291
1369/** 1292/* Get driver and firmware versions. */
1370 * Get driver and firmware versions.
1371 */
1372void 1293void
1373bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr) 1294bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
1374{ 1295{
@@ -1388,9 +1309,7 @@ bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
1388 } 1309 }
1389} 1310}
1390 1311
1391/** 1312/* Returns TRUE if same. */
1392 * Returns TRUE if same.
1393 */
1394bool 1313bool
1395bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr) 1314bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
1396{ 1315{
@@ -1408,8 +1327,7 @@ bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
1408 return true; 1327 return true;
1409} 1328}
1410 1329
1411/** 1330/* Return true if current running version is valid. Firmware signature and
1412 * Return true if current running version is valid. Firmware signature and
1413 * execution context (driver/bios) must match. 1331 * execution context (driver/bios) must match.
1414 */ 1332 */
1415static bool 1333static bool
@@ -1430,9 +1348,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
1430 return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr); 1348 return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
1431} 1349}
1432 1350
1433/** 1351/* Conditionally flush any pending message from firmware at start. */
1434 * Conditionally flush any pending message from firmware at start.
1435 */
1436static void 1352static void
1437bfa_ioc_msgflush(struct bfa_ioc *ioc) 1353bfa_ioc_msgflush(struct bfa_ioc *ioc)
1438{ 1354{
@@ -1443,9 +1359,6 @@ bfa_ioc_msgflush(struct bfa_ioc *ioc)
1443 writel(1, ioc->ioc_regs.lpu_mbox_cmd); 1359 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1444} 1360}
1445 1361
1446/**
1447 * @img ioc_init_logic.jpg
1448 */
1449static void 1362static void
1450bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force) 1363bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
1451{ 1364{
@@ -1603,10 +1516,7 @@ bfa_ioc_hb_stop(struct bfa_ioc *ioc)
1603 del_timer(&ioc->hb_timer); 1516 del_timer(&ioc->hb_timer);
1604} 1517}
1605 1518
1606/** 1519/* Initiate a full firmware download. */
1607 * @brief
1608 * Initiate a full firmware download.
1609 */
1610static void 1520static void
1611bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type, 1521bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
1612 u32 boot_env) 1522 u32 boot_env)
@@ -1672,9 +1582,7 @@ bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
1672 bfa_ioc_hwinit(ioc, force); 1582 bfa_ioc_hwinit(ioc, force);
1673} 1583}
1674 1584
1675/** 1585/* BFA ioc enable reply by firmware */
1676 * BFA ioc enable reply by firmware
1677 */
1678static void 1586static void
1679bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode, 1587bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
1680 u8 cap_bm) 1588 u8 cap_bm)
@@ -1686,10 +1594,7 @@ bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
1686 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE); 1594 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
1687} 1595}
1688 1596
1689/** 1597/* Update BFA configuration from firmware configuration. */
1690 * @brief
1691 * Update BFA configuration from firmware configuration.
1692 */
1693static void 1598static void
1694bfa_ioc_getattr_reply(struct bfa_ioc *ioc) 1599bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
1695{ 1600{
@@ -1702,9 +1607,7 @@ bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
1702 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR); 1607 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1703} 1608}
1704 1609
1705/** 1610/* Attach time initialization of mbox logic. */
1706 * Attach time initialization of mbox logic.
1707 */
1708static void 1611static void
1709bfa_ioc_mbox_attach(struct bfa_ioc *ioc) 1612bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
1710{ 1613{
@@ -1718,9 +1621,7 @@ bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
1718 } 1621 }
1719} 1622}
1720 1623
1721/** 1624/* Mbox poll timer -- restarts any pending mailbox requests. */
1722 * Mbox poll timer -- restarts any pending mailbox requests.
1723 */
1724static void 1625static void
1725bfa_ioc_mbox_poll(struct bfa_ioc *ioc) 1626bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
1726{ 1627{
@@ -1760,9 +1661,7 @@ bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
1760 } 1661 }
1761} 1662}
1762 1663
1763/** 1664/* Cleanup any pending requests. */
1764 * Cleanup any pending requests.
1765 */
1766static void 1665static void
1767bfa_ioc_mbox_flush(struct bfa_ioc *ioc) 1666bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
1768{ 1667{
@@ -1774,12 +1673,12 @@ bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
1774} 1673}
1775 1674
1776/** 1675/**
1777 * Read data from SMEM to host through PCI memmap 1676 * bfa_nw_ioc_smem_read - Read data from SMEM to host through PCI memmap
1778 * 1677 *
1779 * @param[in] ioc memory for IOC 1678 * @ioc: memory for IOC
1780 * @param[in] tbuf app memory to store data from smem 1679 * @tbuf: app memory to store data from smem
1781 * @param[in] soff smem offset 1680 * @soff: smem offset
1782 * @param[in] sz size of smem in bytes 1681 * @sz: size of smem in bytes
1783 */ 1682 */
1784static int 1683static int
1785bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz) 1684bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
@@ -1826,9 +1725,7 @@ bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
1826 return 0; 1725 return 0;
1827} 1726}
1828 1727
1829/** 1728/* Retrieve saved firmware trace from a prior IOC failure. */
1830 * Retrieve saved firmware trace from a prior IOC failure.
1831 */
1832int 1729int
1833bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen) 1730bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
1834{ 1731{
@@ -1844,9 +1741,7 @@ bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
1844 return status; 1741 return status;
1845} 1742}
1846 1743
1847/** 1744/* Save firmware trace if configured. */
1848 * Save firmware trace if configured.
1849 */
1850static void 1745static void
1851bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc) 1746bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
1852{ 1747{
@@ -1861,9 +1756,7 @@ bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
1861 } 1756 }
1862} 1757}
1863 1758
1864/** 1759/* Retrieve saved firmware trace from a prior IOC failure. */
1865 * Retrieve saved firmware trace from a prior IOC failure.
1866 */
1867int 1760int
1868bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen) 1761bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen)
1869{ 1762{
@@ -1892,9 +1785,7 @@ bfa_ioc_fail_notify(struct bfa_ioc *ioc)
1892 bfa_nw_ioc_debug_save_ftrc(ioc); 1785 bfa_nw_ioc_debug_save_ftrc(ioc);
1893} 1786}
1894 1787
1895/** 1788/* IOCPF to IOC interface */
1896 * IOCPF to IOC interface
1897 */
1898static void 1789static void
1899bfa_ioc_pf_enabled(struct bfa_ioc *ioc) 1790bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
1900{ 1791{
@@ -1928,9 +1819,7 @@ bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
1928 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 1819 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
1929} 1820}
1930 1821
1931/** 1822/* IOC public */
1932 * IOC public
1933 */
1934static enum bfa_status 1823static enum bfa_status
1935bfa_ioc_pll_init(struct bfa_ioc *ioc) 1824bfa_ioc_pll_init(struct bfa_ioc *ioc)
1936{ 1825{
@@ -1954,8 +1843,7 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc)
1954 return BFA_STATUS_OK; 1843 return BFA_STATUS_OK;
1955} 1844}
1956 1845
1957/** 1846/* Interface used by diag module to do firmware boot with memory test
1958 * Interface used by diag module to do firmware boot with memory test
1959 * as the entry vector. 1847 * as the entry vector.
1960 */ 1848 */
1961static void 1849static void
@@ -1983,9 +1871,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
1983 bfa_ioc_lpu_start(ioc); 1871 bfa_ioc_lpu_start(ioc);
1984} 1872}
1985 1873
1986/** 1874/* Enable/disable IOC failure auto recovery. */
1987 * Enable/disable IOC failure auto recovery.
1988 */
1989void 1875void
1990bfa_nw_ioc_auto_recover(bool auto_recover) 1876bfa_nw_ioc_auto_recover(bool auto_recover)
1991{ 1877{
@@ -2056,10 +1942,10 @@ bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
2056} 1942}
2057 1943
2058/** 1944/**
2059 * IOC attach time initialization and setup. 1945 * bfa_nw_ioc_attach - IOC attach time initialization and setup.
2060 * 1946 *
2061 * @param[in] ioc memory for IOC 1947 * @ioc: memory for IOC
2062 * @param[in] bfa driver instance structure 1948 * @bfa: driver instance structure
2063 */ 1949 */
2064void 1950void
2065bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn) 1951bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
@@ -2078,9 +1964,7 @@ bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
2078 bfa_fsm_send_event(ioc, IOC_E_RESET); 1964 bfa_fsm_send_event(ioc, IOC_E_RESET);
2079} 1965}
2080 1966
2081/** 1967/* Driver detach time IOC cleanup. */
2082 * Driver detach time IOC cleanup.
2083 */
2084void 1968void
2085bfa_nw_ioc_detach(struct bfa_ioc *ioc) 1969bfa_nw_ioc_detach(struct bfa_ioc *ioc)
2086{ 1970{
@@ -2091,9 +1975,9 @@ bfa_nw_ioc_detach(struct bfa_ioc *ioc)
2091} 1975}
2092 1976
2093/** 1977/**
2094 * Setup IOC PCI properties. 1978 * bfa_nw_ioc_pci_init - Setup IOC PCI properties.
2095 * 1979 *
2096 * @param[in] pcidev PCI device information for this IOC 1980 * @pcidev: PCI device information for this IOC
2097 */ 1981 */
2098void 1982void
2099bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev, 1983bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
@@ -2160,10 +2044,10 @@ bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
2160} 2044}
2161 2045
2162/** 2046/**
2163 * Initialize IOC dma memory 2047 * bfa_nw_ioc_mem_claim - Initialize IOC dma memory
2164 * 2048 *
2165 * @param[in] dm_kva kernel virtual address of IOC dma memory 2049 * @dm_kva: kernel virtual address of IOC dma memory
2166 * @param[in] dm_pa physical address of IOC dma memory 2050 * @dm_pa: physical address of IOC dma memory
2167 */ 2051 */
2168void 2052void
2169bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa) 2053bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
@@ -2176,9 +2060,7 @@ bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
2176 ioc->attr = (struct bfi_ioc_attr *) dm_kva; 2060 ioc->attr = (struct bfi_ioc_attr *) dm_kva;
2177} 2061}
2178 2062
2179/** 2063/* Return size of dma memory required. */
2180 * Return size of dma memory required.
2181 */
2182u32 2064u32
2183bfa_nw_ioc_meminfo(void) 2065bfa_nw_ioc_meminfo(void)
2184{ 2066{
@@ -2201,9 +2083,7 @@ bfa_nw_ioc_disable(struct bfa_ioc *ioc)
2201 bfa_fsm_send_event(ioc, IOC_E_DISABLE); 2083 bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2202} 2084}
2203 2085
2204/** 2086/* Initialize memory for saving firmware trace. */
2205 * Initialize memory for saving firmware trace.
2206 */
2207void 2087void
2208bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave) 2088bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave)
2209{ 2089{
@@ -2217,9 +2097,7 @@ bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
2217 return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr); 2097 return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
2218} 2098}
2219 2099
2220/** 2100/* Register mailbox message handler function, to be called by common modules */
2221 * Register mailbox message handler function, to be called by common modules
2222 */
2223void 2101void
2224bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc, 2102bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
2225 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg) 2103 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
@@ -2231,11 +2109,12 @@ bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
2231} 2109}
2232 2110
2233/** 2111/**
2234 * Queue a mailbox command request to firmware. Waits if mailbox is busy. 2112 * bfa_nw_ioc_mbox_queue - Queue a mailbox command request to firmware.
2235 * Responsibility of caller to serialize
2236 * 2113 *
2237 * @param[in] ioc IOC instance 2114 * @ioc: IOC instance
2238 * @param[i] cmd Mailbox command 2115 * @cmd: Mailbox command
2116 *
2117 * Waits if mailbox is busy. Responsibility of caller to serialize
2239 */ 2118 */
2240bool 2119bool
2241bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd, 2120bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
@@ -2272,9 +2151,7 @@ bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
2272 return false; 2151 return false;
2273} 2152}
2274 2153
2275/** 2154/* Handle mailbox interrupts */
2276 * Handle mailbox interrupts
2277 */
2278void 2155void
2279bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc) 2156bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
2280{ 2157{
@@ -2314,9 +2191,7 @@ bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
2314 bfa_fsm_send_event(ioc, IOC_E_HWERROR); 2191 bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2315} 2192}
2316 2193
2317/** 2194/* return true if IOC is disabled */
2318 * return true if IOC is disabled
2319 */
2320bool 2195bool
2321bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc) 2196bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
2322{ 2197{
@@ -2324,17 +2199,14 @@ bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
2324 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled); 2199 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2325} 2200}
2326 2201
2327/** 2202/* return true if IOC is operational */
2328 * return true if IOC is operational
2329 */
2330bool 2203bool
2331bfa_nw_ioc_is_operational(struct bfa_ioc *ioc) 2204bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
2332{ 2205{
2333 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op); 2206 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
2334} 2207}
2335 2208
2336/** 2209/* Add to IOC heartbeat failure notification queue. To be used by common
2337 * Add to IOC heartbeat failure notification queue. To be used by common
2338 * modules such as cee, port, diag. 2210 * modules such as cee, port, diag.
2339 */ 2211 */
2340void 2212void
@@ -2518,9 +2390,7 @@ bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
2518 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev); 2390 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2519} 2391}
2520 2392
2521/** 2393/* WWN public */
2522 * WWN public
2523 */
2524static u64 2394static u64
2525bfa_ioc_get_pwwn(struct bfa_ioc *ioc) 2395bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
2526{ 2396{
@@ -2533,9 +2403,7 @@ bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
2533 return ioc->attr->mac; 2403 return ioc->attr->mac;
2534} 2404}
2535 2405
2536/** 2406/* Firmware failure detected. Start recovery actions. */
2537 * Firmware failure detected. Start recovery actions.
2538 */
2539static void 2407static void
2540bfa_ioc_recover(struct bfa_ioc *ioc) 2408bfa_ioc_recover(struct bfa_ioc *ioc)
2541{ 2409{
@@ -2545,10 +2413,7 @@ bfa_ioc_recover(struct bfa_ioc *ioc)
2545 bfa_fsm_send_event(ioc, IOC_E_HBFAIL); 2413 bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2546} 2414}
2547 2415
2548/** 2416/* BFA IOC PF private functions */
2549 * @dg hal_iocpf_pvt BFA IOC PF private functions
2550 * @{
2551 */
2552 2417
2553static void 2418static void
2554bfa_iocpf_enable(struct bfa_ioc *ioc) 2419bfa_iocpf_enable(struct bfa_ioc *ioc)
@@ -2669,8 +2534,6 @@ bfa_flash_notify(void *cbarg, enum bfa_ioc_event event)
2669 2534
2670/* 2535/*
2671 * Send flash write request. 2536 * Send flash write request.
2672 *
2673 * @param[in] cbarg - callback argument
2674 */ 2537 */
2675static void 2538static void
2676bfa_flash_write_send(struct bfa_flash *flash) 2539bfa_flash_write_send(struct bfa_flash *flash)
@@ -2699,10 +2562,10 @@ bfa_flash_write_send(struct bfa_flash *flash)
2699 flash->offset += len; 2562 flash->offset += len;
2700} 2563}
2701 2564
2702/* 2565/**
2703 * Send flash read request. 2566 * bfa_flash_read_send - Send flash read request.
2704 * 2567 *
2705 * @param[in] cbarg - callback argument 2568 * @cbarg: callback argument
2706 */ 2569 */
2707static void 2570static void
2708bfa_flash_read_send(void *cbarg) 2571bfa_flash_read_send(void *cbarg)
@@ -2724,11 +2587,11 @@ bfa_flash_read_send(void *cbarg)
2724 bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL); 2587 bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
2725} 2588}
2726 2589
2727/* 2590/**
2728 * Process flash response messages upon receiving interrupts. 2591 * bfa_flash_intr - Process flash response messages upon receiving interrupts.
2729 * 2592 *
2730 * @param[in] flasharg - flash structure 2593 * @flasharg: flash structure
2731 * @param[in] msg - message structure 2594 * @msg: message structure
2732 */ 2595 */
2733static void 2596static void
2734bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg) 2597bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg)
@@ -2821,12 +2684,12 @@ bfa_nw_flash_meminfo(void)
2821 return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); 2684 return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
2822} 2685}
2823 2686
2824/* 2687/**
2825 * Flash attach API. 2688 * bfa_nw_flash_attach - Flash attach API.
2826 * 2689 *
2827 * @param[in] flash - flash structure 2690 * @flash: flash structure
2828 * @param[in] ioc - ioc structure 2691 * @ioc: ioc structure
2829 * @param[in] dev - device structure 2692 * @dev: device structure
2830 */ 2693 */
2831void 2694void
2832bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev) 2695bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
@@ -2842,12 +2705,12 @@ bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
2842 list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q); 2705 list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
2843} 2706}
2844 2707
2845/* 2708/**
2846 * Claim memory for flash 2709 * bfa_nw_flash_memclaim - Claim memory for flash
2847 * 2710 *
2848 * @param[in] flash - flash structure 2711 * @flash: flash structure
2849 * @param[in] dm_kva - pointer to virtual memory address 2712 * @dm_kva: pointer to virtual memory address
2850 * @param[in] dm_pa - physical memory address 2713 * @dm_pa: physical memory address
2851 */ 2714 */
2852void 2715void
2853bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa) 2716bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
@@ -2859,13 +2722,13 @@ bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
2859 dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); 2722 dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
2860} 2723}
2861 2724
2862/* 2725/**
2863 * Get flash attribute. 2726 * bfa_nw_flash_get_attr - Get flash attribute.
2864 * 2727 *
2865 * @param[in] flash - flash structure 2728 * @flash: flash structure
2866 * @param[in] attr - flash attribute structure 2729 * @attr: flash attribute structure
2867 * @param[in] cbfn - callback function 2730 * @cbfn: callback function
2868 * @param[in] cbarg - callback argument 2731 * @cbarg: callback argument
2869 * 2732 *
2870 * Return status. 2733 * Return status.
2871 */ 2734 */
@@ -2895,17 +2758,17 @@ bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr,
2895 return BFA_STATUS_OK; 2758 return BFA_STATUS_OK;
2896} 2759}
2897 2760
2898/* 2761/**
2899 * Update flash partition. 2762 * bfa_nw_flash_update_part - Update flash partition.
2900 * 2763 *
2901 * @param[in] flash - flash structure 2764 * @flash: flash structure
2902 * @param[in] type - flash partition type 2765 * @type: flash partition type
2903 * @param[in] instance - flash partition instance 2766 * @instance: flash partition instance
2904 * @param[in] buf - update data buffer 2767 * @buf: update data buffer
2905 * @param[in] len - data buffer length 2768 * @len: data buffer length
2906 * @param[in] offset - offset relative to the partition starting address 2769 * @offset: offset relative to the partition starting address
2907 * @param[in] cbfn - callback function 2770 * @cbfn: callback function
2908 * @param[in] cbarg - callback argument 2771 * @cbarg: callback argument
2909 * 2772 *
2910 * Return status. 2773 * Return status.
2911 */ 2774 */
@@ -2944,17 +2807,17 @@ bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance,
2944 return BFA_STATUS_OK; 2807 return BFA_STATUS_OK;
2945} 2808}
2946 2809
2947/* 2810/**
2948 * Read flash partition. 2811 * bfa_nw_flash_read_part - Read flash partition.
2949 * 2812 *
2950 * @param[in] flash - flash structure 2813 * @flash: flash structure
2951 * @param[in] type - flash partition type 2814 * @type: flash partition type
2952 * @param[in] instance - flash partition instance 2815 * @instance: flash partition instance
2953 * @param[in] buf - read data buffer 2816 * @buf: read data buffer
2954 * @param[in] len - data buffer length 2817 * @len: data buffer length
2955 * @param[in] offset - offset relative to the partition starting address 2818 * @offset: offset relative to the partition starting address
2956 * @param[in] cbfn - callback function 2819 * @cbfn: callback function
2957 * @param[in] cbarg - callback argument 2820 * @cbarg: callback argument
2958 * 2821 *
2959 * Return status. 2822 * Return status.
2960 */ 2823 */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
index 3b4460fdc148..63a85e555df8 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
@@ -30,9 +30,7 @@
30#define BNA_DBG_FWTRC_LEN (BFI_IOC_TRC_ENTS * BFI_IOC_TRC_ENT_SZ + \ 30#define BNA_DBG_FWTRC_LEN (BFI_IOC_TRC_ENTS * BFI_IOC_TRC_ENT_SZ + \
31 BFI_IOC_TRC_HDR_SZ) 31 BFI_IOC_TRC_HDR_SZ)
32 32
33/** 33/* PCI device information required by IOC */
34 * PCI device information required by IOC
35 */
36struct bfa_pcidev { 34struct bfa_pcidev {
37 int pci_slot; 35 int pci_slot;
38 u8 pci_func; 36 u8 pci_func;
@@ -41,8 +39,7 @@ struct bfa_pcidev {
41 void __iomem *pci_bar_kva; 39 void __iomem *pci_bar_kva;
42}; 40};
43 41
44/** 42/* Structure used to remember the DMA-able memory block's KVA and Physical
45 * Structure used to remember the DMA-able memory block's KVA and Physical
46 * Address 43 * Address
47 */ 44 */
48struct bfa_dma { 45struct bfa_dma {
@@ -52,15 +49,11 @@ struct bfa_dma {
52 49
53#define BFA_DMA_ALIGN_SZ 256 50#define BFA_DMA_ALIGN_SZ 256
54 51
55/** 52/* smem size for Crossbow and Catapult */
56 * smem size for Crossbow and Catapult
57 */
58#define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */ 53#define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */
59#define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */ 54#define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */
60 55
61/** 56/* BFA dma address assignment macro. (big endian format) */
62 * @brief BFA dma address assignment macro. (big endian format)
63 */
64#define bfa_dma_be_addr_set(dma_addr, pa) \ 57#define bfa_dma_be_addr_set(dma_addr, pa) \
65 __bfa_dma_be_addr_set(&dma_addr, (u64)pa) 58 __bfa_dma_be_addr_set(&dma_addr, (u64)pa)
66static inline void 59static inline void
@@ -108,9 +101,7 @@ struct bfa_ioc_regs {
108 u32 smem_pg0; 101 u32 smem_pg0;
109}; 102};
110 103
111/** 104/* IOC Mailbox structures */
112 * IOC Mailbox structures
113 */
114typedef void (*bfa_mbox_cmd_cbfn_t)(void *cbarg); 105typedef void (*bfa_mbox_cmd_cbfn_t)(void *cbarg);
115struct bfa_mbox_cmd { 106struct bfa_mbox_cmd {
116 struct list_head qe; 107 struct list_head qe;
@@ -119,9 +110,7 @@ struct bfa_mbox_cmd {
119 u32 msg[BFI_IOC_MSGSZ]; 110 u32 msg[BFI_IOC_MSGSZ];
120}; 111};
121 112
122/** 113/* IOC mailbox module */
123 * IOC mailbox module
124 */
125typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg *m); 114typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg *m);
126struct bfa_ioc_mbox_mod { 115struct bfa_ioc_mbox_mod {
127 struct list_head cmd_q; /*!< pending mbox queue */ 116 struct list_head cmd_q; /*!< pending mbox queue */
@@ -132,9 +121,7 @@ struct bfa_ioc_mbox_mod {
132 } mbhdlr[BFI_MC_MAX]; 121 } mbhdlr[BFI_MC_MAX];
133}; 122};
134 123
135/** 124/* IOC callback function interfaces */
136 * IOC callback function interfaces
137 */
138typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status); 125typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status);
139typedef void (*bfa_ioc_disable_cbfn_t)(void *bfa); 126typedef void (*bfa_ioc_disable_cbfn_t)(void *bfa);
140typedef void (*bfa_ioc_hbfail_cbfn_t)(void *bfa); 127typedef void (*bfa_ioc_hbfail_cbfn_t)(void *bfa);
@@ -146,9 +133,7 @@ struct bfa_ioc_cbfn {
146 bfa_ioc_reset_cbfn_t reset_cbfn; 133 bfa_ioc_reset_cbfn_t reset_cbfn;
147}; 134};
148 135
149/** 136/* IOC event notification mechanism. */
150 * IOC event notification mechanism.
151 */
152enum bfa_ioc_event { 137enum bfa_ioc_event {
153 BFA_IOC_E_ENABLED = 1, 138 BFA_IOC_E_ENABLED = 1,
154 BFA_IOC_E_DISABLED = 2, 139 BFA_IOC_E_DISABLED = 2,
@@ -163,9 +148,7 @@ struct bfa_ioc_notify {
163 void *cbarg; 148 void *cbarg;
164}; 149};
165 150
166/** 151/* Initialize a IOC event notification structure */
167 * Initialize a IOC event notification structure
168 */
169#define bfa_ioc_notify_init(__notify, __cbfn, __cbarg) do { \ 152#define bfa_ioc_notify_init(__notify, __cbfn, __cbarg) do { \
170 (__notify)->cbfn = (__cbfn); \ 153 (__notify)->cbfn = (__cbfn); \
171 (__notify)->cbarg = (__cbarg); \ 154 (__notify)->cbarg = (__cbarg); \
@@ -261,9 +244,7 @@ struct bfa_ioc_hwif {
261#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS) 244#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS)
262#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS) 245#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
263 246
264/** 247/* IOC mailbox interface */
265 * IOC mailbox interface
266 */
267bool bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, 248bool bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc,
268 struct bfa_mbox_cmd *cmd, 249 struct bfa_mbox_cmd *cmd,
269 bfa_mbox_cmd_cbfn_t cbfn, void *cbarg); 250 bfa_mbox_cmd_cbfn_t cbfn, void *cbarg);
@@ -271,9 +252,7 @@ void bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc);
271void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc, 252void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
272 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg); 253 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);
273 254
274/** 255/* IOC interfaces */
275 * IOC interfaces
276 */
277 256
278#define bfa_ioc_pll_init_asic(__ioc) \ 257#define bfa_ioc_pll_init_asic(__ioc) \
279 ((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \ 258 ((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
index b6b036a143ae..5df0b0c68c5a 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
@@ -87,9 +87,7 @@ static const struct bfa_ioc_hwif nw_hwif_ct2 = {
87 .ioc_sync_complete = bfa_ioc_ct_sync_complete, 87 .ioc_sync_complete = bfa_ioc_ct_sync_complete,
88}; 88};
89 89
90/** 90/* Called from bfa_ioc_attach() to map asic specific calls. */
91 * Called from bfa_ioc_attach() to map asic specific calls.
92 */
93void 91void
94bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc) 92bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
95{ 93{
@@ -102,9 +100,7 @@ bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc)
102 ioc->ioc_hwif = &nw_hwif_ct2; 100 ioc->ioc_hwif = &nw_hwif_ct2;
103} 101}
104 102
105/** 103/* Return true if firmware of current driver matches the running firmware. */
106 * Return true if firmware of current driver matches the running firmware.
107 */
108static bool 104static bool
109bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc) 105bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
110{ 106{
@@ -182,9 +178,7 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
182 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 178 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
183} 179}
184 180
185/** 181/* Notify other functions on HB failure. */
186 * Notify other functions on HB failure.
187 */
188static void 182static void
189bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc) 183bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
190{ 184{
@@ -195,9 +189,7 @@ bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
195 readl(ioc->ioc_regs.alt_ll_halt); 189 readl(ioc->ioc_regs.alt_ll_halt);
196} 190}
197 191
198/** 192/* Host to LPU mailbox message addresses */
199 * Host to LPU mailbox message addresses
200 */
201static const struct { 193static const struct {
202 u32 hfn_mbox; 194 u32 hfn_mbox;
203 u32 lpu_mbox; 195 u32 lpu_mbox;
@@ -209,9 +201,7 @@ static const struct {
209 { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 } 201 { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
210}; 202};
211 203
212/** 204/* Host <-> LPU mailbox command/status registers - port 0 */
213 * Host <-> LPU mailbox command/status registers - port 0
214 */
215static const struct { 205static const struct {
216 u32 hfn; 206 u32 hfn;
217 u32 lpu; 207 u32 lpu;
@@ -222,9 +212,7 @@ static const struct {
222 { HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT } 212 { HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
223}; 213};
224 214
225/** 215/* Host <-> LPU mailbox command/status registers - port 1 */
226 * Host <-> LPU mailbox command/status registers - port 1
227 */
228static const struct { 216static const struct {
229 u32 hfn; 217 u32 hfn;
230 u32 lpu; 218 u32 lpu;
@@ -368,9 +356,7 @@ bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc)
368 ioc->ioc_regs.err_set = rb + ERR_SET_REG; 356 ioc->ioc_regs.err_set = rb + ERR_SET_REG;
369} 357}
370 358
371/** 359/* Initialize IOC to port mapping. */
372 * Initialize IOC to port mapping.
373 */
374 360
375#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8) 361#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
376static void 362static void
@@ -398,9 +384,7 @@ bfa_ioc_ct2_map_port(struct bfa_ioc *ioc)
398 ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH); 384 ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);
399} 385}
400 386
401/** 387/* Set interrupt mode for a function: INTX or MSIX */
402 * Set interrupt mode for a function: INTX or MSIX
403 */
404static void 388static void
405bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix) 389bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
406{ 390{
@@ -443,9 +427,7 @@ bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc)
443 return false; 427 return false;
444} 428}
445 429
446/** 430/* MSI-X resource allocation for 1860 with no asic block */
447 * MSI-X resource allocation for 1860 with no asic block
448 */
449#define HOSTFN_MSIX_DEFAULT 64 431#define HOSTFN_MSIX_DEFAULT 64
450#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR 0x30138 432#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR 0x30138
451#define HOSTFN_MSIX_VT_OFST_NUMVT 0x3013c 433#define HOSTFN_MSIX_VT_OFST_NUMVT 0x3013c
@@ -473,9 +455,7 @@ bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc)
473 rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR); 455 rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
474} 456}
475 457
476/** 458/* Cleanup hw semaphore and usecnt registers */
477 * Cleanup hw semaphore and usecnt registers
478 */
479static void 459static void
480bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc) 460bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
481{ 461{
@@ -492,9 +472,7 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
492 bfa_nw_ioc_hw_sem_release(ioc); 472 bfa_nw_ioc_hw_sem_release(ioc);
493} 473}
494 474
495/** 475/* Synchronized IOC failure processing routines */
496 * Synchronized IOC failure processing routines
497 */
498static bool 476static bool
499bfa_ioc_ct_sync_start(struct bfa_ioc *ioc) 477bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
500{ 478{
@@ -518,9 +496,7 @@ bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
518 496
519 return bfa_ioc_ct_sync_complete(ioc); 497 return bfa_ioc_ct_sync_complete(ioc);
520} 498}
521/** 499/* Synchronized IOC failure processing routines */
522 * Synchronized IOC failure processing routines
523 */
524static void 500static void
525bfa_ioc_ct_sync_join(struct bfa_ioc *ioc) 501bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
526{ 502{
diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.c b/drivers/net/ethernet/brocade/bna/bfa_msgq.c
index dd36427f4752..55067d0d25cf 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_msgq.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.c
@@ -16,9 +16,7 @@
16 * www.brocade.com 16 * www.brocade.com
17 */ 17 */
18 18
19/** 19/* MSGQ module source file. */
20 * @file bfa_msgq.c MSGQ module source file.
21 */
22 20
23#include "bfi.h" 21#include "bfi.h"
24#include "bfa_msgq.h" 22#include "bfa_msgq.h"
diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h
index 0d9df695397a..1f24c23dc786 100644
--- a/drivers/net/ethernet/brocade/bna/bfi.h
+++ b/drivers/net/ethernet/brocade/bna/bfi.h
@@ -22,15 +22,11 @@
22 22
23#pragma pack(1) 23#pragma pack(1)
24 24
25/** 25/* BFI FW image type */
26 * BFI FW image type
27 */
28#define BFI_FLASH_CHUNK_SZ 256 /*!< Flash chunk size */ 26#define BFI_FLASH_CHUNK_SZ 256 /*!< Flash chunk size */
29#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32)) 27#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
30 28
31/** 29/* Msg header common to all msgs */
32 * Msg header common to all msgs
33 */
34struct bfi_mhdr { 30struct bfi_mhdr {
35 u8 msg_class; /*!< @ref enum bfi_mclass */ 31 u8 msg_class; /*!< @ref enum bfi_mclass */
36 u8 msg_id; /*!< msg opcode with in the class */ 32 u8 msg_id; /*!< msg opcode with in the class */
@@ -65,17 +61,14 @@ struct bfi_mhdr {
65#define BFI_I2H_OPCODE_BASE 128 61#define BFI_I2H_OPCODE_BASE 128
66#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE) 62#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE)
67 63
68/** 64/****************************************************************************
69 ****************************************************************************
70 * 65 *
71 * Scatter Gather Element and Page definition 66 * Scatter Gather Element and Page definition
72 * 67 *
73 **************************************************************************** 68 ****************************************************************************
74 */ 69 */
75 70
76/** 71/* DMA addresses */
77 * DMA addresses
78 */
79union bfi_addr_u { 72union bfi_addr_u {
80 struct { 73 struct {
81 u32 addr_lo; 74 u32 addr_lo;
@@ -83,9 +76,7 @@ union bfi_addr_u {
83 } a32; 76 } a32;
84}; 77};
85 78
86/** 79/* Generic DMA addr-len pair. */
87 * Generic DMA addr-len pair.
88 */
89struct bfi_alen { 80struct bfi_alen {
90 union bfi_addr_u al_addr; /* DMA addr of buffer */ 81 union bfi_addr_u al_addr; /* DMA addr of buffer */
91 u32 al_len; /* length of buffer */ 82 u32 al_len; /* length of buffer */
@@ -98,26 +89,20 @@ struct bfi_alen {
98#define BFI_LMSG_PL_WSZ \ 89#define BFI_LMSG_PL_WSZ \
99 ((BFI_LMSG_SZ - sizeof(struct bfi_mhdr)) / 4) 90 ((BFI_LMSG_SZ - sizeof(struct bfi_mhdr)) / 4)
100 91
101/** 92/* Mailbox message structure */
102 * Mailbox message structure
103 */
104#define BFI_MBMSG_SZ 7 93#define BFI_MBMSG_SZ 7
105struct bfi_mbmsg { 94struct bfi_mbmsg {
106 struct bfi_mhdr mh; 95 struct bfi_mhdr mh;
107 u32 pl[BFI_MBMSG_SZ]; 96 u32 pl[BFI_MBMSG_SZ];
108}; 97};
109 98
110/** 99/* Supported PCI function class codes (personality) */
111 * Supported PCI function class codes (personality)
112 */
113enum bfi_pcifn_class { 100enum bfi_pcifn_class {
114 BFI_PCIFN_CLASS_FC = 0x0c04, 101 BFI_PCIFN_CLASS_FC = 0x0c04,
115 BFI_PCIFN_CLASS_ETH = 0x0200, 102 BFI_PCIFN_CLASS_ETH = 0x0200,
116}; 103};
117 104
118/** 105/* Message Classes */
119 * Message Classes
120 */
121enum bfi_mclass { 106enum bfi_mclass {
122 BFI_MC_IOC = 1, /*!< IO Controller (IOC) */ 107 BFI_MC_IOC = 1, /*!< IO Controller (IOC) */
123 BFI_MC_DIAG = 2, /*!< Diagnostic Msgs */ 108 BFI_MC_DIAG = 2, /*!< Diagnostic Msgs */
@@ -159,15 +144,12 @@ enum bfi_mclass {
159 144
160#define BFI_FWBOOT_ENV_OS 0 145#define BFI_FWBOOT_ENV_OS 0
161 146
162/** 147/*----------------------------------------------------------------------
163 *----------------------------------------------------------------------
164 * IOC 148 * IOC
165 *---------------------------------------------------------------------- 149 *----------------------------------------------------------------------
166 */ 150 */
167 151
168/** 152/* Different asic generations */
169 * Different asic generations
170 */
171enum bfi_asic_gen { 153enum bfi_asic_gen {
172 BFI_ASIC_GEN_CB = 1, 154 BFI_ASIC_GEN_CB = 1,
173 BFI_ASIC_GEN_CT = 2, 155 BFI_ASIC_GEN_CT = 2,
@@ -196,9 +178,7 @@ enum bfi_ioc_i2h_msgs {
196 BFI_IOC_I2H_HBEAT = BFA_I2HM(4), 178 BFI_IOC_I2H_HBEAT = BFA_I2HM(4),
197}; 179};
198 180
199/** 181/* BFI_IOC_H2I_GETATTR_REQ message */
200 * BFI_IOC_H2I_GETATTR_REQ message
201 */
202struct bfi_ioc_getattr_req { 182struct bfi_ioc_getattr_req {
203 struct bfi_mhdr mh; 183 struct bfi_mhdr mh;
204 union bfi_addr_u attr_addr; 184 union bfi_addr_u attr_addr;
@@ -231,30 +211,22 @@ struct bfi_ioc_attr {
231 u32 card_type; /*!< card type */ 211 u32 card_type; /*!< card type */
232}; 212};
233 213
234/** 214/* BFI_IOC_I2H_GETATTR_REPLY message */
235 * BFI_IOC_I2H_GETATTR_REPLY message
236 */
237struct bfi_ioc_getattr_reply { 215struct bfi_ioc_getattr_reply {
238 struct bfi_mhdr mh; /*!< Common msg header */ 216 struct bfi_mhdr mh; /*!< Common msg header */
239 u8 status; /*!< cfg reply status */ 217 u8 status; /*!< cfg reply status */
240 u8 rsvd[3]; 218 u8 rsvd[3];
241}; 219};
242 220
243/** 221/* Firmware memory page offsets */
244 * Firmware memory page offsets
245 */
246#define BFI_IOC_SMEM_PG0_CB (0x40) 222#define BFI_IOC_SMEM_PG0_CB (0x40)
247#define BFI_IOC_SMEM_PG0_CT (0x180) 223#define BFI_IOC_SMEM_PG0_CT (0x180)
248 224
249/** 225/* Firmware statistic offset */
250 * Firmware statistic offset
251 */
252#define BFI_IOC_FWSTATS_OFF (0x6B40) 226#define BFI_IOC_FWSTATS_OFF (0x6B40)
253#define BFI_IOC_FWSTATS_SZ (4096) 227#define BFI_IOC_FWSTATS_SZ (4096)
254 228
255/** 229/* Firmware trace offset */
256 * Firmware trace offset
257 */
258#define BFI_IOC_TRC_OFF (0x4b00) 230#define BFI_IOC_TRC_OFF (0x4b00)
259#define BFI_IOC_TRC_ENTS 256 231#define BFI_IOC_TRC_ENTS 256
260#define BFI_IOC_TRC_ENT_SZ 16 232#define BFI_IOC_TRC_ENT_SZ 16
@@ -299,9 +271,7 @@ struct bfi_ioc_hbeat {
299 u32 hb_count; /*!< current heart beat count */ 271 u32 hb_count; /*!< current heart beat count */
300}; 272};
301 273
302/** 274/* IOC hardware/firmware state */
303 * IOC hardware/firmware state
304 */
305enum bfi_ioc_state { 275enum bfi_ioc_state {
306 BFI_IOC_UNINIT = 0, /*!< not initialized */ 276 BFI_IOC_UNINIT = 0, /*!< not initialized */
307 BFI_IOC_INITING = 1, /*!< h/w is being initialized */ 277 BFI_IOC_INITING = 1, /*!< h/w is being initialized */
@@ -345,9 +315,7 @@ enum {
345 ((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \ 315 ((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \
346 BFI_ADAPTER_UNSUPP)) 316 BFI_ADAPTER_UNSUPP))
347 317
348/** 318/* BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages */
349 * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages
350 */
351struct bfi_ioc_ctrl_req { 319struct bfi_ioc_ctrl_req {
352 struct bfi_mhdr mh; 320 struct bfi_mhdr mh;
353 u16 clscode; 321 u16 clscode;
@@ -355,9 +323,7 @@ struct bfi_ioc_ctrl_req {
355 u32 tv_sec; 323 u32 tv_sec;
356}; 324};
357 325
358/** 326/* BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages */
359 * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages
360 */
361struct bfi_ioc_ctrl_reply { 327struct bfi_ioc_ctrl_reply {
362 struct bfi_mhdr mh; /*!< Common msg header */ 328 struct bfi_mhdr mh; /*!< Common msg header */
363 u8 status; /*!< enable/disable status */ 329 u8 status; /*!< enable/disable status */
@@ -367,9 +333,7 @@ struct bfi_ioc_ctrl_reply {
367}; 333};
368 334
369#define BFI_IOC_MSGSZ 8 335#define BFI_IOC_MSGSZ 8
370/** 336/* H2I Messages */
371 * H2I Messages
372 */
373union bfi_ioc_h2i_msg_u { 337union bfi_ioc_h2i_msg_u {
374 struct bfi_mhdr mh; 338 struct bfi_mhdr mh;
375 struct bfi_ioc_ctrl_req enable_req; 339 struct bfi_ioc_ctrl_req enable_req;
@@ -378,17 +342,14 @@ union bfi_ioc_h2i_msg_u {
378 u32 mboxmsg[BFI_IOC_MSGSZ]; 342 u32 mboxmsg[BFI_IOC_MSGSZ];
379}; 343};
380 344
381/** 345/* I2H Messages */
382 * I2H Messages
383 */
384union bfi_ioc_i2h_msg_u { 346union bfi_ioc_i2h_msg_u {
385 struct bfi_mhdr mh; 347 struct bfi_mhdr mh;
386 struct bfi_ioc_ctrl_reply fw_event; 348 struct bfi_ioc_ctrl_reply fw_event;
387 u32 mboxmsg[BFI_IOC_MSGSZ]; 349 u32 mboxmsg[BFI_IOC_MSGSZ];
388}; 350};
389 351
390/** 352/*----------------------------------------------------------------------
391 *----------------------------------------------------------------------
392 * MSGQ 353 * MSGQ
393 *---------------------------------------------------------------------- 354 *----------------------------------------------------------------------
394 */ 355 */
diff --git a/drivers/net/ethernet/brocade/bna/bfi_cna.h b/drivers/net/ethernet/brocade/bna/bfi_cna.h
index 4eecabea397b..6704a4392973 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_cna.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_cna.h
@@ -37,18 +37,14 @@ enum bfi_port_i2h {
37 BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4), 37 BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4),
38}; 38};
39 39
40/** 40/* Generic REQ type */
41 * Generic REQ type
42 */
43struct bfi_port_generic_req { 41struct bfi_port_generic_req {
44 struct bfi_mhdr mh; /*!< msg header */ 42 struct bfi_mhdr mh; /*!< msg header */
45 u32 msgtag; /*!< msgtag for reply */ 43 u32 msgtag; /*!< msgtag for reply */
46 u32 rsvd; 44 u32 rsvd;
47}; 45};
48 46
49/** 47/* Generic RSP type */
50 * Generic RSP type
51 */
52struct bfi_port_generic_rsp { 48struct bfi_port_generic_rsp {
53 struct bfi_mhdr mh; /*!< common msg header */ 49 struct bfi_mhdr mh; /*!< common msg header */
54 u8 status; /*!< port enable status */ 50 u8 status; /*!< port enable status */
@@ -56,44 +52,12 @@ struct bfi_port_generic_rsp {
56 u32 msgtag; /*!< msgtag for reply */ 52 u32 msgtag; /*!< msgtag for reply */
57}; 53};
58 54
59/** 55/* BFI_PORT_H2I_GET_STATS_REQ */
60 * @todo
61 * BFI_PORT_H2I_ENABLE_REQ
62 */
63
64/**
65 * @todo
66 * BFI_PORT_I2H_ENABLE_RSP
67 */
68
69/**
70 * BFI_PORT_H2I_DISABLE_REQ
71 */
72
73/**
74 * BFI_PORT_I2H_DISABLE_RSP
75 */
76
77/**
78 * BFI_PORT_H2I_GET_STATS_REQ
79 */
80struct bfi_port_get_stats_req { 56struct bfi_port_get_stats_req {
81 struct bfi_mhdr mh; /*!< common msg header */ 57 struct bfi_mhdr mh; /*!< common msg header */
82 union bfi_addr_u dma_addr; 58 union bfi_addr_u dma_addr;
83}; 59};
84 60
85/**
86 * BFI_PORT_I2H_GET_STATS_RSP
87 */
88
89/**
90 * BFI_PORT_H2I_CLEAR_STATS_REQ
91 */
92
93/**
94 * BFI_PORT_I2H_CLEAR_STATS_RSP
95 */
96
97union bfi_port_h2i_msg_u { 61union bfi_port_h2i_msg_u {
98 struct bfi_mhdr mh; 62 struct bfi_mhdr mh;
99 struct bfi_port_generic_req enable_req; 63 struct bfi_port_generic_req enable_req;
diff --git a/drivers/net/ethernet/brocade/bna/bfi_enet.h b/drivers/net/ethernet/brocade/bna/bfi_enet.h
index a90f1cf46b41..eef6e1f8aecc 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_enet.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_enet.h
@@ -16,12 +16,9 @@
16 * www.brocade.com 16 * www.brocade.com
17 */ 17 */
18 18
19/** 19/* BNA Hardware and Firmware Interface */
20 * @file bfi_enet.h BNA Hardware and Firmware Interface
21 */
22 20
23/** 21/* Skipping statistics collection to avoid clutter.
24 * Skipping statistics collection to avoid clutter.
25 * Command is no longer needed: 22 * Command is no longer needed:
26 * MTU 23 * MTU
27 * TxQ Stop 24 * TxQ Stop
@@ -64,9 +61,7 @@ union bfi_addr_be_u {
64 } a32; 61 } a32;
65}; 62};
66 63
67/** 64/* T X Q U E U E D E F I N E S */
68 * T X Q U E U E D E F I N E S
69 */
70/* TxQ Vector (a.k.a. Tx-Buffer Descriptor) */ 65/* TxQ Vector (a.k.a. Tx-Buffer Descriptor) */
71/* TxQ Entry Opcodes */ 66/* TxQ Entry Opcodes */
72#define BFI_ENET_TXQ_WI_SEND (0x402) /* Single Frame Transmission */ 67#define BFI_ENET_TXQ_WI_SEND (0x402) /* Single Frame Transmission */
@@ -106,10 +101,7 @@ struct bfi_enet_txq_wi_vector { /* Tx Buffer Descriptor */
106 union bfi_addr_be_u addr; 101 union bfi_addr_be_u addr;
107}; 102};
108 103
109/** 104/* TxQ Entry Structure */
110 * TxQ Entry Structure
111 *
112 */
113struct bfi_enet_txq_entry { 105struct bfi_enet_txq_entry {
114 union { 106 union {
115 struct bfi_enet_txq_wi_base base; 107 struct bfi_enet_txq_wi_base base;
@@ -124,16 +116,12 @@ struct bfi_enet_txq_entry {
124#define BFI_ENET_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \ 116#define BFI_ENET_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \
125 (((_hdr_size) << 10) | ((_offset) & 0x3FF)) 117 (((_hdr_size) << 10) | ((_offset) & 0x3FF))
126 118
127/** 119/* R X Q U E U E D E F I N E S */
128 * R X Q U E U E D E F I N E S
129 */
130struct bfi_enet_rxq_entry { 120struct bfi_enet_rxq_entry {
131 union bfi_addr_be_u rx_buffer; 121 union bfi_addr_be_u rx_buffer;
132}; 122};
133 123
134/** 124/* R X C O M P L E T I O N Q U E U E D E F I N E S */
135 * R X C O M P L E T I O N Q U E U E D E F I N E S
136 */
137/* CQ Entry Flags */ 125/* CQ Entry Flags */
138#define BFI_ENET_CQ_EF_MAC_ERROR (1 << 0) 126#define BFI_ENET_CQ_EF_MAC_ERROR (1 << 0)
139#define BFI_ENET_CQ_EF_FCS_ERROR (1 << 1) 127#define BFI_ENET_CQ_EF_FCS_ERROR (1 << 1)
@@ -174,9 +162,7 @@ struct bfi_enet_cq_entry {
174 u8 rxq_id; 162 u8 rxq_id;
175}; 163};
176 164
177/** 165/* E N E T C O N T R O L P A T H C O M M A N D S */
178 * E N E T C O N T R O L P A T H C O M M A N D S
179 */
180struct bfi_enet_q { 166struct bfi_enet_q {
181 union bfi_addr_u pg_tbl; 167 union bfi_addr_u pg_tbl;
182 union bfi_addr_u first_entry; 168 union bfi_addr_u first_entry;
@@ -222,9 +208,7 @@ struct bfi_enet_ib {
222 u16 rsvd; 208 u16 rsvd;
223}; 209};
224 210
225/** 211/* ENET command messages */
226 * ENET command messages
227 */
228enum bfi_enet_h2i_msgs { 212enum bfi_enet_h2i_msgs {
229 /* Rx Commands */ 213 /* Rx Commands */
230 BFI_ENET_H2I_RX_CFG_SET_REQ = 1, 214 BFI_ENET_H2I_RX_CFG_SET_REQ = 1,
@@ -350,9 +334,7 @@ enum bfi_enet_i2h_msgs {
350 BFI_ENET_I2H_BW_UPDATE_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 4), 334 BFI_ENET_I2H_BW_UPDATE_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 4),
351}; 335};
352 336
353/** 337/* The following error codes can be returned by the enet commands */
354 * The following error codes can be returned by the enet commands
355 */
356enum bfi_enet_err { 338enum bfi_enet_err {
357 BFI_ENET_CMD_OK = 0, 339 BFI_ENET_CMD_OK = 0,
358 BFI_ENET_CMD_FAIL = 1, 340 BFI_ENET_CMD_FAIL = 1,
@@ -364,8 +346,7 @@ enum bfi_enet_err {
364 BFI_ENET_CMD_PORT_DISABLED = 7, /* !< port in disabled state */ 346 BFI_ENET_CMD_PORT_DISABLED = 7, /* !< port in disabled state */
365}; 347};
366 348
367/** 349/* Generic Request
368 * Generic Request
369 * 350 *
370 * bfi_enet_req is used by: 351 * bfi_enet_req is used by:
371 * BFI_ENET_H2I_RX_CFG_CLR_REQ 352 * BFI_ENET_H2I_RX_CFG_CLR_REQ
@@ -375,8 +356,7 @@ struct bfi_enet_req {
375 struct bfi_msgq_mhdr mh; 356 struct bfi_msgq_mhdr mh;
376}; 357};
377 358
378/** 359/* Enable/Disable Request
379 * Enable/Disable Request
380 * 360 *
381 * bfi_enet_enable_req is used by: 361 * bfi_enet_enable_req is used by:
382 * BFI_ENET_H2I_RSS_ENABLE_REQ (enet_id must be zero) 362 * BFI_ENET_H2I_RSS_ENABLE_REQ (enet_id must be zero)
@@ -391,9 +371,7 @@ struct bfi_enet_enable_req {
391 u8 rsvd[3]; 371 u8 rsvd[3];
392}; 372};
393 373
394/** 374/* Generic Response */
395 * Generic Response
396 */
397struct bfi_enet_rsp { 375struct bfi_enet_rsp {
398 struct bfi_msgq_mhdr mh; 376 struct bfi_msgq_mhdr mh;
399 u8 error; /*!< if error see cmd_offset */ 377 u8 error; /*!< if error see cmd_offset */
@@ -401,20 +379,16 @@ struct bfi_enet_rsp {
401 u16 cmd_offset; /*!< offset to invalid parameter */ 379 u16 cmd_offset; /*!< offset to invalid parameter */
402}; 380};
403 381
404/** 382/* GLOBAL CONFIGURATION */
405 * GLOBAL CONFIGURATION
406 */
407 383
408/** 384/* bfi_enet_attr_req is used by:
409 * bfi_enet_attr_req is used by:
410 * BFI_ENET_H2I_GET_ATTR_REQ 385 * BFI_ENET_H2I_GET_ATTR_REQ
411 */ 386 */
412struct bfi_enet_attr_req { 387struct bfi_enet_attr_req {
413 struct bfi_msgq_mhdr mh; 388 struct bfi_msgq_mhdr mh;
414}; 389};
415 390
416/** 391/* bfi_enet_attr_rsp is used by:
417 * bfi_enet_attr_rsp is used by:
418 * BFI_ENET_I2H_GET_ATTR_RSP 392 * BFI_ENET_I2H_GET_ATTR_RSP
419 */ 393 */
420struct bfi_enet_attr_rsp { 394struct bfi_enet_attr_rsp {
@@ -427,8 +401,7 @@ struct bfi_enet_attr_rsp {
427 u32 rit_size; 401 u32 rit_size;
428}; 402};
429 403
430/** 404/* Tx Configuration
431 * Tx Configuration
432 * 405 *
433 * bfi_enet_tx_cfg is used by: 406 * bfi_enet_tx_cfg is used by:
434 * BFI_ENET_H2I_TX_CFG_SET_REQ 407 * BFI_ENET_H2I_TX_CFG_SET_REQ
@@ -477,8 +450,7 @@ struct bfi_enet_tx_cfg_rsp {
477 } q_handles[BFI_ENET_TXQ_PRIO_MAX]; 450 } q_handles[BFI_ENET_TXQ_PRIO_MAX];
478}; 451};
479 452
480/** 453/* Rx Configuration
481 * Rx Configuration
482 * 454 *
483 * bfi_enet_rx_cfg is used by: 455 * bfi_enet_rx_cfg is used by:
484 * BFI_ENET_H2I_RX_CFG_SET_REQ 456 * BFI_ENET_H2I_RX_CFG_SET_REQ
@@ -553,8 +525,7 @@ struct bfi_enet_rx_cfg_rsp {
553 } q_handles[BFI_ENET_RX_QSET_MAX]; 525 } q_handles[BFI_ENET_RX_QSET_MAX];
554}; 526};
555 527
556/** 528/* RIT
557 * RIT
558 * 529 *
559 * bfi_enet_rit_req is used by: 530 * bfi_enet_rit_req is used by:
560 * BFI_ENET_H2I_RIT_CFG_REQ 531 * BFI_ENET_H2I_RIT_CFG_REQ
@@ -566,8 +537,7 @@ struct bfi_enet_rit_req {
566 u8 table[BFI_ENET_RSS_RIT_MAX]; 537 u8 table[BFI_ENET_RSS_RIT_MAX];
567}; 538};
568 539
569/** 540/* RSS
570 * RSS
571 * 541 *
572 * bfi_enet_rss_cfg_req is used by: 542 * bfi_enet_rss_cfg_req is used by:
573 * BFI_ENET_H2I_RSS_CFG_REQ 543 * BFI_ENET_H2I_RSS_CFG_REQ
@@ -591,8 +561,7 @@ struct bfi_enet_rss_cfg_req {
591 struct bfi_enet_rss_cfg cfg; 561 struct bfi_enet_rss_cfg cfg;
592}; 562};
593 563
594/** 564/* MAC Unicast
595 * MAC Unicast
596 * 565 *
597 * bfi_enet_rx_vlan_req is used by: 566 * bfi_enet_rx_vlan_req is used by:
598 * BFI_ENET_H2I_MAC_UCAST_SET_REQ 567 * BFI_ENET_H2I_MAC_UCAST_SET_REQ
@@ -606,17 +575,14 @@ struct bfi_enet_ucast_req {
606 u8 rsvd[2]; 575 u8 rsvd[2];
607}; 576};
608 577
609/** 578/* MAC Unicast + VLAN */
610 * MAC Unicast + VLAN
611 */
612struct bfi_enet_mac_n_vlan_req { 579struct bfi_enet_mac_n_vlan_req {
613 struct bfi_msgq_mhdr mh; 580 struct bfi_msgq_mhdr mh;
614 u16 vlan_id; 581 u16 vlan_id;
615 mac_t mac_addr; 582 mac_t mac_addr;
616}; 583};
617 584
618/** 585/* MAC Multicast
619 * MAC Multicast
620 * 586 *
621 * bfi_enet_mac_mfilter_add_req is used by: 587 * bfi_enet_mac_mfilter_add_req is used by:
622 * BFI_ENET_H2I_MAC_MCAST_ADD_REQ 588 * BFI_ENET_H2I_MAC_MCAST_ADD_REQ
@@ -627,8 +593,7 @@ struct bfi_enet_mcast_add_req {
627 u8 rsvd[2]; 593 u8 rsvd[2];
628}; 594};
629 595
630/** 596/* bfi_enet_mac_mfilter_add_rsp is used by:
631 * bfi_enet_mac_mfilter_add_rsp is used by:
632 * BFI_ENET_I2H_MAC_MCAST_ADD_RSP 597 * BFI_ENET_I2H_MAC_MCAST_ADD_RSP
633 */ 598 */
634struct bfi_enet_mcast_add_rsp { 599struct bfi_enet_mcast_add_rsp {
@@ -640,8 +605,7 @@ struct bfi_enet_mcast_add_rsp {
640 u8 rsvd1[2]; 605 u8 rsvd1[2];
641}; 606};
642 607
643/** 608/* bfi_enet_mac_mfilter_del_req is used by:
644 * bfi_enet_mac_mfilter_del_req is used by:
645 * BFI_ENET_H2I_MAC_MCAST_DEL_REQ 609 * BFI_ENET_H2I_MAC_MCAST_DEL_REQ
646 */ 610 */
647struct bfi_enet_mcast_del_req { 611struct bfi_enet_mcast_del_req {
@@ -650,8 +614,7 @@ struct bfi_enet_mcast_del_req {
650 u8 rsvd[2]; 614 u8 rsvd[2];
651}; 615};
652 616
653/** 617/* VLAN
654 * VLAN
655 * 618 *
656 * bfi_enet_rx_vlan_req is used by: 619 * bfi_enet_rx_vlan_req is used by:
657 * BFI_ENET_H2I_RX_VLAN_SET_REQ 620 * BFI_ENET_H2I_RX_VLAN_SET_REQ
@@ -663,8 +626,7 @@ struct bfi_enet_rx_vlan_req {
663 u32 bit_mask[BFI_ENET_VLAN_WORDS_MAX]; 626 u32 bit_mask[BFI_ENET_VLAN_WORDS_MAX];
664}; 627};
665 628
666/** 629/* PAUSE
667 * PAUSE
668 * 630 *
669 * bfi_enet_set_pause_req is used by: 631 * bfi_enet_set_pause_req is used by:
670 * BFI_ENET_H2I_SET_PAUSE_REQ 632 * BFI_ENET_H2I_SET_PAUSE_REQ
@@ -676,8 +638,7 @@ struct bfi_enet_set_pause_req {
676 u8 rx_pause; /* 1 = enable; 0 = disable */ 638 u8 rx_pause; /* 1 = enable; 0 = disable */
677}; 639};
678 640
679/** 641/* DIAGNOSTICS
680 * DIAGNOSTICS
681 * 642 *
682 * bfi_enet_diag_lb_req is used by: 643 * bfi_enet_diag_lb_req is used by:
683 * BFI_ENET_H2I_DIAG_LOOPBACK 644 * BFI_ENET_H2I_DIAG_LOOPBACK
@@ -689,16 +650,13 @@ struct bfi_enet_diag_lb_req {
689 u8 enable; /* 1 = enable; 0 = disable */ 650 u8 enable; /* 1 = enable; 0 = disable */
690}; 651};
691 652
692/** 653/* enum for Loopback opmodes */
693 * enum for Loopback opmodes
694 */
695enum { 654enum {
696 BFI_ENET_DIAG_LB_OPMODE_EXT = 0, 655 BFI_ENET_DIAG_LB_OPMODE_EXT = 0,
697 BFI_ENET_DIAG_LB_OPMODE_CBL = 1, 656 BFI_ENET_DIAG_LB_OPMODE_CBL = 1,
698}; 657};
699 658
700/** 659/* STATISTICS
701 * STATISTICS
702 * 660 *
703 * bfi_enet_stats_req is used by: 661 * bfi_enet_stats_req is used by:
704 * BFI_ENET_H2I_STATS_GET_REQ 662 * BFI_ENET_H2I_STATS_GET_REQ
@@ -713,9 +671,7 @@ struct bfi_enet_stats_req {
713 union bfi_addr_u host_buffer; 671 union bfi_addr_u host_buffer;
714}; 672};
715 673
716/** 674/* defines for "stats_mask" above. */
717 * defines for "stats_mask" above.
718 */
719#define BFI_ENET_STATS_MAC (1 << 0) /* !< MAC Statistics */ 675#define BFI_ENET_STATS_MAC (1 << 0) /* !< MAC Statistics */
720#define BFI_ENET_STATS_BPC (1 << 1) /* !< Pause Stats from BPC */ 676#define BFI_ENET_STATS_BPC (1 << 1) /* !< Pause Stats from BPC */
721#define BFI_ENET_STATS_RAD (1 << 2) /* !< Rx Admission Statistics */ 677#define BFI_ENET_STATS_RAD (1 << 2) /* !< Rx Admission Statistics */
@@ -881,8 +837,7 @@ struct bfi_enet_stats_mac {
881 u64 tx_fragments; 837 u64 tx_fragments;
882}; 838};
883 839
884/** 840/* Complete statistics, DMAed from fw to host followed by
885 * Complete statistics, DMAed from fw to host followed by
886 * BFI_ENET_I2H_STATS_GET_RSP 841 * BFI_ENET_I2H_STATS_GET_RSP
887 */ 842 */
888struct bfi_enet_stats { 843struct bfi_enet_stats {
diff --git a/drivers/net/ethernet/brocade/bna/bfi_reg.h b/drivers/net/ethernet/brocade/bna/bfi_reg.h
index 0e094fe46dfd..c49fa312ddbd 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_reg.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_reg.h
@@ -221,9 +221,7 @@ enum {
221#define __PMM_1T_RESET_P 0x00000001 221#define __PMM_1T_RESET_P 0x00000001
222#define PMM_1T_RESET_REG_P1 0x00023c1c 222#define PMM_1T_RESET_REG_P1 0x00023c1c
223 223
224/** 224/* Brocade 1860 Adapter specific defines */
225 * Brocade 1860 Adapter specific defines
226 */
227#define CT2_PCI_CPQ_BASE 0x00030000 225#define CT2_PCI_CPQ_BASE 0x00030000
228#define CT2_PCI_APP_BASE 0x00030100 226#define CT2_PCI_APP_BASE 0x00030100
229#define CT2_PCI_ETH_BASE 0x00030400 227#define CT2_PCI_ETH_BASE 0x00030400
diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h
index 4d7a5de08e12..ede532b4e9db 100644
--- a/drivers/net/ethernet/brocade/bna/bna.h
+++ b/drivers/net/ethernet/brocade/bna/bna.h
@@ -25,11 +25,7 @@
25 25
26extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX]; 26extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
27 27
28/** 28/* Macros and constants */
29 *
30 * Macros and constants
31 *
32 */
33 29
34#define BNA_IOC_TIMER_FREQ 200 30#define BNA_IOC_TIMER_FREQ 200
35 31
@@ -356,11 +352,7 @@ do { \
356 } \ 352 } \
357} while (0) 353} while (0)
358 354
359/** 355/* Inline functions */
360 *
361 * Inline functions
362 *
363 */
364 356
365static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr) 357static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr)
366{ 358{
@@ -377,15 +369,9 @@ static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr)
377 369
378#define bna_attr(_bna) (&(_bna)->ioceth.attr) 370#define bna_attr(_bna) (&(_bna)->ioceth.attr)
379 371
380/** 372/* Function prototypes */
381 *
382 * Function prototypes
383 *
384 */
385 373
386/** 374/* BNA */
387 * BNA
388 */
389 375
390/* FW response handlers */ 376/* FW response handlers */
391void bna_bfi_stats_clr_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr); 377void bna_bfi_stats_clr_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr);
@@ -413,24 +399,19 @@ struct bna_mcam_handle *bna_mcam_mod_handle_get(struct bna_mcam_mod *mod);
413void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod, 399void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
414 struct bna_mcam_handle *handle); 400 struct bna_mcam_handle *handle);
415 401
416/** 402/* MBOX */
417 * MBOX
418 */
419 403
420/* API for BNAD */ 404/* API for BNAD */
421void bna_mbox_handler(struct bna *bna, u32 intr_status); 405void bna_mbox_handler(struct bna *bna, u32 intr_status);
422 406
423/** 407/* ETHPORT */
424 * ETHPORT
425 */
426 408
427/* Callbacks for RX */ 409/* Callbacks for RX */
428void bna_ethport_cb_rx_started(struct bna_ethport *ethport); 410void bna_ethport_cb_rx_started(struct bna_ethport *ethport);
429void bna_ethport_cb_rx_stopped(struct bna_ethport *ethport); 411void bna_ethport_cb_rx_stopped(struct bna_ethport *ethport);
430 412
431/** 413/* TX MODULE AND TX */
432 * TX MODULE AND TX 414
433 */
434/* FW response handelrs */ 415/* FW response handelrs */
435void bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, 416void bna_bfi_tx_enet_start_rsp(struct bna_tx *tx,
436 struct bfi_msgq_mhdr *msghdr); 417 struct bfi_msgq_mhdr *msghdr);
@@ -462,9 +443,7 @@ void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
462void bna_tx_cleanup_complete(struct bna_tx *tx); 443void bna_tx_cleanup_complete(struct bna_tx *tx);
463void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo); 444void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo);
464 445
465/** 446/* RX MODULE, RX, RXF */
466 * RX MODULE, RX, RXF
467 */
468 447
469/* FW response handlers */ 448/* FW response handlers */
470void bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, 449void bna_bfi_rx_enet_start_rsp(struct bna_rx *rx,
@@ -522,9 +501,7 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
522void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id); 501void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
523void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id); 502void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
524void bna_rx_vlanfilter_enable(struct bna_rx *rx); 503void bna_rx_vlanfilter_enable(struct bna_rx *rx);
525/** 504/* ENET */
526 * ENET
527 */
528 505
529/* API for RX */ 506/* API for RX */
530int bna_enet_mtu_get(struct bna_enet *enet); 507int bna_enet_mtu_get(struct bna_enet *enet);
@@ -544,18 +521,14 @@ void bna_enet_mtu_set(struct bna_enet *enet, int mtu,
544 void (*cbfn)(struct bnad *)); 521 void (*cbfn)(struct bnad *));
545void bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac); 522void bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac);
546 523
547/** 524/* IOCETH */
548 * IOCETH
549 */
550 525
551/* APIs for BNAD */ 526/* APIs for BNAD */
552void bna_ioceth_enable(struct bna_ioceth *ioceth); 527void bna_ioceth_enable(struct bna_ioceth *ioceth);
553void bna_ioceth_disable(struct bna_ioceth *ioceth, 528void bna_ioceth_disable(struct bna_ioceth *ioceth,
554 enum bna_cleanup_type type); 529 enum bna_cleanup_type type);
555 530
556/** 531/* BNAD */
557 * BNAD
558 */
559 532
560/* Callbacks for ENET */ 533/* Callbacks for ENET */
561void bnad_cb_ethport_link_status(struct bnad *bnad, 534void bnad_cb_ethport_link_status(struct bnad *bnad,
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index 9ccc586e3767..db14f69d63bc 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -378,9 +378,8 @@ bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
378 } 378 }
379} 379}
380 380
381/** 381/* ETHPORT */
382 * ETHPORT 382
383 */
384#define call_ethport_stop_cbfn(_ethport) \ 383#define call_ethport_stop_cbfn(_ethport) \
385do { \ 384do { \
386 if ((_ethport)->stop_cbfn) { \ 385 if ((_ethport)->stop_cbfn) { \
@@ -804,9 +803,8 @@ bna_ethport_cb_rx_stopped(struct bna_ethport *ethport)
804 } 803 }
805} 804}
806 805
807/** 806/* ENET */
808 * ENET 807
809 */
810#define bna_enet_chld_start(enet) \ 808#define bna_enet_chld_start(enet) \
811do { \ 809do { \
812 enum bna_tx_type tx_type = \ 810 enum bna_tx_type tx_type = \
@@ -1328,9 +1326,8 @@ bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac)
1328 *mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc); 1326 *mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc);
1329} 1327}
1330 1328
1331/** 1329/* IOCETH */
1332 * IOCETH 1330
1333 */
1334#define enable_mbox_intr(_ioceth) \ 1331#define enable_mbox_intr(_ioceth) \
1335do { \ 1332do { \
1336 u32 intr_status; \ 1333 u32 intr_status; \
diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
index 4c6aab2a9534..b8c4e21fbf4c 100644
--- a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
@@ -16,20 +16,15 @@
16 * www.brocade.com 16 * www.brocade.com
17 */ 17 */
18 18
19/** 19/* File for interrupt macros and functions */
20 * File for interrupt macros and functions
21 */
22 20
23#ifndef __BNA_HW_DEFS_H__ 21#ifndef __BNA_HW_DEFS_H__
24#define __BNA_HW_DEFS_H__ 22#define __BNA_HW_DEFS_H__
25 23
26#include "bfi_reg.h" 24#include "bfi_reg.h"
27 25
28/** 26/* SW imposed limits */
29 * 27
30 * SW imposed limits
31 *
32 */
33#define BFI_ENET_DEF_TXQ 1 28#define BFI_ENET_DEF_TXQ 1
34#define BFI_ENET_DEF_RXP 1 29#define BFI_ENET_DEF_RXP 1
35#define BFI_ENET_DEF_UCAM 1 30#define BFI_ENET_DEF_UCAM 1
@@ -141,11 +136,8 @@
141} 136}
142 137
143#define bna_port_id_get(_bna) ((_bna)->ioceth.ioc.port_id) 138#define bna_port_id_get(_bna) ((_bna)->ioceth.ioc.port_id)
144/** 139
145 * 140/* Interrupt related bits, flags and macros */
146 * Interrupt related bits, flags and macros
147 *
148 */
149 141
150#define IB_STATUS_BITS 0x0000ffff 142#define IB_STATUS_BITS 0x0000ffff
151 143
@@ -280,11 +272,7 @@ do { \
280 (writel(BNA_DOORBELL_Q_PRD_IDX((_rcb)->producer_index), \ 272 (writel(BNA_DOORBELL_Q_PRD_IDX((_rcb)->producer_index), \
281 (_rcb)->q_dbell)); 273 (_rcb)->q_dbell));
282 274
283/** 275/* TxQ, RxQ, CQ related bits, offsets, macros */
284 *
285 * TxQ, RxQ, CQ related bits, offsets, macros
286 *
287 */
288 276
289/* TxQ Entry Opcodes */ 277/* TxQ Entry Opcodes */
290#define BNA_TXQ_WI_SEND (0x402) /* Single Frame Transmission */ 278#define BNA_TXQ_WI_SEND (0x402) /* Single Frame Transmission */
@@ -334,11 +322,7 @@ do { \
334 322
335#define BNA_CQ_EF_LOCAL (1 << 20) 323#define BNA_CQ_EF_LOCAL (1 << 20)
336 324
337/** 325/* Data structures */
338 *
339 * Data structures
340 *
341 */
342 326
343struct bna_reg_offset { 327struct bna_reg_offset {
344 u32 fn_int_status; 328 u32 fn_int_status;
@@ -371,8 +355,7 @@ struct bna_txq_wi_vector {
371 struct bna_dma_addr host_addr; /* Tx-Buf DMA addr */ 355 struct bna_dma_addr host_addr; /* Tx-Buf DMA addr */
372}; 356};
373 357
374/** 358/* TxQ Entry Structure
375 * TxQ Entry Structure
376 * 359 *
377 * BEWARE: Load values into this structure with correct endianess. 360 * BEWARE: Load values into this structure with correct endianess.
378 */ 361 */
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 276fcb589f4b..71144b396e02 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -18,9 +18,7 @@
18#include "bna.h" 18#include "bna.h"
19#include "bfi.h" 19#include "bfi.h"
20 20
21/** 21/* IB */
22 * IB
23 */
24static void 22static void
25bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo) 23bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
26{ 24{
@@ -29,9 +27,7 @@ bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
29 (u32)ib->coalescing_timeo, 0); 27 (u32)ib->coalescing_timeo, 0);
30} 28}
31 29
32/** 30/* RXF */
33 * RXF
34 */
35 31
36#define bna_rxf_vlan_cfg_soft_reset(rxf) \ 32#define bna_rxf_vlan_cfg_soft_reset(rxf) \
37do { \ 33do { \
@@ -1312,9 +1308,7 @@ bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
1312 return 0; 1308 return 0;
1313} 1309}
1314 1310
1315/** 1311/* RX */
1316 * RX
1317 */
1318 1312
1319#define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \ 1313#define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
1320 (qcfg)->num_paths : ((qcfg)->num_paths * 2)) 1314 (qcfg)->num_paths : ((qcfg)->num_paths * 2))
@@ -2791,9 +2785,8 @@ const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
2791 {1, 2}, 2785 {1, 2},
2792}; 2786};
2793 2787
2794/** 2788/* TX */
2795 * TX 2789
2796 */
2797#define call_tx_stop_cbfn(tx) \ 2790#define call_tx_stop_cbfn(tx) \
2798do { \ 2791do { \
2799 if ((tx)->stop_cbfn) { \ 2792 if ((tx)->stop_cbfn) { \
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index e8d3ab7ea6cb..d3eb8bddfb2a 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -23,11 +23,7 @@
23#include "bfa_cee.h" 23#include "bfa_cee.h"
24#include "bfa_msgq.h" 24#include "bfa_msgq.h"
25 25
26/** 26/* Forward declarations */
27 *
28 * Forward declarations
29 *
30 */
31 27
32struct bna_mcam_handle; 28struct bna_mcam_handle;
33struct bna_txq; 29struct bna_txq;
@@ -40,11 +36,7 @@ struct bna_enet;
40struct bna; 36struct bna;
41struct bnad; 37struct bnad;
42 38
43/** 39/* Enums, primitive data types */
44 *
45 * Enums, primitive data types
46 *
47 */
48 40
49enum bna_status { 41enum bna_status {
50 BNA_STATUS_T_DISABLED = 0, 42 BNA_STATUS_T_DISABLED = 0,
@@ -331,11 +323,7 @@ struct bna_attr {
331 int max_rit_size; 323 int max_rit_size;
332}; 324};
333 325
334/** 326/* IOCEth */
335 *
336 * IOCEth
337 *
338 */
339 327
340struct bna_ioceth { 328struct bna_ioceth {
341 bfa_fsm_t fsm; 329 bfa_fsm_t fsm;
@@ -351,11 +339,7 @@ struct bna_ioceth {
351 struct bna *bna; 339 struct bna *bna;
352}; 340};
353 341
354/** 342/* Enet */
355 *
356 * Enet
357 *
358 */
359 343
360/* Pause configuration */ 344/* Pause configuration */
361struct bna_pause_config { 345struct bna_pause_config {
@@ -390,11 +374,7 @@ struct bna_enet {
390 struct bna *bna; 374 struct bna *bna;
391}; 375};
392 376
393/** 377/* Ethport */
394 *
395 * Ethport
396 *
397 */
398 378
399struct bna_ethport { 379struct bna_ethport {
400 bfa_fsm_t fsm; 380 bfa_fsm_t fsm;
@@ -419,11 +399,7 @@ struct bna_ethport {
419 struct bna *bna; 399 struct bna *bna;
420}; 400};
421 401
422/** 402/* Interrupt Block */
423 *
424 * Interrupt Block
425 *
426 */
427 403
428/* Doorbell structure */ 404/* Doorbell structure */
429struct bna_ib_dbell { 405struct bna_ib_dbell {
@@ -447,11 +423,7 @@ struct bna_ib {
447 int interpkt_timeo; 423 int interpkt_timeo;
448}; 424};
449 425
450/** 426/* Tx object */
451 *
452 * Tx object
453 *
454 */
455 427
456/* Tx datapath control structure */ 428/* Tx datapath control structure */
457#define BNA_Q_NAME_SIZE 16 429#define BNA_Q_NAME_SIZE 16
@@ -585,11 +557,7 @@ struct bna_tx_mod {
585 struct bna *bna; 557 struct bna *bna;
586}; 558};
587 559
588/** 560/* Rx object */
589 *
590 * Rx object
591 *
592 */
593 561
594/* Rx datapath control structure */ 562/* Rx datapath control structure */
595struct bna_rcb { 563struct bna_rcb {
@@ -898,11 +866,7 @@ struct bna_rx_mod {
898 u32 rid_mask; 866 u32 rid_mask;
899}; 867};
900 868
901/** 869/* CAM */
902 *
903 * CAM
904 *
905 */
906 870
907struct bna_ucam_mod { 871struct bna_ucam_mod {
908 struct bna_mac *ucmac; /* BFI_MAX_UCMAC entries */ 872 struct bna_mac *ucmac; /* BFI_MAX_UCMAC entries */
@@ -927,11 +891,7 @@ struct bna_mcam_mod {
927 struct bna *bna; 891 struct bna *bna;
928}; 892};
929 893
930/** 894/* Statistics */
931 *
932 * Statistics
933 *
934 */
935 895
936struct bna_stats { 896struct bna_stats {
937 struct bna_dma_addr hw_stats_dma; 897 struct bna_dma_addr hw_stats_dma;
@@ -949,11 +909,7 @@ struct bna_stats_mod {
949 struct bfi_enet_stats_req stats_clr; 909 struct bfi_enet_stats_req stats_clr;
950}; 910};
951 911
952/** 912/* BNA */
953 *
954 * BNA
955 *
956 */
957 913
958struct bna { 914struct bna {
959 struct bna_ident ident; 915 struct bna_ident ident;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 67cd2ed0306a..b441f33258e7 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -1302,8 +1302,7 @@ bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1302 return 0; 1302 return 0;
1303} 1303}
1304 1304
1305/** 1305/* NOTE: Should be called for MSIX only
1306 * NOTE: Should be called for MSIX only
1307 * Unregisters Tx MSIX vector(s) from the kernel 1306 * Unregisters Tx MSIX vector(s) from the kernel
1308 */ 1307 */
1309static void 1308static void
@@ -1322,8 +1321,7 @@ bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1322 } 1321 }
1323} 1322}
1324 1323
1325/** 1324/* NOTE: Should be called for MSIX only
1326 * NOTE: Should be called for MSIX only
1327 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel 1325 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1328 */ 1326 */
1329static int 1327static int
@@ -1354,8 +1352,7 @@ err_return:
1354 return -1; 1352 return -1;
1355} 1353}
1356 1354
1357/** 1355/* NOTE: Should be called for MSIX only
1358 * NOTE: Should be called for MSIX only
1359 * Unregisters Rx MSIX vector(s) from the kernel 1356 * Unregisters Rx MSIX vector(s) from the kernel
1360 */ 1357 */
1361static void 1358static void
@@ -1375,8 +1372,7 @@ bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1375 } 1372 }
1376} 1373}
1377 1374
1378/** 1375/* NOTE: Should be called for MSIX only
1379 * NOTE: Should be called for MSIX only
1380 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel 1376 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1381 */ 1377 */
1382static int 1378static int
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index 72742be11277..d78339224751 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -389,9 +389,7 @@ extern void bnad_netdev_hwstats_fill(struct bnad *bnad,
389void bnad_debugfs_init(struct bnad *bnad); 389void bnad_debugfs_init(struct bnad *bnad);
390void bnad_debugfs_uninit(struct bnad *bnad); 390void bnad_debugfs_uninit(struct bnad *bnad);
391 391
392/** 392/* MACROS */
393 * MACROS
394 */
395/* To set & get the stats counters */ 393/* To set & get the stats counters */
396#define BNAD_UPDATE_CTR(_bnad, _ctr) \ 394#define BNAD_UPDATE_CTR(_bnad, _ctr) \
397 (((_bnad)->stats.drv_stats._ctr)++) 395 (((_bnad)->stats.drv_stats._ctr)++)
diff --git a/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
index cfc22a64157e..6a68e8d93309 100644
--- a/drivers/net/ethernet/brocade/bna/cna_fwimg.c
+++ b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
@@ -67,10 +67,10 @@ bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off)
67{ 67{
68 switch (asic_gen) { 68 switch (asic_gen) {
69 case BFI_ASIC_GEN_CT: 69 case BFI_ASIC_GEN_CT:
70 return (u32 *)(bfi_image_ct_cna + off); 70 return (bfi_image_ct_cna + off);
71 break; 71 break;
72 case BFI_ASIC_GEN_CT2: 72 case BFI_ASIC_GEN_CT2:
73 return (u32 *)(bfi_image_ct2_cna + off); 73 return (bfi_image_ct2_cna + off);
74 break; 74 break;
75 default: 75 default:
76 return NULL; 76 return NULL;
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 1466bc4e3dda..033064b7b576 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -179,13 +179,16 @@ static void macb_handle_link_change(struct net_device *dev)
179 spin_unlock_irqrestore(&bp->lock, flags); 179 spin_unlock_irqrestore(&bp->lock, flags);
180 180
181 if (status_change) { 181 if (status_change) {
182 if (phydev->link) 182 if (phydev->link) {
183 netif_carrier_on(dev);
183 netdev_info(dev, "link up (%d/%s)\n", 184 netdev_info(dev, "link up (%d/%s)\n",
184 phydev->speed, 185 phydev->speed,
185 phydev->duplex == DUPLEX_FULL ? 186 phydev->duplex == DUPLEX_FULL ?
186 "Full" : "Half"); 187 "Full" : "Half");
187 else 188 } else {
189 netif_carrier_off(dev);
188 netdev_info(dev, "link down\n"); 190 netdev_info(dev, "link down\n");
191 }
189 } 192 }
190} 193}
191 194
@@ -1033,6 +1036,9 @@ static int macb_open(struct net_device *dev)
1033 1036
1034 netdev_dbg(bp->dev, "open\n"); 1037 netdev_dbg(bp->dev, "open\n");
1035 1038
1039 /* carrier starts down */
1040 netif_carrier_off(dev);
1041
1036 /* if the phy is not yet register, retry later*/ 1042 /* if the phy is not yet register, retry later*/
1037 if (!bp->phy_dev) 1043 if (!bp->phy_dev)
1038 return -EAGAIN; 1044 return -EAGAIN;
@@ -1406,6 +1412,8 @@ static int __init macb_probe(struct platform_device *pdev)
1406 1412
1407 platform_set_drvdata(pdev, dev); 1413 platform_set_drvdata(pdev, dev);
1408 1414
1415 netif_carrier_off(dev);
1416
1409 netdev_info(dev, "Cadence %s at 0x%08lx irq %d (%pM)\n", 1417 netdev_info(dev, "Cadence %s at 0x%08lx irq %d (%pM)\n",
1410 macb_is_gem(bp) ? "GEM" : "MACB", dev->base_addr, 1418 macb_is_gem(bp) ? "GEM" : "MACB", dev->base_addr,
1411 dev->irq, dev->dev_addr); 1419 dev->irq, dev->dev_addr);
@@ -1469,6 +1477,7 @@ static int macb_suspend(struct platform_device *pdev, pm_message_t state)
1469 struct net_device *netdev = platform_get_drvdata(pdev); 1477 struct net_device *netdev = platform_get_drvdata(pdev);
1470 struct macb *bp = netdev_priv(netdev); 1478 struct macb *bp = netdev_priv(netdev);
1471 1479
1480 netif_carrier_off(netdev);
1472 netif_device_detach(netdev); 1481 netif_device_detach(netdev);
1473 1482
1474 clk_disable(bp->hclk); 1483 clk_disable(bp->hclk);
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 11f667f6131a..2b4b4f529ab4 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -264,7 +264,7 @@
264#define XGMAC_OMR_FEF 0x00000080 /* Forward Error Frames */ 264#define XGMAC_OMR_FEF 0x00000080 /* Forward Error Frames */
265#define XGMAC_OMR_DT 0x00000040 /* Drop TCP/IP csum Errors */ 265#define XGMAC_OMR_DT 0x00000040 /* Drop TCP/IP csum Errors */
266#define XGMAC_OMR_RSF 0x00000020 /* RX FIFO Store and Forward */ 266#define XGMAC_OMR_RSF 0x00000020 /* RX FIFO Store and Forward */
267#define XGMAC_OMR_RTC 0x00000010 /* RX Threshhold Ctrl */ 267#define XGMAC_OMR_RTC_256 0x00000018 /* RX Threshhold Ctrl */
268#define XGMAC_OMR_RTC_MASK 0x00000018 /* RX Threshhold Ctrl MASK */ 268#define XGMAC_OMR_RTC_MASK 0x00000018 /* RX Threshhold Ctrl MASK */
269 269
270/* XGMAC HW Features Register */ 270/* XGMAC HW Features Register */
@@ -671,26 +671,23 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
671 671
672 p = priv->dma_rx + entry; 672 p = priv->dma_rx + entry;
673 673
674 if (priv->rx_skbuff[entry] != NULL) 674 if (priv->rx_skbuff[entry] == NULL) {
675 continue; 675 skb = __skb_dequeue(&priv->rx_recycle);
676 676 if (skb == NULL)
677 skb = __skb_dequeue(&priv->rx_recycle); 677 skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
678 if (skb == NULL) 678 if (unlikely(skb == NULL))
679 skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz); 679 break;
680 if (unlikely(skb == NULL)) 680
681 break; 681 priv->rx_skbuff[entry] = skb;
682 682 paddr = dma_map_single(priv->device, skb->data,
683 priv->rx_skbuff[entry] = skb; 683 priv->dma_buf_sz, DMA_FROM_DEVICE);
684 paddr = dma_map_single(priv->device, skb->data, 684 desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
685 priv->dma_buf_sz, DMA_FROM_DEVICE); 685 }
686 desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
687 686
688 netdev_dbg(priv->dev, "rx ring: head %d, tail %d\n", 687 netdev_dbg(priv->dev, "rx ring: head %d, tail %d\n",
689 priv->rx_head, priv->rx_tail); 688 priv->rx_head, priv->rx_tail);
690 689
691 priv->rx_head = dma_ring_incr(priv->rx_head, DMA_RX_RING_SZ); 690 priv->rx_head = dma_ring_incr(priv->rx_head, DMA_RX_RING_SZ);
692 /* Ensure descriptor is in memory before handing to h/w */
693 wmb();
694 desc_set_rx_owner(p); 691 desc_set_rx_owner(p);
695 } 692 }
696} 693}
@@ -933,6 +930,7 @@ static void xgmac_tx_err(struct xgmac_priv *priv)
933 desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ); 930 desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
934 priv->tx_tail = 0; 931 priv->tx_tail = 0;
935 priv->tx_head = 0; 932 priv->tx_head = 0;
933 writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
936 writel(reg | DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL); 934 writel(reg | DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
937 935
938 writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS, 936 writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
@@ -972,7 +970,7 @@ static int xgmac_hw_init(struct net_device *dev)
972 writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA); 970 writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
973 971
974 /* XGMAC requires AXI bus init. This is a 'magic number' for now */ 972 /* XGMAC requires AXI bus init. This is a 'magic number' for now */
975 writel(0x000100E, ioaddr + XGMAC_DMA_AXI_BUS); 973 writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS);
976 974
977 ctrl |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_JE | XGMAC_CONTROL_ACS | 975 ctrl |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_JE | XGMAC_CONTROL_ACS |
978 XGMAC_CONTROL_CAR; 976 XGMAC_CONTROL_CAR;
@@ -984,7 +982,8 @@ static int xgmac_hw_init(struct net_device *dev)
984 writel(value, ioaddr + XGMAC_DMA_CONTROL); 982 writel(value, ioaddr + XGMAC_DMA_CONTROL);
985 983
986 /* Set the HW DMA mode and the COE */ 984 /* Set the HW DMA mode and the COE */
987 writel(XGMAC_OMR_TSF | XGMAC_OMR_RSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA, 985 writel(XGMAC_OMR_TSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA |
986 XGMAC_OMR_RTC_256,
988 ioaddr + XGMAC_OMR); 987 ioaddr + XGMAC_OMR);
989 988
990 /* Reset the MMC counters */ 989 /* Reset the MMC counters */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index abb6ce7c1b7e..6505070abcfa 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3050,7 +3050,7 @@ static struct pci_error_handlers t3_err_handler = {
3050static void set_nqsets(struct adapter *adap) 3050static void set_nqsets(struct adapter *adap)
3051{ 3051{
3052 int i, j = 0; 3052 int i, j = 0;
3053 int num_cpus = num_online_cpus(); 3053 int num_cpus = netif_get_num_default_rss_queues();
3054 int hwports = adap->params.nports; 3054 int hwports = adap->params.nports;
3055 int nqsets = adap->msix_nvectors - 1; 3055 int nqsets = adap->msix_nvectors - 1;
3056 3056
@@ -3173,6 +3173,9 @@ static void __devinit cxgb3_init_iscsi_mac(struct net_device *dev)
3173 pi->iscsic.mac_addr[3] |= 0x80; 3173 pi->iscsic.mac_addr[3] |= 0x80;
3174} 3174}
3175 3175
3176#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
3177#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
3178 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
3176static int __devinit init_one(struct pci_dev *pdev, 3179static int __devinit init_one(struct pci_dev *pdev,
3177 const struct pci_device_id *ent) 3180 const struct pci_device_id *ent)
3178{ 3181{
@@ -3293,6 +3296,7 @@ static int __devinit init_one(struct pci_dev *pdev,
3293 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | 3296 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
3294 NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX; 3297 NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX;
3295 netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_TX; 3298 netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_TX;
3299 netdev->vlan_features |= netdev->features & VLAN_FEAT;
3296 if (pci_using_dac) 3300 if (pci_using_dac)
3297 netdev->features |= NETIF_F_HIGHDMA; 3301 netdev->features |= NETIF_F_HIGHDMA;
3298 3302
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 65e4b280619a..2dbbcbb450d3 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -62,7 +62,9 @@ static const unsigned int MAX_ATIDS = 64 * 1024;
62static const unsigned int ATID_BASE = 0x10000; 62static const unsigned int ATID_BASE = 0x10000;
63 63
64static void cxgb_neigh_update(struct neighbour *neigh); 64static void cxgb_neigh_update(struct neighbour *neigh);
65static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new); 65static void cxgb_redirect(struct dst_entry *old, struct neighbour *old_neigh,
66 struct dst_entry *new, struct neighbour *new_neigh,
67 const void *daddr);
66 68
67static inline int offload_activated(struct t3cdev *tdev) 69static inline int offload_activated(struct t3cdev *tdev)
68{ 70{
@@ -575,7 +577,7 @@ static void t3_process_tid_release_list(struct work_struct *work)
575 if (!skb) { 577 if (!skb) {
576 spin_lock_bh(&td->tid_release_lock); 578 spin_lock_bh(&td->tid_release_lock);
577 p->ctx = (void *)td->tid_release_list; 579 p->ctx = (void *)td->tid_release_list;
578 td->tid_release_list = (struct t3c_tid_entry *)p; 580 td->tid_release_list = p;
579 break; 581 break;
580 } 582 }
581 mk_tid_release(skb, p - td->tid_maps.tid_tab); 583 mk_tid_release(skb, p - td->tid_maps.tid_tab);
@@ -968,8 +970,10 @@ static int nb_callback(struct notifier_block *self, unsigned long event,
968 } 970 }
969 case (NETEVENT_REDIRECT):{ 971 case (NETEVENT_REDIRECT):{
970 struct netevent_redirect *nr = ctx; 972 struct netevent_redirect *nr = ctx;
971 cxgb_redirect(nr->old, nr->new); 973 cxgb_redirect(nr->old, nr->old_neigh,
972 cxgb_neigh_update(dst_get_neighbour_noref(nr->new)); 974 nr->new, nr->new_neigh,
975 nr->daddr);
976 cxgb_neigh_update(nr->new_neigh);
973 break; 977 break;
974 } 978 }
975 default: 979 default:
@@ -1107,10 +1111,11 @@ static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
1107 tdev->send(tdev, skb); 1111 tdev->send(tdev, skb);
1108} 1112}
1109 1113
1110static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new) 1114static void cxgb_redirect(struct dst_entry *old, struct neighbour *old_neigh,
1115 struct dst_entry *new, struct neighbour *new_neigh,
1116 const void *daddr)
1111{ 1117{
1112 struct net_device *olddev, *newdev; 1118 struct net_device *olddev, *newdev;
1113 struct neighbour *n;
1114 struct tid_info *ti; 1119 struct tid_info *ti;
1115 struct t3cdev *tdev; 1120 struct t3cdev *tdev;
1116 u32 tid; 1121 u32 tid;
@@ -1118,15 +1123,8 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
1118 struct l2t_entry *e; 1123 struct l2t_entry *e;
1119 struct t3c_tid_entry *te; 1124 struct t3c_tid_entry *te;
1120 1125
1121 n = dst_get_neighbour_noref(old); 1126 olddev = old_neigh->dev;
1122 if (!n) 1127 newdev = new_neigh->dev;
1123 return;
1124 olddev = n->dev;
1125
1126 n = dst_get_neighbour_noref(new);
1127 if (!n)
1128 return;
1129 newdev = n->dev;
1130 1128
1131 if (!is_offloading(olddev)) 1129 if (!is_offloading(olddev))
1132 return; 1130 return;
@@ -1144,7 +1142,7 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
1144 } 1142 }
1145 1143
1146 /* Add new L2T entry */ 1144 /* Add new L2T entry */
1147 e = t3_l2t_get(tdev, new, newdev); 1145 e = t3_l2t_get(tdev, new, newdev, daddr);
1148 if (!e) { 1146 if (!e) {
1149 printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n", 1147 printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
1150 __func__); 1148 __func__);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 3fa3c8833ed7..8d53438638b2 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -299,7 +299,7 @@ static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
299} 299}
300 300
301struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst, 301struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
302 struct net_device *dev) 302 struct net_device *dev, const void *daddr)
303{ 303{
304 struct l2t_entry *e = NULL; 304 struct l2t_entry *e = NULL;
305 struct neighbour *neigh; 305 struct neighbour *neigh;
@@ -311,7 +311,7 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
311 int smt_idx; 311 int smt_idx;
312 312
313 rcu_read_lock(); 313 rcu_read_lock();
314 neigh = dst_get_neighbour_noref(dst); 314 neigh = dst_neigh_lookup(dst, daddr);
315 if (!neigh) 315 if (!neigh)
316 goto done_rcu; 316 goto done_rcu;
317 317
@@ -360,6 +360,8 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
360done_unlock: 360done_unlock:
361 write_unlock_bh(&d->lock); 361 write_unlock_bh(&d->lock);
362done_rcu: 362done_rcu:
363 if (neigh)
364 neigh_release(neigh);
363 rcu_read_unlock(); 365 rcu_read_unlock();
364 return e; 366 return e;
365} 367}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
index c4e864369751..8cffcdfd5678 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
@@ -110,7 +110,7 @@ static inline void set_arp_failure_handler(struct sk_buff *skb,
110void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e); 110void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
111void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh); 111void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh);
112struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst, 112struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
113 struct net_device *dev); 113 struct net_device *dev, const void *daddr);
114int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb, 114int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
115 struct l2t_entry *e); 115 struct l2t_entry *e);
116void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e); 116void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index cfb60e1f51da..dd901c5061b9 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -2877,7 +2877,7 @@ static void sge_timer_tx(unsigned long data)
2877 mod_timer(&qs->tx_reclaim_timer, jiffies + next_period); 2877 mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
2878} 2878}
2879 2879
2880/* 2880/**
2881 * sge_timer_rx - perform periodic maintenance of an SGE qset 2881 * sge_timer_rx - perform periodic maintenance of an SGE qset
2882 * @data: the SGE queue set to maintain 2882 * @data: the SGE queue set to maintain
2883 * 2883 *
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index 44ac2f40b644..bff8a3cdd3df 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -1076,7 +1076,7 @@ static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
1076 return 0; 1076 return 0;
1077} 1077}
1078 1078
1079/* 1079/**
1080 * t3_load_fw - download firmware 1080 * t3_load_fw - download firmware
1081 * @adapter: the adapter 1081 * @adapter: the adapter
1082 * @fw_data: the firmware image to write 1082 * @fw_data: the firmware image to write
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index e1f96fbb48c1..5ed49af23d6a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3493,8 +3493,8 @@ static void __devinit cfg_queues(struct adapter *adap)
3493 */ 3493 */
3494 if (n10g) 3494 if (n10g)
3495 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g; 3495 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
3496 if (q10g > num_online_cpus()) 3496 if (q10g > netif_get_num_default_rss_queues())
3497 q10g = num_online_cpus(); 3497 q10g = netif_get_num_default_rss_queues();
3498 3498
3499 for_each_port(adap, i) { 3499 for_each_port(adap, i) {
3500 struct port_info *pi = adap2pinfo(adap, i); 3500 struct port_info *pi = adap2pinfo(adap, i);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index e111d974afd8..8596acaa402b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -753,7 +753,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
753 end = (void *)q->desc + part1; 753 end = (void *)q->desc + part1;
754 } 754 }
755 if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */ 755 if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
756 *(u64 *)end = 0; 756 *end = 0;
757} 757}
758 758
759/** 759/**
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 32e1dd566a14..fa947dfa4c30 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2010,7 +2010,7 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
2010 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 2010 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2011} 2011}
2012 2012
2013/* 2013/**
2014 * t4_mem_win_read_len - read memory through PCIE memory window 2014 * t4_mem_win_read_len - read memory through PCIE memory window
2015 * @adap: the adapter 2015 * @adap: the adapter
2016 * @addr: address of first byte requested aligned on 32b. 2016 * @addr: address of first byte requested aligned on 32b.
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 25e3308fc9d8..9dad56101e23 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -418,7 +418,7 @@ static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
418 * restart a TX Ethernet Queue which was stopped for lack of 418 * restart a TX Ethernet Queue which was stopped for lack of
419 * free TX Queue Descriptors ... 419 * free TX Queue Descriptors ...
420 */ 420 */
421 const struct cpl_sge_egr_update *p = (void *)cpl; 421 const struct cpl_sge_egr_update *p = cpl;
422 unsigned int qid = EGR_QID(be32_to_cpu(p->opcode_qid)); 422 unsigned int qid = EGR_QID(be32_to_cpu(p->opcode_qid));
423 struct sge *s = &adapter->sge; 423 struct sge *s = &adapter->sge;
424 struct sge_txq *tq; 424 struct sge_txq *tq;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 0bd585bba39d..f2d1ecdcaf98 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -934,7 +934,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
934 end = (void *)tq->desc + part1; 934 end = (void *)tq->desc + part1;
935 } 935 }
936 if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */ 936 if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
937 *(u64 *)end = 0; 937 *end = 0;
938} 938}
939 939
940/** 940/**
@@ -1323,8 +1323,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1323 */ 1323 */
1324 if (unlikely((void *)sgl == (void *)tq->stat)) { 1324 if (unlikely((void *)sgl == (void *)tq->stat)) {
1325 sgl = (void *)tq->desc; 1325 sgl = (void *)tq->desc;
1326 end = (void *)((void *)tq->desc + 1326 end = ((void *)tq->desc + ((void *)end - (void *)tq->stat));
1327 ((void *)end - (void *)tq->stat));
1328 } 1327 }
1329 1328
1330 write_sgl(skb, tq, sgl, end, 0, addr); 1329 write_sgl(skb, tq, sgl, end, 0, addr);
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 8132c785cea8..ad1468b3ab91 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1300,8 +1300,6 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
1300 skb->ip_summed = CHECKSUM_COMPLETE; 1300 skb->ip_summed = CHECKSUM_COMPLETE;
1301 } 1301 }
1302 1302
1303 skb->dev = netdev;
1304
1305 if (vlan_stripped) 1303 if (vlan_stripped)
1306 __vlan_hwaccel_put_tag(skb, vlan_tci); 1304 __vlan_hwaccel_put_tag(skb, vlan_tci);
1307 1305
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index d3cd489d11a2..f879e9224846 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -3973,7 +3973,7 @@ DevicePresent(struct net_device *dev, u_long aprom_addr)
3973 tmp = srom_rd(aprom_addr, i); 3973 tmp = srom_rd(aprom_addr, i);
3974 *p++ = cpu_to_le16(tmp); 3974 *p++ = cpu_to_le16(tmp);
3975 } 3975 }
3976 de4x5_dbg_srom((struct de4x5_srom *)&lp->srom); 3976 de4x5_dbg_srom(&lp->srom);
3977 } 3977 }
3978} 3978}
3979 3979
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index c5c4c0e83bd1..d266c86a53f7 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -34,7 +34,7 @@
34#include "be_hw.h" 34#include "be_hw.h"
35#include "be_roce.h" 35#include "be_roce.h"
36 36
37#define DRV_VER "4.2.220u" 37#define DRV_VER "4.4.31.0u"
38#define DRV_NAME "be2net" 38#define DRV_NAME "be2net"
39#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC" 39#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
40#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC" 40#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
@@ -389,6 +389,7 @@ struct be_adapter {
389 struct delayed_work work; 389 struct delayed_work work;
390 u16 work_counter; 390 u16 work_counter;
391 391
392 struct delayed_work func_recovery_work;
392 u32 flags; 393 u32 flags;
393 /* Ethtool knobs and info */ 394 /* Ethtool knobs and info */
394 char fw_ver[FW_VER_LEN]; 395 char fw_ver[FW_VER_LEN];
@@ -396,9 +397,10 @@ struct be_adapter {
396 u32 *pmac_id; /* MAC addr handle used by BE card */ 397 u32 *pmac_id; /* MAC addr handle used by BE card */
397 u32 beacon_state; /* for set_phys_id */ 398 u32 beacon_state; /* for set_phys_id */
398 399
399 bool eeh_err; 400 bool eeh_error;
400 bool ue_detected;
401 bool fw_timeout; 401 bool fw_timeout;
402 bool hw_error;
403
402 u32 port_num; 404 u32 port_num;
403 bool promiscuous; 405 bool promiscuous;
404 u32 function_mode; 406 u32 function_mode;
@@ -435,6 +437,7 @@ struct be_adapter {
435 u32 max_pmac_cnt; /* Max secondary UC MACs programmable */ 437 u32 max_pmac_cnt; /* Max secondary UC MACs programmable */
436 u32 uc_macs; /* Count of secondary UC MAC programmed */ 438 u32 uc_macs; /* Count of secondary UC MAC programmed */
437 u32 msg_enable; 439 u32 msg_enable;
440 int be_get_temp_freq;
438}; 441};
439 442
440#define be_physfn(adapter) (!adapter->virtfn) 443#define be_physfn(adapter) (!adapter->virtfn)
@@ -454,6 +457,9 @@ struct be_adapter {
454#define lancer_chip(adapter) ((adapter->pdev->device == OC_DEVICE_ID3) || \ 457#define lancer_chip(adapter) ((adapter->pdev->device == OC_DEVICE_ID3) || \
455 (adapter->pdev->device == OC_DEVICE_ID4)) 458 (adapter->pdev->device == OC_DEVICE_ID4))
456 459
460#define skyhawk_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID5)
461
462
457#define be_roce_supported(adapter) ((adapter->if_type == SLI_INTF_TYPE_3 || \ 463#define be_roce_supported(adapter) ((adapter->if_type == SLI_INTF_TYPE_3 || \
458 adapter->sli_family == SKYHAWK_SLI_FAMILY) && \ 464 adapter->sli_family == SKYHAWK_SLI_FAMILY) && \
459 (adapter->function_mode & RDMA_ENABLED)) 465 (adapter->function_mode & RDMA_ENABLED))
@@ -573,6 +579,11 @@ static inline u8 is_udp_pkt(struct sk_buff *skb)
573 return val; 579 return val;
574} 580}
575 581
582static inline bool is_ipv4_pkt(struct sk_buff *skb)
583{
584 return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
585}
586
576static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac) 587static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
577{ 588{
578 u32 addr; 589 u32 addr;
@@ -593,7 +604,19 @@ static inline bool be_multi_rxq(const struct be_adapter *adapter)
593 604
594static inline bool be_error(struct be_adapter *adapter) 605static inline bool be_error(struct be_adapter *adapter)
595{ 606{
596 return adapter->eeh_err || adapter->ue_detected || adapter->fw_timeout; 607 return adapter->eeh_error || adapter->hw_error || adapter->fw_timeout;
608}
609
610static inline bool be_crit_error(struct be_adapter *adapter)
611{
612 return adapter->eeh_error || adapter->hw_error;
613}
614
615static inline void be_clear_all_error(struct be_adapter *adapter)
616{
617 adapter->eeh_error = false;
618 adapter->hw_error = false;
619 adapter->fw_timeout = false;
597} 620}
598 621
599static inline bool be_is_wol_excluded(struct be_adapter *adapter) 622static inline bool be_is_wol_excluded(struct be_adapter *adapter)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 921c2082af4c..7fac97b4bb59 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -19,9 +19,6 @@
19#include "be.h" 19#include "be.h"
20#include "be_cmds.h" 20#include "be_cmds.h"
21 21
22/* Must be a power of 2 or else MODULO will BUG_ON */
23static int be_get_temp_freq = 64;
24
25static inline void *embedded_payload(struct be_mcc_wrb *wrb) 22static inline void *embedded_payload(struct be_mcc_wrb *wrb)
26{ 23{
27 return wrb->payload.embedded_payload; 24 return wrb->payload.embedded_payload;
@@ -115,7 +112,7 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
115 } 112 }
116 } else { 113 } else {
117 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES) 114 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
118 be_get_temp_freq = 0; 115 adapter->be_get_temp_freq = 0;
119 116
120 if (compl_status == MCC_STATUS_NOT_SUPPORTED || 117 if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
121 compl_status == MCC_STATUS_ILLEGAL_REQUEST) 118 compl_status == MCC_STATUS_ILLEGAL_REQUEST)
@@ -144,6 +141,11 @@ static void be_async_link_state_process(struct be_adapter *adapter,
144 /* When link status changes, link speed must be re-queried from FW */ 141 /* When link status changes, link speed must be re-queried from FW */
145 adapter->phy.link_speed = -1; 142 adapter->phy.link_speed = -1;
146 143
144 /* Ignore physical link event */
145 if (lancer_chip(adapter) &&
146 !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
147 return;
148
147 /* For the initial link status do not rely on the ASYNC event as 149 /* For the initial link status do not rely on the ASYNC event as
148 * it may not be received in some cases. 150 * it may not be received in some cases.
149 */ 151 */
@@ -352,7 +354,7 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
352 if (msecs > 4000) { 354 if (msecs > 4000) {
353 dev_err(&adapter->pdev->dev, "FW not responding\n"); 355 dev_err(&adapter->pdev->dev, "FW not responding\n");
354 adapter->fw_timeout = true; 356 adapter->fw_timeout = true;
355 be_detect_dump_ue(adapter); 357 be_detect_error(adapter);
356 return -1; 358 return -1;
357 } 359 }
358 360
@@ -429,12 +431,65 @@ static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
429 return 0; 431 return 0;
430} 432}
431 433
432int be_cmd_POST(struct be_adapter *adapter) 434int lancer_wait_ready(struct be_adapter *adapter)
435{
436#define SLIPORT_READY_TIMEOUT 30
437 u32 sliport_status;
438 int status = 0, i;
439
440 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
441 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
442 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
443 break;
444
445 msleep(1000);
446 }
447
448 if (i == SLIPORT_READY_TIMEOUT)
449 status = -1;
450
451 return status;
452}
453
454int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
455{
456 int status;
457 u32 sliport_status, err, reset_needed;
458 status = lancer_wait_ready(adapter);
459 if (!status) {
460 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
461 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
462 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
463 if (err && reset_needed) {
464 iowrite32(SLI_PORT_CONTROL_IP_MASK,
465 adapter->db + SLIPORT_CONTROL_OFFSET);
466
467 /* check adapter has corrected the error */
468 status = lancer_wait_ready(adapter);
469 sliport_status = ioread32(adapter->db +
470 SLIPORT_STATUS_OFFSET);
471 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
472 SLIPORT_STATUS_RN_MASK);
473 if (status || sliport_status)
474 status = -1;
475 } else if (err || reset_needed) {
476 status = -1;
477 }
478 }
479 return status;
480}
481
482int be_fw_wait_ready(struct be_adapter *adapter)
433{ 483{
434 u16 stage; 484 u16 stage;
435 int status, timeout = 0; 485 int status, timeout = 0;
436 struct device *dev = &adapter->pdev->dev; 486 struct device *dev = &adapter->pdev->dev;
437 487
488 if (lancer_chip(adapter)) {
489 status = lancer_wait_ready(adapter);
490 return status;
491 }
492
438 do { 493 do {
439 status = be_POST_stage_get(adapter, &stage); 494 status = be_POST_stage_get(adapter, &stage);
440 if (status) { 495 if (status) {
@@ -565,6 +620,9 @@ int be_cmd_fw_init(struct be_adapter *adapter)
565 u8 *wrb; 620 u8 *wrb;
566 int status; 621 int status;
567 622
623 if (lancer_chip(adapter))
624 return 0;
625
568 if (mutex_lock_interruptible(&adapter->mbox_lock)) 626 if (mutex_lock_interruptible(&adapter->mbox_lock))
569 return -1; 627 return -1;
570 628
@@ -592,6 +650,9 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
592 u8 *wrb; 650 u8 *wrb;
593 int status; 651 int status;
594 652
653 if (lancer_chip(adapter))
654 return 0;
655
595 if (mutex_lock_interruptible(&adapter->mbox_lock)) 656 if (mutex_lock_interruptible(&adapter->mbox_lock))
596 return -1; 657 return -1;
597 658
@@ -610,6 +671,7 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
610 mutex_unlock(&adapter->mbox_lock); 671 mutex_unlock(&adapter->mbox_lock);
611 return status; 672 return status;
612} 673}
674
613int be_cmd_eq_create(struct be_adapter *adapter, 675int be_cmd_eq_create(struct be_adapter *adapter,
614 struct be_queue_info *eq, int eq_delay) 676 struct be_queue_info *eq, int eq_delay)
615{ 677{
@@ -1132,7 +1194,7 @@ err:
1132 * Uses MCCQ 1194 * Uses MCCQ
1133 */ 1195 */
1134int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, 1196int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
1135 u8 *mac, u32 *if_handle, u32 *pmac_id, u32 domain) 1197 u32 *if_handle, u32 domain)
1136{ 1198{
1137 struct be_mcc_wrb *wrb; 1199 struct be_mcc_wrb *wrb;
1138 struct be_cmd_req_if_create *req; 1200 struct be_cmd_req_if_create *req;
@@ -1152,17 +1214,13 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
1152 req->hdr.domain = domain; 1214 req->hdr.domain = domain;
1153 req->capability_flags = cpu_to_le32(cap_flags); 1215 req->capability_flags = cpu_to_le32(cap_flags);
1154 req->enable_flags = cpu_to_le32(en_flags); 1216 req->enable_flags = cpu_to_le32(en_flags);
1155 if (mac) 1217
1156 memcpy(req->mac_addr, mac, ETH_ALEN); 1218 req->pmac_invalid = true;
1157 else
1158 req->pmac_invalid = true;
1159 1219
1160 status = be_mcc_notify_wait(adapter); 1220 status = be_mcc_notify_wait(adapter);
1161 if (!status) { 1221 if (!status) {
1162 struct be_cmd_resp_if_create *resp = embedded_payload(wrb); 1222 struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
1163 *if_handle = le32_to_cpu(resp->interface_id); 1223 *if_handle = le32_to_cpu(resp->interface_id);
1164 if (mac)
1165 *pmac_id = le32_to_cpu(resp->pmac_id);
1166 } 1224 }
1167 1225
1168err: 1226err:
@@ -1210,9 +1268,6 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1210 struct be_cmd_req_hdr *hdr; 1268 struct be_cmd_req_hdr *hdr;
1211 int status = 0; 1269 int status = 0;
1212 1270
1213 if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
1214 be_cmd_get_die_temperature(adapter);
1215
1216 spin_lock_bh(&adapter->mcc_lock); 1271 spin_lock_bh(&adapter->mcc_lock);
1217 1272
1218 wrb = wrb_from_mccq(adapter); 1273 wrb = wrb_from_mccq(adapter);
@@ -1581,7 +1636,8 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1581 /* Reset mcast promisc mode if already set by setting mask 1636 /* Reset mcast promisc mode if already set by setting mask
1582 * and not setting flags field 1637 * and not setting flags field
1583 */ 1638 */
1584 req->if_flags_mask |= 1639 if (!lancer_chip(adapter) || be_physfn(adapter))
1640 req->if_flags_mask |=
1585 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS); 1641 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
1586 1642
1587 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev)); 1643 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
@@ -1692,6 +1748,20 @@ int be_cmd_reset_function(struct be_adapter *adapter)
1692 struct be_cmd_req_hdr *req; 1748 struct be_cmd_req_hdr *req;
1693 int status; 1749 int status;
1694 1750
1751 if (lancer_chip(adapter)) {
1752 status = lancer_wait_ready(adapter);
1753 if (!status) {
1754 iowrite32(SLI_PORT_CONTROL_IP_MASK,
1755 adapter->db + SLIPORT_CONTROL_OFFSET);
1756 status = lancer_test_and_set_rdy_state(adapter);
1757 }
1758 if (status) {
1759 dev_err(&adapter->pdev->dev,
1760 "Adapter in non recoverable error\n");
1761 }
1762 return status;
1763 }
1764
1695 if (mutex_lock_interruptible(&adapter->mbox_lock)) 1765 if (mutex_lock_interruptible(&adapter->mbox_lock))
1696 return -1; 1766 return -1;
1697 1767
@@ -1728,6 +1798,13 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
1728 req->if_id = cpu_to_le32(adapter->if_handle); 1798 req->if_id = cpu_to_le32(adapter->if_handle);
1729 req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 | 1799 req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
1730 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6); 1800 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6);
1801
1802 if (lancer_chip(adapter) || skyhawk_chip(adapter)) {
1803 req->hdr.version = 1;
1804 req->enable_rss |= cpu_to_le16(RSS_ENABLE_UDP_IPV4 |
1805 RSS_ENABLE_UDP_IPV6);
1806 }
1807
1731 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1); 1808 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
1732 memcpy(req->cpu_table, rsstable, table_size); 1809 memcpy(req->cpu_table, rsstable, table_size);
1733 memcpy(req->hash, myhash, sizeof(myhash)); 1810 memcpy(req->hash, myhash, sizeof(myhash));
@@ -1805,8 +1882,9 @@ err:
1805} 1882}
1806 1883
1807int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, 1884int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
1808 u32 data_size, u32 data_offset, const char *obj_name, 1885 u32 data_size, u32 data_offset,
1809 u32 *data_written, u8 *addn_status) 1886 const char *obj_name, u32 *data_written,
1887 u8 *change_status, u8 *addn_status)
1810{ 1888{
1811 struct be_mcc_wrb *wrb; 1889 struct be_mcc_wrb *wrb;
1812 struct lancer_cmd_req_write_object *req; 1890 struct lancer_cmd_req_write_object *req;
@@ -1862,10 +1940,12 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
1862 status = adapter->flash_status; 1940 status = adapter->flash_status;
1863 1941
1864 resp = embedded_payload(wrb); 1942 resp = embedded_payload(wrb);
1865 if (!status) 1943 if (!status) {
1866 *data_written = le32_to_cpu(resp->actual_write_len); 1944 *data_written = le32_to_cpu(resp->actual_write_len);
1867 else 1945 *change_status = resp->change_status;
1946 } else {
1868 *addn_status = resp->additional_status; 1947 *addn_status = resp->additional_status;
1948 }
1869 1949
1870 return status; 1950 return status;
1871 1951
@@ -2330,8 +2410,8 @@ err:
2330} 2410}
2331 2411
2332/* Uses synchronous MCCQ */ 2412/* Uses synchronous MCCQ */
2333int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain, 2413int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
2334 bool *pmac_id_active, u32 *pmac_id, u8 *mac) 2414 bool *pmac_id_active, u32 *pmac_id, u8 domain)
2335{ 2415{
2336 struct be_mcc_wrb *wrb; 2416 struct be_mcc_wrb *wrb;
2337 struct be_cmd_req_get_mac_list *req; 2417 struct be_cmd_req_get_mac_list *req;
@@ -2376,8 +2456,9 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
2376 get_mac_list_cmd.va; 2456 get_mac_list_cmd.va;
2377 mac_count = resp->true_mac_count + resp->pseudo_mac_count; 2457 mac_count = resp->true_mac_count + resp->pseudo_mac_count;
2378 /* Mac list returned could contain one or more active mac_ids 2458 /* Mac list returned could contain one or more active mac_ids
2379 * or one or more pseudo permanant mac addresses. If an active 2459 * or one or more true or pseudo permanant mac addresses.
2380 * mac_id is present, return first active mac_id found 2460 * If an active mac_id is present, return first active mac_id
2461 * found.
2381 */ 2462 */
2382 for (i = 0; i < mac_count; i++) { 2463 for (i = 0; i < mac_count; i++) {
2383 struct get_list_macaddr *mac_entry; 2464 struct get_list_macaddr *mac_entry;
@@ -2396,7 +2477,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
2396 goto out; 2477 goto out;
2397 } 2478 }
2398 } 2479 }
2399 /* If no active mac_id found, return first pseudo mac addr */ 2480 /* If no active mac_id found, return first mac addr */
2400 *pmac_id_active = false; 2481 *pmac_id_active = false;
2401 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr, 2482 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
2402 ETH_ALEN); 2483 ETH_ALEN);
@@ -2648,6 +2729,44 @@ err:
2648 return status; 2729 return status;
2649} 2730}
2650 2731
2732int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name)
2733{
2734 struct be_mcc_wrb *wrb;
2735 struct be_cmd_req_get_port_name *req;
2736 int status;
2737
2738 if (!lancer_chip(adapter)) {
2739 *port_name = adapter->hba_port_num + '0';
2740 return 0;
2741 }
2742
2743 spin_lock_bh(&adapter->mcc_lock);
2744
2745 wrb = wrb_from_mccq(adapter);
2746 if (!wrb) {
2747 status = -EBUSY;
2748 goto err;
2749 }
2750
2751 req = embedded_payload(wrb);
2752
2753 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2754 OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
2755 NULL);
2756 req->hdr.version = 1;
2757
2758 status = be_mcc_notify_wait(adapter);
2759 if (!status) {
2760 struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
2761 *port_name = resp->port_name[adapter->hba_port_num];
2762 } else {
2763 *port_name = adapter->hba_port_num + '0';
2764 }
2765err:
2766 spin_unlock_bh(&adapter->mcc_lock);
2767 return status;
2768}
2769
2651int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, 2770int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
2652 int wrb_payload_size, u16 *cmd_status, u16 *ext_status) 2771 int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
2653{ 2772{
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index b3f3fc3d1323..250f19b5f7b6 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -93,6 +93,7 @@ enum {
93 LINK_UP = 0x1 93 LINK_UP = 0x1
94}; 94};
95#define LINK_STATUS_MASK 0x1 95#define LINK_STATUS_MASK 0x1
96#define LOGICAL_LINK_STATUS_MASK 0x2
96 97
97/* When the event code of an async trailer is link-state, the mcc_compl 98/* When the event code of an async trailer is link-state, the mcc_compl
98 * must be interpreted as follows 99 * must be interpreted as follows
@@ -186,6 +187,7 @@ struct be_mcc_mailbox {
186#define OPCODE_COMMON_ENABLE_DISABLE_BEACON 69 187#define OPCODE_COMMON_ENABLE_DISABLE_BEACON 69
187#define OPCODE_COMMON_GET_BEACON_STATE 70 188#define OPCODE_COMMON_GET_BEACON_STATE 70
188#define OPCODE_COMMON_READ_TRANSRECV_DATA 73 189#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
190#define OPCODE_COMMON_GET_PORT_NAME 77
189#define OPCODE_COMMON_GET_PHY_DETAILS 102 191#define OPCODE_COMMON_GET_PHY_DETAILS 102
190#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103 192#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
191#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121 193#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
@@ -1081,13 +1083,25 @@ struct be_cmd_resp_query_fw_cfg {
1081 u32 function_caps; 1083 u32 function_caps;
1082}; 1084};
1083 1085
1084/******************** RSS Config *******************/ 1086/******************** RSS Config ****************************************/
1085/* RSS types */ 1087/* RSS type Input parameters used to compute RX hash
1088 * RSS_ENABLE_IPV4 SRC IPv4, DST IPv4
1089 * RSS_ENABLE_TCP_IPV4 SRC IPv4, DST IPv4, TCP SRC PORT, TCP DST PORT
1090 * RSS_ENABLE_IPV6 SRC IPv6, DST IPv6
1091 * RSS_ENABLE_TCP_IPV6 SRC IPv6, DST IPv6, TCP SRC PORT, TCP DST PORT
1092 * RSS_ENABLE_UDP_IPV4 SRC IPv4, DST IPv4, UDP SRC PORT, UDP DST PORT
1093 * RSS_ENABLE_UDP_IPV6 SRC IPv6, DST IPv6, UDP SRC PORT, UDP DST PORT
1094 *
1095 * When multiple RSS types are enabled, HW picks the best hash policy
1096 * based on the type of the received packet.
1097 */
1086#define RSS_ENABLE_NONE 0x0 1098#define RSS_ENABLE_NONE 0x0
1087#define RSS_ENABLE_IPV4 0x1 1099#define RSS_ENABLE_IPV4 0x1
1088#define RSS_ENABLE_TCP_IPV4 0x2 1100#define RSS_ENABLE_TCP_IPV4 0x2
1089#define RSS_ENABLE_IPV6 0x4 1101#define RSS_ENABLE_IPV6 0x4
1090#define RSS_ENABLE_TCP_IPV6 0x8 1102#define RSS_ENABLE_TCP_IPV6 0x8
1103#define RSS_ENABLE_UDP_IPV4 0x10
1104#define RSS_ENABLE_UDP_IPV6 0x20
1091 1105
1092struct be_cmd_req_rss_config { 1106struct be_cmd_req_rss_config {
1093 struct be_cmd_req_hdr hdr; 1107 struct be_cmd_req_hdr hdr;
@@ -1163,6 +1177,8 @@ struct lancer_cmd_req_write_object {
1163 u32 addr_high; 1177 u32 addr_high;
1164}; 1178};
1165 1179
1180#define LANCER_NO_RESET_NEEDED 0x00
1181#define LANCER_FW_RESET_NEEDED 0x02
1166struct lancer_cmd_resp_write_object { 1182struct lancer_cmd_resp_write_object {
1167 u8 opcode; 1183 u8 opcode;
1168 u8 subsystem; 1184 u8 subsystem;
@@ -1173,6 +1189,8 @@ struct lancer_cmd_resp_write_object {
1173 u32 resp_len; 1189 u32 resp_len;
1174 u32 actual_resp_len; 1190 u32 actual_resp_len;
1175 u32 actual_write_len; 1191 u32 actual_write_len;
1192 u8 change_status;
1193 u8 rsvd3[3];
1176}; 1194};
1177 1195
1178/************************ Lancer Read FW info **************/ 1196/************************ Lancer Read FW info **************/
@@ -1502,6 +1520,17 @@ struct be_cmd_resp_get_hsw_config {
1502 u32 rsvd; 1520 u32 rsvd;
1503}; 1521};
1504 1522
1523/******************* get port names ***************/
1524struct be_cmd_req_get_port_name {
1525 struct be_cmd_req_hdr hdr;
1526 u32 rsvd0;
1527};
1528
1529struct be_cmd_resp_get_port_name {
1530 struct be_cmd_req_hdr hdr;
1531 u8 port_name[4];
1532};
1533
1505/*************** HW Stats Get v1 **********************************/ 1534/*************** HW Stats Get v1 **********************************/
1506#define BE_TXP_SW_SZ 48 1535#define BE_TXP_SW_SZ 48
1507struct be_port_rxf_stats_v1 { 1536struct be_port_rxf_stats_v1 {
@@ -1656,7 +1685,7 @@ struct be_cmd_req_set_ext_fat_caps {
1656}; 1685};
1657 1686
1658extern int be_pci_fnum_get(struct be_adapter *adapter); 1687extern int be_pci_fnum_get(struct be_adapter *adapter);
1659extern int be_cmd_POST(struct be_adapter *adapter); 1688extern int be_fw_wait_ready(struct be_adapter *adapter);
1660extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, 1689extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
1661 u8 type, bool permanent, u32 if_handle, u32 pmac_id); 1690 u8 type, bool permanent, u32 if_handle, u32 pmac_id);
1662extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, 1691extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
@@ -1664,8 +1693,7 @@ extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
1664extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, 1693extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
1665 int pmac_id, u32 domain); 1694 int pmac_id, u32 domain);
1666extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, 1695extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
1667 u32 en_flags, u8 *mac, u32 *if_handle, u32 *pmac_id, 1696 u32 en_flags, u32 *if_handle, u32 domain);
1668 u32 domain);
1669extern int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle, 1697extern int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle,
1670 u32 domain); 1698 u32 domain);
1671extern int be_cmd_eq_create(struct be_adapter *adapter, 1699extern int be_cmd_eq_create(struct be_adapter *adapter,
@@ -1719,10 +1747,11 @@ extern int be_cmd_write_flashrom(struct be_adapter *adapter,
1719 struct be_dma_mem *cmd, u32 flash_oper, 1747 struct be_dma_mem *cmd, u32 flash_oper,
1720 u32 flash_opcode, u32 buf_size); 1748 u32 flash_opcode, u32 buf_size);
1721extern int lancer_cmd_write_object(struct be_adapter *adapter, 1749extern int lancer_cmd_write_object(struct be_adapter *adapter,
1722 struct be_dma_mem *cmd, 1750 struct be_dma_mem *cmd,
1723 u32 data_size, u32 data_offset, 1751 u32 data_size, u32 data_offset,
1724 const char *obj_name, 1752 const char *obj_name,
1725 u32 *data_written, u8 *addn_status); 1753 u32 *data_written, u8 *change_status,
1754 u8 *addn_status);
1726int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, 1755int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
1727 u32 data_size, u32 data_offset, const char *obj_name, 1756 u32 data_size, u32 data_offset, const char *obj_name,
1728 u32 *data_read, u32 *eof, u8 *addn_status); 1757 u32 *data_read, u32 *eof, u8 *addn_status);
@@ -1745,14 +1774,15 @@ extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
1745 u8 loopback_type, u8 enable); 1774 u8 loopback_type, u8 enable);
1746extern int be_cmd_get_phy_info(struct be_adapter *adapter); 1775extern int be_cmd_get_phy_info(struct be_adapter *adapter);
1747extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain); 1776extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
1748extern void be_detect_dump_ue(struct be_adapter *adapter); 1777extern void be_detect_error(struct be_adapter *adapter);
1749extern int be_cmd_get_die_temperature(struct be_adapter *adapter); 1778extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
1750extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter); 1779extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
1751extern int be_cmd_req_native_mode(struct be_adapter *adapter); 1780extern int be_cmd_req_native_mode(struct be_adapter *adapter);
1752extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size); 1781extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
1753extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf); 1782extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
1754extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain, 1783extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
1755 bool *pmac_id_active, u32 *pmac_id, u8 *mac); 1784 bool *pmac_id_active, u32 *pmac_id,
1785 u8 domain);
1756extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, 1786extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
1757 u8 mac_count, u32 domain); 1787 u8 mac_count, u32 domain);
1758extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, 1788extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
@@ -1765,4 +1795,7 @@ extern int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
1765extern int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter, 1795extern int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
1766 struct be_dma_mem *cmd, 1796 struct be_dma_mem *cmd,
1767 struct be_fat_conf_params *cfgs); 1797 struct be_fat_conf_params *cfgs);
1798extern int lancer_wait_ready(struct be_adapter *adapter);
1799extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
1800extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
1768 1801
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 63e51d476900..e34be1c7ae8a 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -648,7 +648,7 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
648 struct be_adapter *adapter = netdev_priv(netdev); 648 struct be_adapter *adapter = netdev_priv(netdev);
649 int status; 649 int status;
650 650
651 if (ecmd->autoneg != 0) 651 if (ecmd->autoneg != adapter->phy.fc_autoneg)
652 return -EINVAL; 652 return -EINVAL;
653 adapter->tx_fc = ecmd->tx_pause; 653 adapter->tx_fc = ecmd->tx_pause;
654 adapter->rx_fc = ecmd->rx_pause; 654 adapter->rx_fc = ecmd->rx_pause;
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index d9fb0c501fa1..b755f7061dce 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -45,20 +45,19 @@
45#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */ 45#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */
46 46
47 47
48/* Lancer SLIPORT_CONTROL SLIPORT_STATUS registers */ 48/* Lancer SLIPORT registers */
49#define SLIPORT_STATUS_OFFSET 0x404 49#define SLIPORT_STATUS_OFFSET 0x404
50#define SLIPORT_CONTROL_OFFSET 0x408 50#define SLIPORT_CONTROL_OFFSET 0x408
51#define SLIPORT_ERROR1_OFFSET 0x40C 51#define SLIPORT_ERROR1_OFFSET 0x40C
52#define SLIPORT_ERROR2_OFFSET 0x410 52#define SLIPORT_ERROR2_OFFSET 0x410
53#define PHYSDEV_CONTROL_OFFSET 0x414
53 54
54#define SLIPORT_STATUS_ERR_MASK 0x80000000 55#define SLIPORT_STATUS_ERR_MASK 0x80000000
55#define SLIPORT_STATUS_RN_MASK 0x01000000 56#define SLIPORT_STATUS_RN_MASK 0x01000000
56#define SLIPORT_STATUS_RDY_MASK 0x00800000 57#define SLIPORT_STATUS_RDY_MASK 0x00800000
57
58
59#define SLI_PORT_CONTROL_IP_MASK 0x08000000 58#define SLI_PORT_CONTROL_IP_MASK 0x08000000
60 59#define PHYSDEV_CONTROL_FW_RESET_MASK 0x00000002
61#define PCICFG_CUST_SCRATCHPAD_CSR 0x1EC 60#define PHYSDEV_CONTROL_INP_MASK 0x40000000
62 61
63/********* Memory BAR register ************/ 62/********* Memory BAR register ************/
64#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc 63#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 501dfa9c88ec..4d9677174490 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -155,7 +155,7 @@ static void be_intr_set(struct be_adapter *adapter, bool enable)
155{ 155{
156 u32 reg, enabled; 156 u32 reg, enabled;
157 157
158 if (adapter->eeh_err) 158 if (adapter->eeh_error)
159 return; 159 return;
160 160
161 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, 161 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
@@ -201,7 +201,7 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid,
201 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << 201 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
202 DB_EQ_RING_ID_EXT_MASK_SHIFT); 202 DB_EQ_RING_ID_EXT_MASK_SHIFT);
203 203
204 if (adapter->eeh_err) 204 if (adapter->eeh_error)
205 return; 205 return;
206 206
207 if (arm) 207 if (arm)
@@ -220,7 +220,7 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
220 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) << 220 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
221 DB_CQ_RING_ID_EXT_MASK_SHIFT); 221 DB_CQ_RING_ID_EXT_MASK_SHIFT);
222 222
223 if (adapter->eeh_err) 223 if (adapter->eeh_error)
224 return; 224 return;
225 225
226 if (arm) 226 if (arm)
@@ -558,6 +558,7 @@ static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
558 wrb->frag_pa_hi = upper_32_bits(addr); 558 wrb->frag_pa_hi = upper_32_bits(addr);
559 wrb->frag_pa_lo = addr & 0xFFFFFFFF; 559 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
560 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK; 560 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
561 wrb->rsvd0 = 0;
561} 562}
562 563
563static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter, 564static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
@@ -576,6 +577,11 @@ static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
576 return vlan_tag; 577 return vlan_tag;
577} 578}
578 579
580static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
581{
582 return vlan_tx_tag_present(skb) || adapter->pvid;
583}
584
579static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr, 585static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
580 struct sk_buff *skb, u32 wrb_cnt, u32 len) 586 struct sk_buff *skb, u32 wrb_cnt, u32 len)
581{ 587{
@@ -703,33 +709,56 @@ dma_err:
703 return 0; 709 return 0;
704} 710}
705 711
712static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
713 struct sk_buff *skb)
714{
715 u16 vlan_tag = 0;
716
717 skb = skb_share_check(skb, GFP_ATOMIC);
718 if (unlikely(!skb))
719 return skb;
720
721 if (vlan_tx_tag_present(skb)) {
722 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
723 __vlan_put_tag(skb, vlan_tag);
724 skb->vlan_tci = 0;
725 }
726
727 return skb;
728}
729
706static netdev_tx_t be_xmit(struct sk_buff *skb, 730static netdev_tx_t be_xmit(struct sk_buff *skb,
707 struct net_device *netdev) 731 struct net_device *netdev)
708{ 732{
709 struct be_adapter *adapter = netdev_priv(netdev); 733 struct be_adapter *adapter = netdev_priv(netdev);
710 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)]; 734 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
711 struct be_queue_info *txq = &txo->q; 735 struct be_queue_info *txq = &txo->q;
736 struct iphdr *ip = NULL;
712 u32 wrb_cnt = 0, copied = 0; 737 u32 wrb_cnt = 0, copied = 0;
713 u32 start = txq->head; 738 u32 start = txq->head, eth_hdr_len;
714 bool dummy_wrb, stopped = false; 739 bool dummy_wrb, stopped = false;
715 740
716 /* For vlan tagged pkts, BE 741 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
717 * 1) calculates checksum even when CSO is not requested 742 VLAN_ETH_HLEN : ETH_HLEN;
718 * 2) calculates checksum wrongly for padded pkt less than 743
719 * 60 bytes long. 744 /* HW has a bug which considers padding bytes as legal
720 * As a workaround disable TX vlan offloading in such cases. 745 * and modifies the IPv4 hdr's 'tot_len' field
721 */ 746 */
722 if (unlikely(vlan_tx_tag_present(skb) && 747 if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
723 (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) { 748 is_ipv4_pkt(skb)) {
724 skb = skb_share_check(skb, GFP_ATOMIC); 749 ip = (struct iphdr *)ip_hdr(skb);
725 if (unlikely(!skb)) 750 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
726 goto tx_drop; 751 }
727 752
728 skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb)); 753 /* HW has a bug wherein it will calculate CSUM for VLAN
754 * pkts even though it is disabled.
755 * Manually insert VLAN in pkt.
756 */
757 if (skb->ip_summed != CHECKSUM_PARTIAL &&
758 be_vlan_tag_chk(adapter, skb)) {
759 skb = be_insert_vlan_in_pkt(adapter, skb);
729 if (unlikely(!skb)) 760 if (unlikely(!skb))
730 goto tx_drop; 761 goto tx_drop;
731
732 skb->vlan_tci = 0;
733 } 762 }
734 763
735 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb); 764 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
@@ -786,19 +815,12 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
786 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE. 815 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
787 * If the user configures more, place BE in vlan promiscuous mode. 816 * If the user configures more, place BE in vlan promiscuous mode.
788 */ 817 */
789static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num) 818static int be_vid_config(struct be_adapter *adapter)
790{ 819{
791 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num]; 820 u16 vids[BE_NUM_VLANS_SUPPORTED];
792 u16 vtag[BE_NUM_VLANS_SUPPORTED]; 821 u16 num = 0, i;
793 u16 ntags = 0, i;
794 int status = 0; 822 int status = 0;
795 823
796 if (vf) {
797 vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
798 status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
799 1, 1, 0);
800 }
801
802 /* No need to further configure vids if in promiscuous mode */ 824 /* No need to further configure vids if in promiscuous mode */
803 if (adapter->promiscuous) 825 if (adapter->promiscuous)
804 return 0; 826 return 0;
@@ -809,10 +831,10 @@ static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
809 /* Construct VLAN Table to give to HW */ 831 /* Construct VLAN Table to give to HW */
810 for (i = 0; i < VLAN_N_VID; i++) 832 for (i = 0; i < VLAN_N_VID; i++)
811 if (adapter->vlan_tag[i]) 833 if (adapter->vlan_tag[i])
812 vtag[ntags++] = cpu_to_le16(i); 834 vids[num++] = cpu_to_le16(i);
813 835
814 status = be_cmd_vlan_config(adapter, adapter->if_handle, 836 status = be_cmd_vlan_config(adapter, adapter->if_handle,
815 vtag, ntags, 1, 0); 837 vids, num, 1, 0);
816 838
817 /* Set to VLAN promisc mode as setting VLAN filter failed */ 839 /* Set to VLAN promisc mode as setting VLAN filter failed */
818 if (status) { 840 if (status) {
@@ -841,7 +863,7 @@ static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
841 863
842 adapter->vlan_tag[vid] = 1; 864 adapter->vlan_tag[vid] = 1;
843 if (adapter->vlans_added <= (adapter->max_vlans + 1)) 865 if (adapter->vlans_added <= (adapter->max_vlans + 1))
844 status = be_vid_config(adapter, false, 0); 866 status = be_vid_config(adapter);
845 867
846 if (!status) 868 if (!status)
847 adapter->vlans_added++; 869 adapter->vlans_added++;
@@ -863,7 +885,7 @@ static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
863 885
864 adapter->vlan_tag[vid] = 0; 886 adapter->vlan_tag[vid] = 0;
865 if (adapter->vlans_added <= adapter->max_vlans) 887 if (adapter->vlans_added <= adapter->max_vlans)
866 status = be_vid_config(adapter, false, 0); 888 status = be_vid_config(adapter);
867 889
868 if (!status) 890 if (!status)
869 adapter->vlans_added--; 891 adapter->vlans_added--;
@@ -890,7 +912,7 @@ static void be_set_rx_mode(struct net_device *netdev)
890 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF); 912 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
891 913
892 if (adapter->vlans_added) 914 if (adapter->vlans_added)
893 be_vid_config(adapter, false, 0); 915 be_vid_config(adapter);
894 } 916 }
895 917
896 /* Enable multicast promisc if num configured exceeds what we support */ 918 /* Enable multicast promisc if num configured exceeds what we support */
@@ -1057,13 +1079,16 @@ static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1057 u16 offset, stride; 1079 u16 offset, stride;
1058 1080
1059 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 1081 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
1082 if (!pos)
1083 return 0;
1060 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset); 1084 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1061 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride); 1085 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1062 1086
1063 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL); 1087 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1064 while (dev) { 1088 while (dev) {
1065 vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF; 1089 vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
1066 if (dev->is_virtfn && dev->devfn == vf_fn) { 1090 if (dev->is_virtfn && dev->devfn == vf_fn &&
1091 dev->bus->number == pdev->bus->number) {
1067 vfs++; 1092 vfs++;
1068 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) 1093 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1069 assigned_vfs++; 1094 assigned_vfs++;
@@ -1203,16 +1228,16 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1203 /* Copy data in the first descriptor of this completion */ 1228 /* Copy data in the first descriptor of this completion */
1204 curr_frag_len = min(rxcp->pkt_size, rx_frag_size); 1229 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1205 1230
1206 /* Copy the header portion into skb_data */
1207 hdr_len = min(BE_HDR_LEN, curr_frag_len);
1208 memcpy(skb->data, start, hdr_len);
1209 skb->len = curr_frag_len; 1231 skb->len = curr_frag_len;
1210 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */ 1232 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1233 memcpy(skb->data, start, curr_frag_len);
1211 /* Complete packet has now been moved to data */ 1234 /* Complete packet has now been moved to data */
1212 put_page(page_info->page); 1235 put_page(page_info->page);
1213 skb->data_len = 0; 1236 skb->data_len = 0;
1214 skb->tail += curr_frag_len; 1237 skb->tail += curr_frag_len;
1215 } else { 1238 } else {
1239 hdr_len = ETH_HLEN;
1240 memcpy(skb->data, start, hdr_len);
1216 skb_shinfo(skb)->nr_frags = 1; 1241 skb_shinfo(skb)->nr_frags = 1;
1217 skb_frag_set_page(skb, 0, page_info->page); 1242 skb_frag_set_page(skb, 0, page_info->page);
1218 skb_shinfo(skb)->frags[0].page_offset = 1243 skb_shinfo(skb)->frags[0].page_offset =
@@ -1709,9 +1734,10 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
1709 int i; 1734 int i;
1710 1735
1711 for_all_evt_queues(adapter, eqo, i) { 1736 for_all_evt_queues(adapter, eqo, i) {
1712 be_eq_clean(eqo); 1737 if (eqo->q.created) {
1713 if (eqo->q.created) 1738 be_eq_clean(eqo);
1714 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ); 1739 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1740 }
1715 be_queue_free(adapter, &eqo->q); 1741 be_queue_free(adapter, &eqo->q);
1716 } 1742 }
1717} 1743}
@@ -1898,6 +1924,12 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
1898 */ 1924 */
1899 adapter->num_rx_qs = (num_irqs(adapter) > 1) ? 1925 adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1900 num_irqs(adapter) + 1 : 1; 1926 num_irqs(adapter) + 1 : 1;
1927 if (adapter->num_rx_qs != MAX_RX_QS) {
1928 rtnl_lock();
1929 netif_set_real_num_rx_queues(adapter->netdev,
1930 adapter->num_rx_qs);
1931 rtnl_unlock();
1932 }
1901 1933
1902 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE; 1934 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1903 for_all_rx_queues(adapter, rxo, i) { 1935 for_all_rx_queues(adapter, rxo, i) {
@@ -2067,13 +2099,13 @@ int be_poll(struct napi_struct *napi, int budget)
2067 return max_work; 2099 return max_work;
2068} 2100}
2069 2101
2070void be_detect_dump_ue(struct be_adapter *adapter) 2102void be_detect_error(struct be_adapter *adapter)
2071{ 2103{
2072 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0; 2104 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2073 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0; 2105 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2074 u32 i; 2106 u32 i;
2075 2107
2076 if (adapter->eeh_err || adapter->ue_detected) 2108 if (be_crit_error(adapter))
2077 return; 2109 return;
2078 2110
2079 if (lancer_chip(adapter)) { 2111 if (lancer_chip(adapter)) {
@@ -2094,16 +2126,24 @@ void be_detect_dump_ue(struct be_adapter *adapter)
2094 pci_read_config_dword(adapter->pdev, 2126 pci_read_config_dword(adapter->pdev,
2095 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask); 2127 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2096 2128
2097 ue_lo = (ue_lo & (~ue_lo_mask)); 2129 ue_lo = (ue_lo & ~ue_lo_mask);
2098 ue_hi = (ue_hi & (~ue_hi_mask)); 2130 ue_hi = (ue_hi & ~ue_hi_mask);
2099 } 2131 }
2100 2132
2101 if (ue_lo || ue_hi || 2133 if (ue_lo || ue_hi ||
2102 sliport_status & SLIPORT_STATUS_ERR_MASK) { 2134 sliport_status & SLIPORT_STATUS_ERR_MASK) {
2103 adapter->ue_detected = true; 2135 adapter->hw_error = true;
2104 adapter->eeh_err = true; 2136 dev_err(&adapter->pdev->dev,
2137 "Error detected in the card\n");
2138 }
2139
2140 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2141 dev_err(&adapter->pdev->dev,
2142 "ERR: sliport status 0x%x\n", sliport_status);
2143 dev_err(&adapter->pdev->dev,
2144 "ERR: sliport error1 0x%x\n", sliport_err1);
2105 dev_err(&adapter->pdev->dev, 2145 dev_err(&adapter->pdev->dev,
2106 "Unrecoverable error in the card\n"); 2146 "ERR: sliport error2 0x%x\n", sliport_err2);
2107 } 2147 }
2108 2148
2109 if (ue_lo) { 2149 if (ue_lo) {
@@ -2113,6 +2153,7 @@ void be_detect_dump_ue(struct be_adapter *adapter)
2113 "UE: %s bit set\n", ue_status_low_desc[i]); 2153 "UE: %s bit set\n", ue_status_low_desc[i]);
2114 } 2154 }
2115 } 2155 }
2156
2116 if (ue_hi) { 2157 if (ue_hi) {
2117 for (i = 0; ue_hi; ue_hi >>= 1, i++) { 2158 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2118 if (ue_hi & 1) 2159 if (ue_hi & 1)
@@ -2121,14 +2162,6 @@ void be_detect_dump_ue(struct be_adapter *adapter)
2121 } 2162 }
2122 } 2163 }
2123 2164
2124 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2125 dev_err(&adapter->pdev->dev,
2126 "sliport status 0x%x\n", sliport_status);
2127 dev_err(&adapter->pdev->dev,
2128 "sliport error1 0x%x\n", sliport_err1);
2129 dev_err(&adapter->pdev->dev,
2130 "sliport error2 0x%x\n", sliport_err2);
2131 }
2132} 2165}
2133 2166
2134static void be_msix_disable(struct be_adapter *adapter) 2167static void be_msix_disable(struct be_adapter *adapter)
@@ -2141,12 +2174,14 @@ static void be_msix_disable(struct be_adapter *adapter)
2141 2174
2142static uint be_num_rss_want(struct be_adapter *adapter) 2175static uint be_num_rss_want(struct be_adapter *adapter)
2143{ 2176{
2177 u32 num = 0;
2144 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) && 2178 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2145 !sriov_want(adapter) && be_physfn(adapter) && 2179 !sriov_want(adapter) && be_physfn(adapter) &&
2146 !be_is_mc(adapter)) 2180 !be_is_mc(adapter)) {
2147 return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS; 2181 num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2148 else 2182 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2149 return 0; 2183 }
2184 return num;
2150} 2185}
2151 2186
2152static void be_msix_enable(struct be_adapter *adapter) 2187static void be_msix_enable(struct be_adapter *adapter)
@@ -2540,11 +2575,7 @@ static int be_clear(struct be_adapter *adapter)
2540 be_tx_queues_destroy(adapter); 2575 be_tx_queues_destroy(adapter);
2541 be_evt_queues_destroy(adapter); 2576 be_evt_queues_destroy(adapter);
2542 2577
2543 /* tell fw we're done with firing cmds */
2544 be_cmd_fw_clean(adapter);
2545
2546 be_msix_disable(adapter); 2578 be_msix_disable(adapter);
2547 pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 0);
2548 return 0; 2579 return 0;
2549} 2580}
2550 2581
@@ -2602,8 +2633,8 @@ static int be_vf_setup(struct be_adapter *adapter)
2602 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | 2633 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2603 BE_IF_FLAGS_MULTICAST; 2634 BE_IF_FLAGS_MULTICAST;
2604 for_all_vfs(adapter, vf_cfg, vf) { 2635 for_all_vfs(adapter, vf_cfg, vf) {
2605 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL, 2636 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2606 &vf_cfg->if_handle, NULL, vf + 1); 2637 &vf_cfg->if_handle, vf + 1);
2607 if (status) 2638 if (status)
2608 goto err; 2639 goto err;
2609 } 2640 }
@@ -2643,29 +2674,43 @@ static void be_setup_init(struct be_adapter *adapter)
2643 adapter->phy.forced_port_speed = -1; 2674 adapter->phy.forced_port_speed = -1;
2644} 2675}
2645 2676
2646static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac) 2677static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2678 bool *active_mac, u32 *pmac_id)
2647{ 2679{
2648 u32 pmac_id; 2680 int status = 0;
2649 int status;
2650 bool pmac_id_active;
2651 2681
2652 status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active, 2682 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2653 &pmac_id, mac); 2683 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2654 if (status != 0) 2684 if (!lancer_chip(adapter) && !be_physfn(adapter))
2655 goto do_none; 2685 *active_mac = true;
2686 else
2687 *active_mac = false;
2656 2688
2657 if (pmac_id_active) { 2689 return status;
2658 status = be_cmd_mac_addr_query(adapter, mac, 2690 }
2659 MAC_ADDRESS_TYPE_NETWORK,
2660 false, adapter->if_handle, pmac_id);
2661 2691
2662 if (!status) 2692 if (lancer_chip(adapter)) {
2663 adapter->pmac_id[0] = pmac_id; 2693 status = be_cmd_get_mac_from_list(adapter, mac,
2694 active_mac, pmac_id, 0);
2695 if (*active_mac) {
2696 status = be_cmd_mac_addr_query(adapter, mac,
2697 MAC_ADDRESS_TYPE_NETWORK,
2698 false, if_handle,
2699 *pmac_id);
2700 }
2701 } else if (be_physfn(adapter)) {
2702 /* For BE3, for PF get permanent MAC */
2703 status = be_cmd_mac_addr_query(adapter, mac,
2704 MAC_ADDRESS_TYPE_NETWORK, true,
2705 0, 0);
2706 *active_mac = false;
2664 } else { 2707 } else {
2665 status = be_cmd_pmac_add(adapter, mac, 2708 /* For BE3, for VF get soft MAC assigned by PF*/
2666 adapter->if_handle, &adapter->pmac_id[0], 0); 2709 status = be_cmd_mac_addr_query(adapter, mac,
2710 MAC_ADDRESS_TYPE_NETWORK, false,
2711 if_handle, 0);
2712 *active_mac = true;
2667 } 2713 }
2668do_none:
2669 return status; 2714 return status;
2670} 2715}
2671 2716
@@ -2686,12 +2731,12 @@ static int be_get_config(struct be_adapter *adapter)
2686 2731
2687static int be_setup(struct be_adapter *adapter) 2732static int be_setup(struct be_adapter *adapter)
2688{ 2733{
2689 struct net_device *netdev = adapter->netdev;
2690 struct device *dev = &adapter->pdev->dev; 2734 struct device *dev = &adapter->pdev->dev;
2691 u32 cap_flags, en_flags; 2735 u32 cap_flags, en_flags;
2692 u32 tx_fc, rx_fc; 2736 u32 tx_fc, rx_fc;
2693 int status; 2737 int status;
2694 u8 mac[ETH_ALEN]; 2738 u8 mac[ETH_ALEN];
2739 bool active_mac;
2695 2740
2696 be_setup_init(adapter); 2741 be_setup_init(adapter);
2697 2742
@@ -2717,14 +2762,6 @@ static int be_setup(struct be_adapter *adapter)
2717 if (status) 2762 if (status)
2718 goto err; 2763 goto err;
2719 2764
2720 memset(mac, 0, ETH_ALEN);
2721 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
2722 true /*permanent */, 0, 0);
2723 if (status)
2724 return status;
2725 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2726 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2727
2728 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | 2765 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2729 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS; 2766 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2730 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS | 2767 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
@@ -2734,27 +2771,36 @@ static int be_setup(struct be_adapter *adapter)
2734 cap_flags |= BE_IF_FLAGS_RSS; 2771 cap_flags |= BE_IF_FLAGS_RSS;
2735 en_flags |= BE_IF_FLAGS_RSS; 2772 en_flags |= BE_IF_FLAGS_RSS;
2736 } 2773 }
2774
2775 if (lancer_chip(adapter) && !be_physfn(adapter)) {
2776 en_flags = BE_IF_FLAGS_UNTAGGED |
2777 BE_IF_FLAGS_BROADCAST |
2778 BE_IF_FLAGS_MULTICAST;
2779 cap_flags = en_flags;
2780 }
2781
2737 status = be_cmd_if_create(adapter, cap_flags, en_flags, 2782 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2738 netdev->dev_addr, &adapter->if_handle, 2783 &adapter->if_handle, 0);
2739 &adapter->pmac_id[0], 0);
2740 if (status != 0) 2784 if (status != 0)
2741 goto err; 2785 goto err;
2742 2786
2743 /* The VF's permanent mac queried from card is incorrect. 2787 memset(mac, 0, ETH_ALEN);
2744 * For BEx: Query the mac configued by the PF using if_handle 2788 active_mac = false;
2745 * For Lancer: Get and use mac_list to obtain mac address. 2789 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
2746 */ 2790 &active_mac, &adapter->pmac_id[0]);
2747 if (!be_physfn(adapter)) { 2791 if (status != 0)
2748 if (lancer_chip(adapter)) 2792 goto err;
2749 status = be_add_mac_from_list(adapter, mac); 2793
2750 else 2794 if (!active_mac) {
2751 status = be_cmd_mac_addr_query(adapter, mac, 2795 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2752 MAC_ADDRESS_TYPE_NETWORK, false, 2796 &adapter->pmac_id[0], 0);
2753 adapter->if_handle, 0); 2797 if (status != 0)
2754 if (!status) { 2798 goto err;
2755 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); 2799 }
2756 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); 2800
2757 } 2801 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
2802 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2803 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2758 } 2804 }
2759 2805
2760 status = be_tx_qs_create(adapter); 2806 status = be_tx_qs_create(adapter);
@@ -2763,7 +2809,8 @@ static int be_setup(struct be_adapter *adapter)
2763 2809
2764 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL); 2810 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2765 2811
2766 be_vid_config(adapter, false, 0); 2812 if (adapter->vlans_added)
2813 be_vid_config(adapter);
2767 2814
2768 be_set_rx_mode(adapter->netdev); 2815 be_set_rx_mode(adapter->netdev);
2769 2816
@@ -2773,8 +2820,6 @@ static int be_setup(struct be_adapter *adapter)
2773 be_cmd_set_flow_control(adapter, adapter->tx_fc, 2820 be_cmd_set_flow_control(adapter, adapter->tx_fc,
2774 adapter->rx_fc); 2821 adapter->rx_fc);
2775 2822
2776 pcie_set_readrq(adapter->pdev, 4096);
2777
2778 if (be_physfn(adapter) && num_vfs) { 2823 if (be_physfn(adapter) && num_vfs) {
2779 if (adapter->dev_num_vfs) 2824 if (adapter->dev_num_vfs)
2780 be_vf_setup(adapter); 2825 be_vf_setup(adapter);
@@ -2788,8 +2833,6 @@ static int be_setup(struct be_adapter *adapter)
2788 2833
2789 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); 2834 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2790 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED; 2835 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2791
2792 pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 1);
2793 return 0; 2836 return 0;
2794err: 2837err:
2795 be_clear(adapter); 2838 be_clear(adapter);
@@ -3033,6 +3076,40 @@ static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3033 return 0; 3076 return 0;
3034} 3077}
3035 3078
3079static int lancer_wait_idle(struct be_adapter *adapter)
3080{
3081#define SLIPORT_IDLE_TIMEOUT 30
3082 u32 reg_val;
3083 int status = 0, i;
3084
3085 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3086 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3087 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3088 break;
3089
3090 ssleep(1);
3091 }
3092
3093 if (i == SLIPORT_IDLE_TIMEOUT)
3094 status = -1;
3095
3096 return status;
3097}
3098
3099static int lancer_fw_reset(struct be_adapter *adapter)
3100{
3101 int status = 0;
3102
3103 status = lancer_wait_idle(adapter);
3104 if (status)
3105 return status;
3106
3107 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3108 PHYSDEV_CONTROL_OFFSET);
3109
3110 return status;
3111}
3112
3036static int lancer_fw_download(struct be_adapter *adapter, 3113static int lancer_fw_download(struct be_adapter *adapter,
3037 const struct firmware *fw) 3114 const struct firmware *fw)
3038{ 3115{
@@ -3047,6 +3124,7 @@ static int lancer_fw_download(struct be_adapter *adapter,
3047 u32 offset = 0; 3124 u32 offset = 0;
3048 int status = 0; 3125 int status = 0;
3049 u8 add_status = 0; 3126 u8 add_status = 0;
3127 u8 change_status;
3050 3128
3051 if (!IS_ALIGNED(fw->size, sizeof(u32))) { 3129 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3052 dev_err(&adapter->pdev->dev, 3130 dev_err(&adapter->pdev->dev,
@@ -3079,9 +3157,10 @@ static int lancer_fw_download(struct be_adapter *adapter,
3079 memcpy(dest_image_ptr, data_ptr, chunk_size); 3157 memcpy(dest_image_ptr, data_ptr, chunk_size);
3080 3158
3081 status = lancer_cmd_write_object(adapter, &flash_cmd, 3159 status = lancer_cmd_write_object(adapter, &flash_cmd,
3082 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION, 3160 chunk_size, offset,
3083 &data_written, &add_status); 3161 LANCER_FW_DOWNLOAD_LOCATION,
3084 3162 &data_written, &change_status,
3163 &add_status);
3085 if (status) 3164 if (status)
3086 break; 3165 break;
3087 3166
@@ -3093,8 +3172,10 @@ static int lancer_fw_download(struct be_adapter *adapter,
3093 if (!status) { 3172 if (!status) {
3094 /* Commit the FW written */ 3173 /* Commit the FW written */
3095 status = lancer_cmd_write_object(adapter, &flash_cmd, 3174 status = lancer_cmd_write_object(adapter, &flash_cmd,
3096 0, offset, LANCER_FW_DOWNLOAD_LOCATION, 3175 0, offset,
3097 &data_written, &add_status); 3176 LANCER_FW_DOWNLOAD_LOCATION,
3177 &data_written, &change_status,
3178 &add_status);
3098 } 3179 }
3099 3180
3100 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va, 3181 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
@@ -3107,6 +3188,20 @@ static int lancer_fw_download(struct be_adapter *adapter,
3107 goto lancer_fw_exit; 3188 goto lancer_fw_exit;
3108 } 3189 }
3109 3190
3191 if (change_status == LANCER_FW_RESET_NEEDED) {
3192 status = lancer_fw_reset(adapter);
3193 if (status) {
3194 dev_err(&adapter->pdev->dev,
3195 "Adapter busy for FW reset.\n"
3196 "New FW will not be active.\n");
3197 goto lancer_fw_exit;
3198 }
3199 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3200 dev_err(&adapter->pdev->dev,
3201 "System reboot required for new FW"
3202 " to be active\n");
3203 }
3204
3110 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n"); 3205 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3111lancer_fw_exit: 3206lancer_fw_exit:
3112 return status; 3207 return status;
@@ -3435,10 +3530,15 @@ static void __devexit be_remove(struct pci_dev *pdev)
3435 3530
3436 be_roce_dev_remove(adapter); 3531 be_roce_dev_remove(adapter);
3437 3532
3533 cancel_delayed_work_sync(&adapter->func_recovery_work);
3534
3438 unregister_netdev(adapter->netdev); 3535 unregister_netdev(adapter->netdev);
3439 3536
3440 be_clear(adapter); 3537 be_clear(adapter);
3441 3538
3539 /* tell fw we're done with firing cmds */
3540 be_cmd_fw_clean(adapter);
3541
3442 be_stats_cleanup(adapter); 3542 be_stats_cleanup(adapter);
3443 3543
3444 be_ctrl_cleanup(adapter); 3544 be_ctrl_cleanup(adapter);
@@ -3530,6 +3630,9 @@ static int be_get_initial_config(struct be_adapter *adapter)
3530 if (be_is_wol_supported(adapter)) 3630 if (be_is_wol_supported(adapter))
3531 adapter->wol = true; 3631 adapter->wol = true;
3532 3632
3633 /* Must be a power of 2 or else MODULO will BUG_ON */
3634 adapter->be_get_temp_freq = 64;
3635
3533 level = be_get_fw_log_level(adapter); 3636 level = be_get_fw_log_level(adapter);
3534 adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0; 3637 adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3535 3638
@@ -3585,101 +3688,68 @@ static int be_dev_type_check(struct be_adapter *adapter)
3585 return 0; 3688 return 0;
3586} 3689}
3587 3690
3588static int lancer_wait_ready(struct be_adapter *adapter) 3691static int lancer_recover_func(struct be_adapter *adapter)
3589{ 3692{
3590#define SLIPORT_READY_TIMEOUT 30 3693 int status;
3591 u32 sliport_status;
3592 int status = 0, i;
3593 3694
3594 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) { 3695 status = lancer_test_and_set_rdy_state(adapter);
3595 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); 3696 if (status)
3596 if (sliport_status & SLIPORT_STATUS_RDY_MASK) 3697 goto err;
3597 break;
3598 3698
3599 msleep(1000); 3699 if (netif_running(adapter->netdev))
3600 } 3700 be_close(adapter->netdev);
3601 3701
3602 if (i == SLIPORT_READY_TIMEOUT) 3702 be_clear(adapter);
3603 status = -1;
3604 3703
3605 return status; 3704 adapter->hw_error = false;
3606} 3705 adapter->fw_timeout = false;
3607 3706
3608static int lancer_test_and_set_rdy_state(struct be_adapter *adapter) 3707 status = be_setup(adapter);
3609{ 3708 if (status)
3610 int status; 3709 goto err;
3611 u32 sliport_status, err, reset_needed; 3710
3612 status = lancer_wait_ready(adapter); 3711 if (netif_running(adapter->netdev)) {
3613 if (!status) { 3712 status = be_open(adapter->netdev);
3614 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); 3713 if (status)
3615 err = sliport_status & SLIPORT_STATUS_ERR_MASK; 3714 goto err;
3616 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3617 if (err && reset_needed) {
3618 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3619 adapter->db + SLIPORT_CONTROL_OFFSET);
3620
3621 /* check adapter has corrected the error */
3622 status = lancer_wait_ready(adapter);
3623 sliport_status = ioread32(adapter->db +
3624 SLIPORT_STATUS_OFFSET);
3625 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3626 SLIPORT_STATUS_RN_MASK);
3627 if (status || sliport_status)
3628 status = -1;
3629 } else if (err || reset_needed) {
3630 status = -1;
3631 }
3632 } 3715 }
3716
3717 dev_err(&adapter->pdev->dev,
3718 "Adapter SLIPORT recovery succeeded\n");
3719 return 0;
3720err:
3721 dev_err(&adapter->pdev->dev,
3722 "Adapter SLIPORT recovery failed\n");
3723
3633 return status; 3724 return status;
3634} 3725}
3635 3726
3636static void lancer_test_and_recover_fn_err(struct be_adapter *adapter) 3727static void be_func_recovery_task(struct work_struct *work)
3637{ 3728{
3729 struct be_adapter *adapter =
3730 container_of(work, struct be_adapter, func_recovery_work.work);
3638 int status; 3731 int status;
3639 u32 sliport_status;
3640
3641 if (adapter->eeh_err || adapter->ue_detected)
3642 return;
3643 3732
3644 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); 3733 be_detect_error(adapter);
3645 3734
3646 if (sliport_status & SLIPORT_STATUS_ERR_MASK) { 3735 if (adapter->hw_error && lancer_chip(adapter)) {
3647 dev_err(&adapter->pdev->dev,
3648 "Adapter in error state."
3649 "Trying to recover.\n");
3650 3736
3651 status = lancer_test_and_set_rdy_state(adapter); 3737 if (adapter->eeh_error)
3652 if (status) 3738 goto out;
3653 goto err;
3654 3739
3740 rtnl_lock();
3655 netif_device_detach(adapter->netdev); 3741 netif_device_detach(adapter->netdev);
3742 rtnl_unlock();
3656 3743
3657 if (netif_running(adapter->netdev)) 3744 status = lancer_recover_func(adapter);
3658 be_close(adapter->netdev);
3659
3660 be_clear(adapter);
3661
3662 adapter->fw_timeout = false;
3663
3664 status = be_setup(adapter);
3665 if (status)
3666 goto err;
3667
3668 if (netif_running(adapter->netdev)) {
3669 status = be_open(adapter->netdev);
3670 if (status)
3671 goto err;
3672 }
3673
3674 netif_device_attach(adapter->netdev);
3675 3745
3676 dev_err(&adapter->pdev->dev, 3746 if (!status)
3677 "Adapter error recovery succeeded\n"); 3747 netif_device_attach(adapter->netdev);
3678 } 3748 }
3679 return; 3749
3680err: 3750out:
3681 dev_err(&adapter->pdev->dev, 3751 schedule_delayed_work(&adapter->func_recovery_work,
3682 "Adapter error recovery failed\n"); 3752 msecs_to_jiffies(1000));
3683} 3753}
3684 3754
3685static void be_worker(struct work_struct *work) 3755static void be_worker(struct work_struct *work)
@@ -3690,11 +3760,6 @@ static void be_worker(struct work_struct *work)
3690 struct be_eq_obj *eqo; 3760 struct be_eq_obj *eqo;
3691 int i; 3761 int i;
3692 3762
3693 if (lancer_chip(adapter))
3694 lancer_test_and_recover_fn_err(adapter);
3695
3696 be_detect_dump_ue(adapter);
3697
3698 /* when interrupts are not yet enabled, just reap any pending 3763 /* when interrupts are not yet enabled, just reap any pending
3699 * mcc completions */ 3764 * mcc completions */
3700 if (!netif_running(adapter->netdev)) { 3765 if (!netif_running(adapter->netdev)) {
@@ -3710,6 +3775,9 @@ static void be_worker(struct work_struct *work)
3710 be_cmd_get_stats(adapter, &adapter->stats_cmd); 3775 be_cmd_get_stats(adapter, &adapter->stats_cmd);
3711 } 3776 }
3712 3777
3778 if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
3779 be_cmd_get_die_temperature(adapter);
3780
3713 for_all_rx_queues(adapter, rxo, i) { 3781 for_all_rx_queues(adapter, rxo, i) {
3714 if (rxo->rx_post_starved) { 3782 if (rxo->rx_post_starved) {
3715 rxo->rx_post_starved = false; 3783 rxo->rx_post_starved = false;
@@ -3727,10 +3795,7 @@ reschedule:
3727 3795
3728static bool be_reset_required(struct be_adapter *adapter) 3796static bool be_reset_required(struct be_adapter *adapter)
3729{ 3797{
3730 u32 reg; 3798 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
3731
3732 pci_read_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, &reg);
3733 return reg;
3734} 3799}
3735 3800
3736static int __devinit be_probe(struct pci_dev *pdev, 3801static int __devinit be_probe(struct pci_dev *pdev,
@@ -3739,6 +3804,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
3739 int status = 0; 3804 int status = 0;
3740 struct be_adapter *adapter; 3805 struct be_adapter *adapter;
3741 struct net_device *netdev; 3806 struct net_device *netdev;
3807 char port_name;
3742 3808
3743 status = pci_enable_device(pdev); 3809 status = pci_enable_device(pdev);
3744 if (status) 3810 if (status)
@@ -3749,7 +3815,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
3749 goto disable_dev; 3815 goto disable_dev;
3750 pci_set_master(pdev); 3816 pci_set_master(pdev);
3751 3817
3752 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS); 3818 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
3753 if (netdev == NULL) { 3819 if (netdev == NULL) {
3754 status = -ENOMEM; 3820 status = -ENOMEM;
3755 goto rel_reg; 3821 goto rel_reg;
@@ -3780,22 +3846,9 @@ static int __devinit be_probe(struct pci_dev *pdev,
3780 if (status) 3846 if (status)
3781 goto free_netdev; 3847 goto free_netdev;
3782 3848
3783 if (lancer_chip(adapter)) {
3784 status = lancer_wait_ready(adapter);
3785 if (!status) {
3786 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3787 adapter->db + SLIPORT_CONTROL_OFFSET);
3788 status = lancer_test_and_set_rdy_state(adapter);
3789 }
3790 if (status) {
3791 dev_err(&pdev->dev, "Adapter in non recoverable error\n");
3792 goto ctrl_clean;
3793 }
3794 }
3795
3796 /* sync up with fw's ready state */ 3849 /* sync up with fw's ready state */
3797 if (be_physfn(adapter)) { 3850 if (be_physfn(adapter)) {
3798 status = be_cmd_POST(adapter); 3851 status = be_fw_wait_ready(adapter);
3799 if (status) 3852 if (status)
3800 goto ctrl_clean; 3853 goto ctrl_clean;
3801 } 3854 }
@@ -3826,6 +3879,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
3826 goto stats_clean; 3879 goto stats_clean;
3827 3880
3828 INIT_DELAYED_WORK(&adapter->work, be_worker); 3881 INIT_DELAYED_WORK(&adapter->work, be_worker);
3882 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
3829 adapter->rx_fc = adapter->tx_fc = true; 3883 adapter->rx_fc = adapter->tx_fc = true;
3830 3884
3831 status = be_setup(adapter); 3885 status = be_setup(adapter);
@@ -3839,8 +3893,13 @@ static int __devinit be_probe(struct pci_dev *pdev,
3839 3893
3840 be_roce_dev_add(adapter); 3894 be_roce_dev_add(adapter);
3841 3895
3842 dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev), 3896 schedule_delayed_work(&adapter->func_recovery_work,
3843 adapter->port_num); 3897 msecs_to_jiffies(1000));
3898
3899 be_cmd_query_port_name(adapter, &port_name);
3900
3901 dev_info(&pdev->dev, "%s: %s port %c\n", netdev->name, nic_name(pdev),
3902 port_name);
3844 3903
3845 return 0; 3904 return 0;
3846 3905
@@ -3872,6 +3931,8 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3872 if (adapter->wol) 3931 if (adapter->wol)
3873 be_setup_wol(adapter, true); 3932 be_setup_wol(adapter, true);
3874 3933
3934 cancel_delayed_work_sync(&adapter->func_recovery_work);
3935
3875 netif_device_detach(netdev); 3936 netif_device_detach(netdev);
3876 if (netif_running(netdev)) { 3937 if (netif_running(netdev)) {
3877 rtnl_lock(); 3938 rtnl_lock();
@@ -3912,6 +3973,9 @@ static int be_resume(struct pci_dev *pdev)
3912 be_open(netdev); 3973 be_open(netdev);
3913 rtnl_unlock(); 3974 rtnl_unlock();
3914 } 3975 }
3976
3977 schedule_delayed_work(&adapter->func_recovery_work,
3978 msecs_to_jiffies(1000));
3915 netif_device_attach(netdev); 3979 netif_device_attach(netdev);
3916 3980
3917 if (adapter->wol) 3981 if (adapter->wol)
@@ -3931,6 +3995,7 @@ static void be_shutdown(struct pci_dev *pdev)
3931 return; 3995 return;
3932 3996
3933 cancel_delayed_work_sync(&adapter->work); 3997 cancel_delayed_work_sync(&adapter->work);
3998 cancel_delayed_work_sync(&adapter->func_recovery_work);
3934 3999
3935 netif_device_detach(adapter->netdev); 4000 netif_device_detach(adapter->netdev);
3936 4001
@@ -3950,9 +4015,13 @@ static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3950 4015
3951 dev_err(&adapter->pdev->dev, "EEH error detected\n"); 4016 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3952 4017
3953 adapter->eeh_err = true; 4018 adapter->eeh_error = true;
3954 4019
4020 cancel_delayed_work_sync(&adapter->func_recovery_work);
4021
4022 rtnl_lock();
3955 netif_device_detach(netdev); 4023 netif_device_detach(netdev);
4024 rtnl_unlock();
3956 4025
3957 if (netif_running(netdev)) { 4026 if (netif_running(netdev)) {
3958 rtnl_lock(); 4027 rtnl_lock();
@@ -3980,9 +4049,7 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3980 int status; 4049 int status;
3981 4050
3982 dev_info(&adapter->pdev->dev, "EEH reset\n"); 4051 dev_info(&adapter->pdev->dev, "EEH reset\n");
3983 adapter->eeh_err = false; 4052 be_clear_all_error(adapter);
3984 adapter->ue_detected = false;
3985 adapter->fw_timeout = false;
3986 4053
3987 status = pci_enable_device(pdev); 4054 status = pci_enable_device(pdev);
3988 if (status) 4055 if (status)
@@ -3993,7 +4060,7 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3993 pci_restore_state(pdev); 4060 pci_restore_state(pdev);
3994 4061
3995 /* Check if card is ok and fw is ready */ 4062 /* Check if card is ok and fw is ready */
3996 status = be_cmd_POST(adapter); 4063 status = be_fw_wait_ready(adapter);
3997 if (status) 4064 if (status)
3998 return PCI_ERS_RESULT_DISCONNECT; 4065 return PCI_ERS_RESULT_DISCONNECT;
3999 4066
@@ -4015,6 +4082,10 @@ static void be_eeh_resume(struct pci_dev *pdev)
4015 if (status) 4082 if (status)
4016 goto err; 4083 goto err;
4017 4084
4085 status = be_cmd_reset_function(adapter);
4086 if (status)
4087 goto err;
4088
4018 status = be_setup(adapter); 4089 status = be_setup(adapter);
4019 if (status) 4090 if (status)
4020 goto err; 4091 goto err;
@@ -4024,6 +4095,9 @@ static void be_eeh_resume(struct pci_dev *pdev)
4024 if (status) 4095 if (status)
4025 goto err; 4096 goto err;
4026 } 4097 }
4098
4099 schedule_delayed_work(&adapter->func_recovery_work,
4100 msecs_to_jiffies(1000));
4027 netif_device_attach(netdev); 4101 netif_device_attach(netdev);
4028 return; 4102 return;
4029err: 4103err:
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index a38167810546..94b7bfcdb24e 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -902,7 +902,7 @@ static const struct net_device_ops ethoc_netdev_ops = {
902}; 902};
903 903
904/** 904/**
905 * ethoc_probe() - initialize OpenCores ethernet MAC 905 * ethoc_probe - initialize OpenCores ethernet MAC
906 * pdev: platform device 906 * pdev: platform device
907 */ 907 */
908static int __devinit ethoc_probe(struct platform_device *pdev) 908static int __devinit ethoc_probe(struct platform_device *pdev)
@@ -1057,7 +1057,7 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
1057 /* Check the MAC again for validity, if it still isn't choose and 1057 /* Check the MAC again for validity, if it still isn't choose and
1058 * program a random one. */ 1058 * program a random one. */
1059 if (!is_valid_ether_addr(netdev->dev_addr)) { 1059 if (!is_valid_ether_addr(netdev->dev_addr)) {
1060 random_ether_addr(netdev->dev_addr); 1060 eth_random_addr(netdev->dev_addr);
1061 random_mac = true; 1061 random_mac = true;
1062 } 1062 }
1063 1063
@@ -1140,7 +1140,7 @@ out:
1140} 1140}
1141 1141
1142/** 1142/**
1143 * ethoc_remove() - shutdown OpenCores ethernet MAC 1143 * ethoc_remove - shutdown OpenCores ethernet MAC
1144 * @pdev: platform device 1144 * @pdev: platform device
1145 */ 1145 */
1146static int __devexit ethoc_remove(struct platform_device *pdev) 1146static int __devexit ethoc_remove(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 16b07048274c..74d749e29aab 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -479,9 +479,14 @@ static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
479 rxdes = ftgmac100_current_rxdes(priv); 479 rxdes = ftgmac100_current_rxdes(priv);
480 } while (!done); 480 } while (!done);
481 481
482 if (skb->len <= 64) 482 /* Small frames are copied into linear part of skb to free one page */
483 if (skb->len <= 128) {
483 skb->truesize -= PAGE_SIZE; 484 skb->truesize -= PAGE_SIZE;
484 __pskb_pull_tail(skb, min(skb->len, 64U)); 485 __pskb_pull_tail(skb, skb->len);
486 } else {
487 /* We pull the minimum amount into linear part */
488 __pskb_pull_tail(skb, ETH_HLEN);
489 }
485 skb->protocol = eth_type_trans(skb, netdev); 490 skb->protocol = eth_type_trans(skb, netdev);
486 491
487 netdev->stats.rx_packets++; 492 netdev->stats.rx_packets++;
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 829b1092fd78..b901a01e3fa5 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -441,11 +441,14 @@ static bool ftmac100_rx_packet(struct ftmac100 *priv, int *processed)
441 skb->len += length; 441 skb->len += length;
442 skb->data_len += length; 442 skb->data_len += length;
443 443
444 /* page might be freed in __pskb_pull_tail() */ 444 if (length > 128) {
445 if (length > 64)
446 skb->truesize += PAGE_SIZE; 445 skb->truesize += PAGE_SIZE;
447 __pskb_pull_tail(skb, min(length, 64)); 446 /* We pull the minimum amount into linear part */
448 447 __pskb_pull_tail(skb, ETH_HLEN);
448 } else {
449 /* Small frames are copied into linear part to free one page */
450 __pskb_pull_tail(skb, length);
451 }
449 ftmac100_alloc_rx_page(priv, rxdes, GFP_ATOMIC); 452 ftmac100_alloc_rx_page(priv, rxdes, GFP_ATOMIC);
450 453
451 ftmac100_rx_pointer_advance(priv); 454 ftmac100_rx_pointer_advance(priv);
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index ff7f4c5115a1..fffd20528b5d 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -49,6 +49,7 @@
49#include <linux/of_gpio.h> 49#include <linux/of_gpio.h>
50#include <linux/of_net.h> 50#include <linux/of_net.h>
51#include <linux/pinctrl/consumer.h> 51#include <linux/pinctrl/consumer.h>
52#include <linux/regulator/consumer.h>
52 53
53#include <asm/cacheflush.h> 54#include <asm/cacheflush.h>
54 55
@@ -1388,8 +1389,8 @@ fec_set_mac_address(struct net_device *ndev, void *p)
1388} 1389}
1389 1390
1390#ifdef CONFIG_NET_POLL_CONTROLLER 1391#ifdef CONFIG_NET_POLL_CONTROLLER
1391/* 1392/**
1392 * fec_poll_controller: FEC Poll controller function 1393 * fec_poll_controller - FEC Poll controller function
1393 * @dev: The FEC network adapter 1394 * @dev: The FEC network adapter
1394 * 1395 *
1395 * Polled functionality used by netconsole and others in non interrupt mode 1396 * Polled functionality used by netconsole and others in non interrupt mode
@@ -1506,18 +1507,25 @@ static int __devinit fec_get_phy_mode_dt(struct platform_device *pdev)
1506static void __devinit fec_reset_phy(struct platform_device *pdev) 1507static void __devinit fec_reset_phy(struct platform_device *pdev)
1507{ 1508{
1508 int err, phy_reset; 1509 int err, phy_reset;
1510 int msec = 1;
1509 struct device_node *np = pdev->dev.of_node; 1511 struct device_node *np = pdev->dev.of_node;
1510 1512
1511 if (!np) 1513 if (!np)
1512 return; 1514 return;
1513 1515
1516 of_property_read_u32(np, "phy-reset-duration", &msec);
1517 /* A sane reset duration should not be longer than 1s */
1518 if (msec > 1000)
1519 msec = 1;
1520
1514 phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0); 1521 phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
1515 err = gpio_request_one(phy_reset, GPIOF_OUT_INIT_LOW, "phy-reset"); 1522 err = devm_gpio_request_one(&pdev->dev, phy_reset,
1523 GPIOF_OUT_INIT_LOW, "phy-reset");
1516 if (err) { 1524 if (err) {
1517 pr_debug("FEC: failed to get gpio phy-reset: %d\n", err); 1525 pr_debug("FEC: failed to get gpio phy-reset: %d\n", err);
1518 return; 1526 return;
1519 } 1527 }
1520 msleep(1); 1528 msleep(msec);
1521 gpio_set_value(phy_reset, 1); 1529 gpio_set_value(phy_reset, 1);
1522} 1530}
1523#else /* CONFIG_OF */ 1531#else /* CONFIG_OF */
@@ -1546,6 +1554,7 @@ fec_probe(struct platform_device *pdev)
1546 const struct of_device_id *of_id; 1554 const struct of_device_id *of_id;
1547 static int dev_id; 1555 static int dev_id;
1548 struct pinctrl *pinctrl; 1556 struct pinctrl *pinctrl;
1557 struct regulator *reg_phy;
1549 1558
1550 of_id = of_match_device(fec_dt_ids, &pdev->dev); 1559 of_id = of_match_device(fec_dt_ids, &pdev->dev);
1551 if (of_id) 1560 if (of_id)
@@ -1593,8 +1602,6 @@ fec_probe(struct platform_device *pdev)
1593 fep->phy_interface = ret; 1602 fep->phy_interface = ret;
1594 } 1603 }
1595 1604
1596 fec_reset_phy(pdev);
1597
1598 for (i = 0; i < FEC_IRQ_NUM; i++) { 1605 for (i = 0; i < FEC_IRQ_NUM; i++) {
1599 irq = platform_get_irq(pdev, i); 1606 irq = platform_get_irq(pdev, i);
1600 if (irq < 0) { 1607 if (irq < 0) {
@@ -1634,6 +1641,18 @@ fec_probe(struct platform_device *pdev)
1634 clk_prepare_enable(fep->clk_ahb); 1641 clk_prepare_enable(fep->clk_ahb);
1635 clk_prepare_enable(fep->clk_ipg); 1642 clk_prepare_enable(fep->clk_ipg);
1636 1643
1644 reg_phy = devm_regulator_get(&pdev->dev, "phy");
1645 if (!IS_ERR(reg_phy)) {
1646 ret = regulator_enable(reg_phy);
1647 if (ret) {
1648 dev_err(&pdev->dev,
1649 "Failed to enable phy regulator: %d\n", ret);
1650 goto failed_regulator;
1651 }
1652 }
1653
1654 fec_reset_phy(pdev);
1655
1637 ret = fec_enet_init(ndev); 1656 ret = fec_enet_init(ndev);
1638 if (ret) 1657 if (ret)
1639 goto failed_init; 1658 goto failed_init;
@@ -1655,6 +1674,7 @@ failed_register:
1655 fec_enet_mii_remove(fep); 1674 fec_enet_mii_remove(fep);
1656failed_mii_init: 1675failed_mii_init:
1657failed_init: 1676failed_init:
1677failed_regulator:
1658 clk_disable_unprepare(fep->clk_ahb); 1678 clk_disable_unprepare(fep->clk_ahb);
1659 clk_disable_unprepare(fep->clk_ipg); 1679 clk_disable_unprepare(fep->clk_ipg);
1660failed_pin: 1680failed_pin:
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index f7f0bf5d037b..9527b28d70d1 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -47,6 +47,9 @@
47#include "gianfar.h" 47#include "gianfar.h"
48#include "fsl_pq_mdio.h" 48#include "fsl_pq_mdio.h"
49 49
50/* Number of microseconds to wait for an MII register to respond */
51#define MII_TIMEOUT 1000
52
50struct fsl_pq_mdio_priv { 53struct fsl_pq_mdio_priv {
51 void __iomem *map; 54 void __iomem *map;
52 struct fsl_pq_mdio __iomem *regs; 55 struct fsl_pq_mdio __iomem *regs;
@@ -64,6 +67,8 @@ struct fsl_pq_mdio_priv {
64int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id, 67int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
65 int regnum, u16 value) 68 int regnum, u16 value)
66{ 69{
70 u32 status;
71
67 /* Set the PHY address and the register address we want to write */ 72 /* Set the PHY address and the register address we want to write */
68 out_be32(&regs->miimadd, (mii_id << 8) | regnum); 73 out_be32(&regs->miimadd, (mii_id << 8) | regnum);
69 74
@@ -71,10 +76,10 @@ int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
71 out_be32(&regs->miimcon, value); 76 out_be32(&regs->miimcon, value);
72 77
73 /* Wait for the transaction to finish */ 78 /* Wait for the transaction to finish */
74 while (in_be32(&regs->miimind) & MIIMIND_BUSY) 79 status = spin_event_timeout(!(in_be32(&regs->miimind) & MIIMIND_BUSY),
75 cpu_relax(); 80 MII_TIMEOUT, 0);
76 81
77 return 0; 82 return status ? 0 : -ETIMEDOUT;
78} 83}
79 84
80/* 85/*
@@ -91,6 +96,7 @@ int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
91 int mii_id, int regnum) 96 int mii_id, int regnum)
92{ 97{
93 u16 value; 98 u16 value;
99 u32 status;
94 100
95 /* Set the PHY address and the register address we want to read */ 101 /* Set the PHY address and the register address we want to read */
96 out_be32(&regs->miimadd, (mii_id << 8) | regnum); 102 out_be32(&regs->miimadd, (mii_id << 8) | regnum);
@@ -99,9 +105,12 @@ int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
99 out_be32(&regs->miimcom, 0); 105 out_be32(&regs->miimcom, 0);
100 out_be32(&regs->miimcom, MII_READ_COMMAND); 106 out_be32(&regs->miimcom, MII_READ_COMMAND);
101 107
102 /* Wait for the transaction to finish */ 108 /* Wait for the transaction to finish, normally less than 100us */
103 while (in_be32(&regs->miimind) & (MIIMIND_NOTVALID | MIIMIND_BUSY)) 109 status = spin_event_timeout(!(in_be32(&regs->miimind) &
104 cpu_relax(); 110 (MIIMIND_NOTVALID | MIIMIND_BUSY)),
111 MII_TIMEOUT, 0);
112 if (!status)
113 return -ETIMEDOUT;
105 114
106 /* Grab the value of the register from miimstat */ 115 /* Grab the value of the register from miimstat */
107 value = in_be32(&regs->miimstat); 116 value = in_be32(&regs->miimstat);
@@ -144,7 +153,7 @@ int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
144static int fsl_pq_mdio_reset(struct mii_bus *bus) 153static int fsl_pq_mdio_reset(struct mii_bus *bus)
145{ 154{
146 struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus); 155 struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
147 int timeout = PHY_INIT_TIMEOUT; 156 u32 status;
148 157
149 mutex_lock(&bus->mdio_lock); 158 mutex_lock(&bus->mdio_lock);
150 159
@@ -155,12 +164,12 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus)
155 out_be32(&regs->miimcfg, MIIMCFG_INIT_VALUE); 164 out_be32(&regs->miimcfg, MIIMCFG_INIT_VALUE);
156 165
157 /* Wait until the bus is free */ 166 /* Wait until the bus is free */
158 while ((in_be32(&regs->miimind) & MIIMIND_BUSY) && timeout--) 167 status = spin_event_timeout(!(in_be32(&regs->miimind) & MIIMIND_BUSY),
159 cpu_relax(); 168 MII_TIMEOUT, 0);
160 169
161 mutex_unlock(&bus->mdio_lock); 170 mutex_unlock(&bus->mdio_lock);
162 171
163 if (timeout < 0) { 172 if (!status) {
164 printk(KERN_ERR "%s: The MII Bus is stuck!\n", 173 printk(KERN_ERR "%s: The MII Bus is stuck!\n",
165 bus->name); 174 bus->name);
166 return -EBUSY; 175 return -EBUSY;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index ab1d80ff0791..4605f7246687 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1,5 +1,4 @@
1/* 1/* drivers/net/ethernet/freescale/gianfar.c
2 * drivers/net/ethernet/freescale/gianfar.c
3 * 2 *
4 * Gianfar Ethernet Driver 3 * Gianfar Ethernet Driver
5 * This driver is designed for the non-CPM ethernet controllers 4 * This driver is designed for the non-CPM ethernet controllers
@@ -114,7 +113,7 @@ static void gfar_timeout(struct net_device *dev);
114static int gfar_close(struct net_device *dev); 113static int gfar_close(struct net_device *dev);
115struct sk_buff *gfar_new_skb(struct net_device *dev); 114struct sk_buff *gfar_new_skb(struct net_device *dev);
116static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, 115static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
117 struct sk_buff *skb); 116 struct sk_buff *skb);
118static int gfar_set_mac_address(struct net_device *dev); 117static int gfar_set_mac_address(struct net_device *dev);
119static int gfar_change_mtu(struct net_device *dev, int new_mtu); 118static int gfar_change_mtu(struct net_device *dev, int new_mtu);
120static irqreturn_t gfar_error(int irq, void *dev_id); 119static irqreturn_t gfar_error(int irq, void *dev_id);
@@ -266,8 +265,8 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
266 tx_queue->tx_bd_dma_base = addr; 265 tx_queue->tx_bd_dma_base = addr;
267 tx_queue->dev = ndev; 266 tx_queue->dev = ndev;
268 /* enet DMA only understands physical addresses */ 267 /* enet DMA only understands physical addresses */
269 addr += sizeof(struct txbd8) *tx_queue->tx_ring_size; 268 addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
270 vaddr += sizeof(struct txbd8) *tx_queue->tx_ring_size; 269 vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
271 } 270 }
272 271
273 /* Start the rx descriptor ring where the tx ring leaves off */ 272 /* Start the rx descriptor ring where the tx ring leaves off */
@@ -276,15 +275,16 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
276 rx_queue->rx_bd_base = vaddr; 275 rx_queue->rx_bd_base = vaddr;
277 rx_queue->rx_bd_dma_base = addr; 276 rx_queue->rx_bd_dma_base = addr;
278 rx_queue->dev = ndev; 277 rx_queue->dev = ndev;
279 addr += sizeof (struct rxbd8) * rx_queue->rx_ring_size; 278 addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
280 vaddr += sizeof (struct rxbd8) * rx_queue->rx_ring_size; 279 vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
281 } 280 }
282 281
283 /* Setup the skbuff rings */ 282 /* Setup the skbuff rings */
284 for (i = 0; i < priv->num_tx_queues; i++) { 283 for (i = 0; i < priv->num_tx_queues; i++) {
285 tx_queue = priv->tx_queue[i]; 284 tx_queue = priv->tx_queue[i];
286 tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) * 285 tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
287 tx_queue->tx_ring_size, GFP_KERNEL); 286 tx_queue->tx_ring_size,
287 GFP_KERNEL);
288 if (!tx_queue->tx_skbuff) { 288 if (!tx_queue->tx_skbuff) {
289 netif_err(priv, ifup, ndev, 289 netif_err(priv, ifup, ndev,
290 "Could not allocate tx_skbuff\n"); 290 "Could not allocate tx_skbuff\n");
@@ -298,7 +298,8 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
298 for (i = 0; i < priv->num_rx_queues; i++) { 298 for (i = 0; i < priv->num_rx_queues; i++) {
299 rx_queue = priv->rx_queue[i]; 299 rx_queue = priv->rx_queue[i];
300 rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) * 300 rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
301 rx_queue->rx_ring_size, GFP_KERNEL); 301 rx_queue->rx_ring_size,
302 GFP_KERNEL);
302 303
303 if (!rx_queue->rx_skbuff) { 304 if (!rx_queue->rx_skbuff) {
304 netif_err(priv, ifup, ndev, 305 netif_err(priv, ifup, ndev,
@@ -327,15 +328,15 @@ static void gfar_init_tx_rx_base(struct gfar_private *priv)
327 int i; 328 int i;
328 329
329 baddr = &regs->tbase0; 330 baddr = &regs->tbase0;
330 for(i = 0; i < priv->num_tx_queues; i++) { 331 for (i = 0; i < priv->num_tx_queues; i++) {
331 gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base); 332 gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
332 baddr += 2; 333 baddr += 2;
333 } 334 }
334 335
335 baddr = &regs->rbase0; 336 baddr = &regs->rbase0;
336 for(i = 0; i < priv->num_rx_queues; i++) { 337 for (i = 0; i < priv->num_rx_queues; i++) {
337 gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base); 338 gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
338 baddr += 2; 339 baddr += 2;
339 } 340 }
340} 341}
341 342
@@ -405,7 +406,8 @@ static void gfar_init_mac(struct net_device *ndev)
405 gfar_write(&regs->attreli, attrs); 406 gfar_write(&regs->attreli, attrs);
406 407
407 /* Start with defaults, and add stashing or locking 408 /* Start with defaults, and add stashing or locking
408 * depending on the approprate variables */ 409 * depending on the approprate variables
410 */
409 attrs = ATTR_INIT_SETTINGS; 411 attrs = ATTR_INIT_SETTINGS;
410 412
411 if (priv->bd_stash_en) 413 if (priv->bd_stash_en)
@@ -426,16 +428,16 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev)
426 struct gfar_private *priv = netdev_priv(dev); 428 struct gfar_private *priv = netdev_priv(dev);
427 unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0; 429 unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
428 unsigned long tx_packets = 0, tx_bytes = 0; 430 unsigned long tx_packets = 0, tx_bytes = 0;
429 int i = 0; 431 int i;
430 432
431 for (i = 0; i < priv->num_rx_queues; i++) { 433 for (i = 0; i < priv->num_rx_queues; i++) {
432 rx_packets += priv->rx_queue[i]->stats.rx_packets; 434 rx_packets += priv->rx_queue[i]->stats.rx_packets;
433 rx_bytes += priv->rx_queue[i]->stats.rx_bytes; 435 rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
434 rx_dropped += priv->rx_queue[i]->stats.rx_dropped; 436 rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
435 } 437 }
436 438
437 dev->stats.rx_packets = rx_packets; 439 dev->stats.rx_packets = rx_packets;
438 dev->stats.rx_bytes = rx_bytes; 440 dev->stats.rx_bytes = rx_bytes;
439 dev->stats.rx_dropped = rx_dropped; 441 dev->stats.rx_dropped = rx_dropped;
440 442
441 for (i = 0; i < priv->num_tx_queues; i++) { 443 for (i = 0; i < priv->num_tx_queues; i++) {
@@ -443,7 +445,7 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev)
443 tx_packets += priv->tx_queue[i]->stats.tx_packets; 445 tx_packets += priv->tx_queue[i]->stats.tx_packets;
444 } 446 }
445 447
446 dev->stats.tx_bytes = tx_bytes; 448 dev->stats.tx_bytes = tx_bytes;
447 dev->stats.tx_packets = tx_packets; 449 dev->stats.tx_packets = tx_packets;
448 450
449 return &dev->stats; 451 return &dev->stats;
@@ -468,7 +470,7 @@ static const struct net_device_ops gfar_netdev_ops = {
468 470
469void lock_rx_qs(struct gfar_private *priv) 471void lock_rx_qs(struct gfar_private *priv)
470{ 472{
471 int i = 0x0; 473 int i;
472 474
473 for (i = 0; i < priv->num_rx_queues; i++) 475 for (i = 0; i < priv->num_rx_queues; i++)
474 spin_lock(&priv->rx_queue[i]->rxlock); 476 spin_lock(&priv->rx_queue[i]->rxlock);
@@ -476,7 +478,7 @@ void lock_rx_qs(struct gfar_private *priv)
476 478
477void lock_tx_qs(struct gfar_private *priv) 479void lock_tx_qs(struct gfar_private *priv)
478{ 480{
479 int i = 0x0; 481 int i;
480 482
481 for (i = 0; i < priv->num_tx_queues; i++) 483 for (i = 0; i < priv->num_tx_queues; i++)
482 spin_lock(&priv->tx_queue[i]->txlock); 484 spin_lock(&priv->tx_queue[i]->txlock);
@@ -484,7 +486,7 @@ void lock_tx_qs(struct gfar_private *priv)
484 486
485void unlock_rx_qs(struct gfar_private *priv) 487void unlock_rx_qs(struct gfar_private *priv)
486{ 488{
487 int i = 0x0; 489 int i;
488 490
489 for (i = 0; i < priv->num_rx_queues; i++) 491 for (i = 0; i < priv->num_rx_queues; i++)
490 spin_unlock(&priv->rx_queue[i]->rxlock); 492 spin_unlock(&priv->rx_queue[i]->rxlock);
@@ -492,7 +494,7 @@ void unlock_rx_qs(struct gfar_private *priv)
492 494
493void unlock_tx_qs(struct gfar_private *priv) 495void unlock_tx_qs(struct gfar_private *priv)
494{ 496{
495 int i = 0x0; 497 int i;
496 498
497 for (i = 0; i < priv->num_tx_queues; i++) 499 for (i = 0; i < priv->num_tx_queues; i++)
498 spin_unlock(&priv->tx_queue[i]->txlock); 500 spin_unlock(&priv->tx_queue[i]->txlock);
@@ -508,13 +510,13 @@ static bool gfar_is_vlan_on(struct gfar_private *priv)
508static inline int gfar_uses_fcb(struct gfar_private *priv) 510static inline int gfar_uses_fcb(struct gfar_private *priv)
509{ 511{
510 return gfar_is_vlan_on(priv) || 512 return gfar_is_vlan_on(priv) ||
511 (priv->ndev->features & NETIF_F_RXCSUM) || 513 (priv->ndev->features & NETIF_F_RXCSUM) ||
512 (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER); 514 (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
513} 515}
514 516
515static void free_tx_pointers(struct gfar_private *priv) 517static void free_tx_pointers(struct gfar_private *priv)
516{ 518{
517 int i = 0; 519 int i;
518 520
519 for (i = 0; i < priv->num_tx_queues; i++) 521 for (i = 0; i < priv->num_tx_queues; i++)
520 kfree(priv->tx_queue[i]); 522 kfree(priv->tx_queue[i]);
@@ -522,7 +524,7 @@ static void free_tx_pointers(struct gfar_private *priv)
522 524
523static void free_rx_pointers(struct gfar_private *priv) 525static void free_rx_pointers(struct gfar_private *priv)
524{ 526{
525 int i = 0; 527 int i;
526 528
527 for (i = 0; i < priv->num_rx_queues; i++) 529 for (i = 0; i < priv->num_rx_queues; i++)
528 kfree(priv->rx_queue[i]); 530 kfree(priv->rx_queue[i]);
@@ -530,7 +532,7 @@ static void free_rx_pointers(struct gfar_private *priv)
530 532
531static void unmap_group_regs(struct gfar_private *priv) 533static void unmap_group_regs(struct gfar_private *priv)
532{ 534{
533 int i = 0; 535 int i;
534 536
535 for (i = 0; i < MAXGROUPS; i++) 537 for (i = 0; i < MAXGROUPS; i++)
536 if (priv->gfargrp[i].regs) 538 if (priv->gfargrp[i].regs)
@@ -539,7 +541,7 @@ static void unmap_group_regs(struct gfar_private *priv)
539 541
540static void disable_napi(struct gfar_private *priv) 542static void disable_napi(struct gfar_private *priv)
541{ 543{
542 int i = 0; 544 int i;
543 545
544 for (i = 0; i < priv->num_grps; i++) 546 for (i = 0; i < priv->num_grps; i++)
545 napi_disable(&priv->gfargrp[i].napi); 547 napi_disable(&priv->gfargrp[i].napi);
@@ -547,14 +549,14 @@ static void disable_napi(struct gfar_private *priv)
547 549
548static void enable_napi(struct gfar_private *priv) 550static void enable_napi(struct gfar_private *priv)
549{ 551{
550 int i = 0; 552 int i;
551 553
552 for (i = 0; i < priv->num_grps; i++) 554 for (i = 0; i < priv->num_grps; i++)
553 napi_enable(&priv->gfargrp[i].napi); 555 napi_enable(&priv->gfargrp[i].napi);
554} 556}
555 557
556static int gfar_parse_group(struct device_node *np, 558static int gfar_parse_group(struct device_node *np,
557 struct gfar_private *priv, const char *model) 559 struct gfar_private *priv, const char *model)
558{ 560{
559 u32 *queue_mask; 561 u32 *queue_mask;
560 562
@@ -580,15 +582,13 @@ static int gfar_parse_group(struct device_node *np,
580 priv->gfargrp[priv->num_grps].grp_id = priv->num_grps; 582 priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
581 priv->gfargrp[priv->num_grps].priv = priv; 583 priv->gfargrp[priv->num_grps].priv = priv;
582 spin_lock_init(&priv->gfargrp[priv->num_grps].grplock); 584 spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
583 if(priv->mode == MQ_MG_MODE) { 585 if (priv->mode == MQ_MG_MODE) {
584 queue_mask = (u32 *)of_get_property(np, 586 queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
585 "fsl,rx-bit-map", NULL); 587 priv->gfargrp[priv->num_grps].rx_bit_map = queue_mask ?
586 priv->gfargrp[priv->num_grps].rx_bit_map = 588 *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
587 queue_mask ? *queue_mask :(DEFAULT_MAPPING >> priv->num_grps); 589 queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
588 queue_mask = (u32 *)of_get_property(np, 590 priv->gfargrp[priv->num_grps].tx_bit_map = queue_mask ?
589 "fsl,tx-bit-map", NULL); 591 *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
590 priv->gfargrp[priv->num_grps].tx_bit_map =
591 queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
592 } else { 592 } else {
593 priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF; 593 priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
594 priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF; 594 priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
@@ -652,7 +652,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
652 priv->num_rx_queues = num_rx_qs; 652 priv->num_rx_queues = num_rx_qs;
653 priv->num_grps = 0x0; 653 priv->num_grps = 0x0;
654 654
655 /* Init Rx queue filer rule set linked list*/ 655 /* Init Rx queue filer rule set linked list */
656 INIT_LIST_HEAD(&priv->rx_list.list); 656 INIT_LIST_HEAD(&priv->rx_list.list);
657 priv->rx_list.count = 0; 657 priv->rx_list.count = 0;
658 mutex_init(&priv->rx_queue_access); 658 mutex_init(&priv->rx_queue_access);
@@ -673,7 +673,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
673 } else { 673 } else {
674 priv->mode = SQ_SG_MODE; 674 priv->mode = SQ_SG_MODE;
675 err = gfar_parse_group(np, priv, model); 675 err = gfar_parse_group(np, priv, model);
676 if(err) 676 if (err)
677 goto err_grp_init; 677 goto err_grp_init;
678 } 678 }
679 679
@@ -730,27 +730,27 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
730 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING; 730 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
731 731
732 mac_addr = of_get_mac_address(np); 732 mac_addr = of_get_mac_address(np);
733
733 if (mac_addr) 734 if (mac_addr)
734 memcpy(dev->dev_addr, mac_addr, ETH_ALEN); 735 memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
735 736
736 if (model && !strcasecmp(model, "TSEC")) 737 if (model && !strcasecmp(model, "TSEC"))
737 priv->device_flags = 738 priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
738 FSL_GIANFAR_DEV_HAS_GIGABIT | 739 FSL_GIANFAR_DEV_HAS_COALESCE |
739 FSL_GIANFAR_DEV_HAS_COALESCE | 740 FSL_GIANFAR_DEV_HAS_RMON |
740 FSL_GIANFAR_DEV_HAS_RMON | 741 FSL_GIANFAR_DEV_HAS_MULTI_INTR;
741 FSL_GIANFAR_DEV_HAS_MULTI_INTR; 742
742 if (model && !strcasecmp(model, "eTSEC")) 743 if (model && !strcasecmp(model, "eTSEC"))
743 priv->device_flags = 744 priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
744 FSL_GIANFAR_DEV_HAS_GIGABIT | 745 FSL_GIANFAR_DEV_HAS_COALESCE |
745 FSL_GIANFAR_DEV_HAS_COALESCE | 746 FSL_GIANFAR_DEV_HAS_RMON |
746 FSL_GIANFAR_DEV_HAS_RMON | 747 FSL_GIANFAR_DEV_HAS_MULTI_INTR |
747 FSL_GIANFAR_DEV_HAS_MULTI_INTR | 748 FSL_GIANFAR_DEV_HAS_PADDING |
748 FSL_GIANFAR_DEV_HAS_PADDING | 749 FSL_GIANFAR_DEV_HAS_CSUM |
749 FSL_GIANFAR_DEV_HAS_CSUM | 750 FSL_GIANFAR_DEV_HAS_VLAN |
750 FSL_GIANFAR_DEV_HAS_VLAN | 751 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
751 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | 752 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
752 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH | 753 FSL_GIANFAR_DEV_HAS_TIMER;
753 FSL_GIANFAR_DEV_HAS_TIMER;
754 754
755 ctype = of_get_property(np, "phy-connection-type", NULL); 755 ctype = of_get_property(np, "phy-connection-type", NULL);
756 756
@@ -781,7 +781,7 @@ err_grp_init:
781} 781}
782 782
783static int gfar_hwtstamp_ioctl(struct net_device *netdev, 783static int gfar_hwtstamp_ioctl(struct net_device *netdev,
784 struct ifreq *ifr, int cmd) 784 struct ifreq *ifr, int cmd)
785{ 785{
786 struct hwtstamp_config config; 786 struct hwtstamp_config config;
787 struct gfar_private *priv = netdev_priv(netdev); 787 struct gfar_private *priv = netdev_priv(netdev);
@@ -851,6 +851,7 @@ static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
851{ 851{
852 unsigned int new_bit_map = 0x0; 852 unsigned int new_bit_map = 0x0;
853 int mask = 0x1 << (max_qs - 1), i; 853 int mask = 0x1 << (max_qs - 1), i;
854
854 for (i = 0; i < max_qs; i++) { 855 for (i = 0; i < max_qs; i++) {
855 if (bit_map & mask) 856 if (bit_map & mask)
856 new_bit_map = new_bit_map + (1 << i); 857 new_bit_map = new_bit_map + (1 << i);
@@ -936,22 +937,22 @@ static void gfar_detect_errata(struct gfar_private *priv)
936 937
937 /* MPC8313 Rev 2.0 and higher; All MPC837x */ 938 /* MPC8313 Rev 2.0 and higher; All MPC837x */
938 if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) || 939 if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
939 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) 940 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
940 priv->errata |= GFAR_ERRATA_74; 941 priv->errata |= GFAR_ERRATA_74;
941 942
942 /* MPC8313 and MPC837x all rev */ 943 /* MPC8313 and MPC837x all rev */
943 if ((pvr == 0x80850010 && mod == 0x80b0) || 944 if ((pvr == 0x80850010 && mod == 0x80b0) ||
944 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) 945 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
945 priv->errata |= GFAR_ERRATA_76; 946 priv->errata |= GFAR_ERRATA_76;
946 947
947 /* MPC8313 and MPC837x all rev */ 948 /* MPC8313 and MPC837x all rev */
948 if ((pvr == 0x80850010 && mod == 0x80b0) || 949 if ((pvr == 0x80850010 && mod == 0x80b0) ||
949 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) 950 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
950 priv->errata |= GFAR_ERRATA_A002; 951 priv->errata |= GFAR_ERRATA_A002;
951 952
952 /* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */ 953 /* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
953 if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) || 954 if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
954 (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020)) 955 (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
955 priv->errata |= GFAR_ERRATA_12; 956 priv->errata |= GFAR_ERRATA_12;
956 957
957 if (priv->errata) 958 if (priv->errata)
@@ -960,7 +961,8 @@ static void gfar_detect_errata(struct gfar_private *priv)
960} 961}
961 962
962/* Set up the ethernet device structure, private data, 963/* Set up the ethernet device structure, private data,
963 * and anything else we need before we start */ 964 * and anything else we need before we start
965 */
964static int gfar_probe(struct platform_device *ofdev) 966static int gfar_probe(struct platform_device *ofdev)
965{ 967{
966 u32 tempval; 968 u32 tempval;
@@ -991,8 +993,9 @@ static int gfar_probe(struct platform_device *ofdev)
991 993
992 gfar_detect_errata(priv); 994 gfar_detect_errata(priv);
993 995
994 /* Stop the DMA engine now, in case it was running before */ 996 /* Stop the DMA engine now, in case it was running before
995 /* (The firmware could have used it, and left it running). */ 997 * (The firmware could have used it, and left it running).
998 */
996 gfar_halt(dev); 999 gfar_halt(dev);
997 1000
998 /* Reset MAC layer */ 1001 /* Reset MAC layer */
@@ -1026,13 +1029,14 @@ static int gfar_probe(struct platform_device *ofdev)
1026 1029
1027 /* Register for napi ...We are registering NAPI for each grp */ 1030 /* Register for napi ...We are registering NAPI for each grp */
1028 for (i = 0; i < priv->num_grps; i++) 1031 for (i = 0; i < priv->num_grps; i++)
1029 netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT); 1032 netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
1033 GFAR_DEV_WEIGHT);
1030 1034
1031 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { 1035 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
1032 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | 1036 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
1033 NETIF_F_RXCSUM; 1037 NETIF_F_RXCSUM;
1034 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | 1038 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
1035 NETIF_F_RXCSUM | NETIF_F_HIGHDMA; 1039 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
1036 } 1040 }
1037 1041
1038 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { 1042 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
@@ -1081,7 +1085,7 @@ static int gfar_probe(struct platform_device *ofdev)
1081 priv->padding = 0; 1085 priv->padding = 0;
1082 1086
1083 if (dev->features & NETIF_F_IP_CSUM || 1087 if (dev->features & NETIF_F_IP_CSUM ||
1084 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) 1088 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1085 dev->needed_headroom = GMAC_FCB_LEN; 1089 dev->needed_headroom = GMAC_FCB_LEN;
1086 1090
1087 /* Program the isrg regs only if number of grps > 1 */ 1091 /* Program the isrg regs only if number of grps > 1 */
@@ -1098,28 +1102,32 @@ static int gfar_probe(struct platform_device *ofdev)
1098 1102
1099 /* Need to reverse the bit maps as bit_map's MSB is q0 1103 /* Need to reverse the bit maps as bit_map's MSB is q0
1100 * but, for_each_set_bit parses from right to left, which 1104 * but, for_each_set_bit parses from right to left, which
1101 * basically reverses the queue numbers */ 1105 * basically reverses the queue numbers
1106 */
1102 for (i = 0; i< priv->num_grps; i++) { 1107 for (i = 0; i< priv->num_grps; i++) {
1103 priv->gfargrp[i].tx_bit_map = reverse_bitmap( 1108 priv->gfargrp[i].tx_bit_map =
1104 priv->gfargrp[i].tx_bit_map, MAX_TX_QS); 1109 reverse_bitmap(priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
1105 priv->gfargrp[i].rx_bit_map = reverse_bitmap( 1110 priv->gfargrp[i].rx_bit_map =
1106 priv->gfargrp[i].rx_bit_map, MAX_RX_QS); 1111 reverse_bitmap(priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
1107 } 1112 }
1108 1113
1109 /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values, 1114 /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
1110 * also assign queues to groups */ 1115 * also assign queues to groups
1116 */
1111 for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) { 1117 for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
1112 priv->gfargrp[grp_idx].num_rx_queues = 0x0; 1118 priv->gfargrp[grp_idx].num_rx_queues = 0x0;
1119
1113 for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map, 1120 for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
1114 priv->num_rx_queues) { 1121 priv->num_rx_queues) {
1115 priv->gfargrp[grp_idx].num_rx_queues++; 1122 priv->gfargrp[grp_idx].num_rx_queues++;
1116 priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx]; 1123 priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
1117 rstat = rstat | (RSTAT_CLEAR_RHALT >> i); 1124 rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
1118 rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i); 1125 rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
1119 } 1126 }
1120 priv->gfargrp[grp_idx].num_tx_queues = 0x0; 1127 priv->gfargrp[grp_idx].num_tx_queues = 0x0;
1128
1121 for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map, 1129 for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
1122 priv->num_tx_queues) { 1130 priv->num_tx_queues) {
1123 priv->gfargrp[grp_idx].num_tx_queues++; 1131 priv->gfargrp[grp_idx].num_tx_queues++;
1124 priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx]; 1132 priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
1125 tstat = tstat | (TSTAT_CLEAR_THALT >> i); 1133 tstat = tstat | (TSTAT_CLEAR_THALT >> i);
@@ -1149,7 +1157,7 @@ static int gfar_probe(struct platform_device *ofdev)
1149 priv->rx_queue[i]->rxic = DEFAULT_RXIC; 1157 priv->rx_queue[i]->rxic = DEFAULT_RXIC;
1150 } 1158 }
1151 1159
1152 /* always enable rx filer*/ 1160 /* always enable rx filer */
1153 priv->rx_filer_enable = 1; 1161 priv->rx_filer_enable = 1;
1154 /* Enable most messages by default */ 1162 /* Enable most messages by default */
1155 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; 1163 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
@@ -1165,7 +1173,8 @@ static int gfar_probe(struct platform_device *ofdev)
1165 } 1173 }
1166 1174
1167 device_init_wakeup(&dev->dev, 1175 device_init_wakeup(&dev->dev,
1168 priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1176 priv->device_flags &
1177 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1169 1178
1170 /* fill out IRQ number and name fields */ 1179 /* fill out IRQ number and name fields */
1171 for (i = 0; i < priv->num_grps; i++) { 1180 for (i = 0; i < priv->num_grps; i++) {
@@ -1189,13 +1198,14 @@ static int gfar_probe(struct platform_device *ofdev)
1189 /* Print out the device info */ 1198 /* Print out the device info */
1190 netdev_info(dev, "mac: %pM\n", dev->dev_addr); 1199 netdev_info(dev, "mac: %pM\n", dev->dev_addr);
1191 1200
1192 /* Even more device info helps when determining which kernel */ 1201 /* Even more device info helps when determining which kernel
1193 /* provided which set of benchmarks. */ 1202 * provided which set of benchmarks.
1203 */
1194 netdev_info(dev, "Running with NAPI enabled\n"); 1204 netdev_info(dev, "Running with NAPI enabled\n");
1195 for (i = 0; i < priv->num_rx_queues; i++) 1205 for (i = 0; i < priv->num_rx_queues; i++)
1196 netdev_info(dev, "RX BD ring size for Q[%d]: %d\n", 1206 netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
1197 i, priv->rx_queue[i]->rx_ring_size); 1207 i, priv->rx_queue[i]->rx_ring_size);
1198 for(i = 0; i < priv->num_tx_queues; i++) 1208 for (i = 0; i < priv->num_tx_queues; i++)
1199 netdev_info(dev, "TX BD ring size for Q[%d]: %d\n", 1209 netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
1200 i, priv->tx_queue[i]->tx_ring_size); 1210 i, priv->tx_queue[i]->tx_ring_size);
1201 1211
@@ -1242,7 +1252,8 @@ static int gfar_suspend(struct device *dev)
1242 u32 tempval; 1252 u32 tempval;
1243 1253
1244 int magic_packet = priv->wol_en && 1254 int magic_packet = priv->wol_en &&
1245 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1255 (priv->device_flags &
1256 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1246 1257
1247 netif_device_detach(ndev); 1258 netif_device_detach(ndev);
1248 1259
@@ -1294,7 +1305,8 @@ static int gfar_resume(struct device *dev)
1294 unsigned long flags; 1305 unsigned long flags;
1295 u32 tempval; 1306 u32 tempval;
1296 int magic_packet = priv->wol_en && 1307 int magic_packet = priv->wol_en &&
1297 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1308 (priv->device_flags &
1309 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1298 1310
1299 if (!netif_running(ndev)) { 1311 if (!netif_running(ndev)) {
1300 netif_device_attach(ndev); 1312 netif_device_attach(ndev);
@@ -1393,13 +1405,13 @@ static phy_interface_t gfar_get_interface(struct net_device *dev)
1393 } 1405 }
1394 1406
1395 if (ecntrl & ECNTRL_REDUCED_MODE) { 1407 if (ecntrl & ECNTRL_REDUCED_MODE) {
1396 if (ecntrl & ECNTRL_REDUCED_MII_MODE) 1408 if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
1397 return PHY_INTERFACE_MODE_RMII; 1409 return PHY_INTERFACE_MODE_RMII;
1410 }
1398 else { 1411 else {
1399 phy_interface_t interface = priv->interface; 1412 phy_interface_t interface = priv->interface;
1400 1413
1401 /* 1414 /* This isn't autodetected right now, so it must
1402 * This isn't autodetected right now, so it must
1403 * be set by the device tree or platform code. 1415 * be set by the device tree or platform code.
1404 */ 1416 */
1405 if (interface == PHY_INTERFACE_MODE_RGMII_ID) 1417 if (interface == PHY_INTERFACE_MODE_RGMII_ID)
@@ -1453,8 +1465,7 @@ static int init_phy(struct net_device *dev)
1453 return 0; 1465 return 0;
1454} 1466}
1455 1467
1456/* 1468/* Initialize TBI PHY interface for communicating with the
1457 * Initialize TBI PHY interface for communicating with the
1458 * SERDES lynx PHY on the chip. We communicate with this PHY 1469 * SERDES lynx PHY on the chip. We communicate with this PHY
1459 * through the MDIO bus on each controller, treating it as a 1470 * through the MDIO bus on each controller, treating it as a
1460 * "normal" PHY at the address found in the TBIPA register. We assume 1471 * "normal" PHY at the address found in the TBIPA register. We assume
@@ -1479,8 +1490,7 @@ static void gfar_configure_serdes(struct net_device *dev)
1479 return; 1490 return;
1480 } 1491 }
1481 1492
1482 /* 1493 /* If the link is already up, we must already be ok, and don't need to
1483 * If the link is already up, we must already be ok, and don't need to
1484 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured 1494 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
1485 * everything for us? Resetting it takes the link down and requires 1495 * everything for us? Resetting it takes the link down and requires
1486 * several seconds for it to come back. 1496 * several seconds for it to come back.
@@ -1492,18 +1502,19 @@ static void gfar_configure_serdes(struct net_device *dev)
1492 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); 1502 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
1493 1503
1494 phy_write(tbiphy, MII_ADVERTISE, 1504 phy_write(tbiphy, MII_ADVERTISE,
1495 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | 1505 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1496 ADVERTISE_1000XPSE_ASYM); 1506 ADVERTISE_1000XPSE_ASYM);
1497 1507
1498 phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE | 1508 phy_write(tbiphy, MII_BMCR,
1499 BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000); 1509 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
1510 BMCR_SPEED1000);
1500} 1511}
1501 1512
1502static void init_registers(struct net_device *dev) 1513static void init_registers(struct net_device *dev)
1503{ 1514{
1504 struct gfar_private *priv = netdev_priv(dev); 1515 struct gfar_private *priv = netdev_priv(dev);
1505 struct gfar __iomem *regs = NULL; 1516 struct gfar __iomem *regs = NULL;
1506 int i = 0; 1517 int i;
1507 1518
1508 for (i = 0; i < priv->num_grps; i++) { 1519 for (i = 0; i < priv->num_grps; i++) {
1509 regs = priv->gfargrp[i].regs; 1520 regs = priv->gfargrp[i].regs;
@@ -1554,15 +1565,13 @@ static int __gfar_is_rx_idle(struct gfar_private *priv)
1554{ 1565{
1555 u32 res; 1566 u32 res;
1556 1567
1557 /* 1568 /* Normaly TSEC should not hang on GRS commands, so we should
1558 * Normaly TSEC should not hang on GRS commands, so we should
1559 * actually wait for IEVENT_GRSC flag. 1569 * actually wait for IEVENT_GRSC flag.
1560 */ 1570 */
1561 if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002))) 1571 if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
1562 return 0; 1572 return 0;
1563 1573
1564 /* 1574 /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
1565 * Read the eTSEC register at offset 0xD1C. If bits 7-14 are
1566 * the same as bits 23-30, the eTSEC Rx is assumed to be idle 1575 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1567 * and the Rx can be safely reset. 1576 * and the Rx can be safely reset.
1568 */ 1577 */
@@ -1580,7 +1589,7 @@ static void gfar_halt_nodisable(struct net_device *dev)
1580 struct gfar_private *priv = netdev_priv(dev); 1589 struct gfar_private *priv = netdev_priv(dev);
1581 struct gfar __iomem *regs = NULL; 1590 struct gfar __iomem *regs = NULL;
1582 u32 tempval; 1591 u32 tempval;
1583 int i = 0; 1592 int i;
1584 1593
1585 for (i = 0; i < priv->num_grps; i++) { 1594 for (i = 0; i < priv->num_grps; i++) {
1586 regs = priv->gfargrp[i].regs; 1595 regs = priv->gfargrp[i].regs;
@@ -1594,8 +1603,8 @@ static void gfar_halt_nodisable(struct net_device *dev)
1594 regs = priv->gfargrp[0].regs; 1603 regs = priv->gfargrp[0].regs;
1595 /* Stop the DMA, and wait for it to stop */ 1604 /* Stop the DMA, and wait for it to stop */
1596 tempval = gfar_read(&regs->dmactrl); 1605 tempval = gfar_read(&regs->dmactrl);
1597 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) 1606 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
1598 != (DMACTRL_GRS | DMACTRL_GTS)) { 1607 (DMACTRL_GRS | DMACTRL_GTS)) {
1599 int ret; 1608 int ret;
1600 1609
1601 tempval |= (DMACTRL_GRS | DMACTRL_GTS); 1610 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
@@ -1660,7 +1669,7 @@ void stop_gfar(struct net_device *dev)
1660 } else { 1669 } else {
1661 for (i = 0; i < priv->num_grps; i++) 1670 for (i = 0; i < priv->num_grps; i++)
1662 free_irq(priv->gfargrp[i].interruptTransmit, 1671 free_irq(priv->gfargrp[i].interruptTransmit,
1663 &priv->gfargrp[i]); 1672 &priv->gfargrp[i]);
1664 } 1673 }
1665 1674
1666 free_skb_resources(priv); 1675 free_skb_resources(priv);
@@ -1679,13 +1688,13 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
1679 continue; 1688 continue;
1680 1689
1681 dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr, 1690 dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
1682 txbdp->length, DMA_TO_DEVICE); 1691 txbdp->length, DMA_TO_DEVICE);
1683 txbdp->lstatus = 0; 1692 txbdp->lstatus = 0;
1684 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; 1693 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1685 j++) { 1694 j++) {
1686 txbdp++; 1695 txbdp++;
1687 dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr, 1696 dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
1688 txbdp->length, DMA_TO_DEVICE); 1697 txbdp->length, DMA_TO_DEVICE);
1689 } 1698 }
1690 txbdp++; 1699 txbdp++;
1691 dev_kfree_skb_any(tx_queue->tx_skbuff[i]); 1700 dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
@@ -1705,8 +1714,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1705 for (i = 0; i < rx_queue->rx_ring_size; i++) { 1714 for (i = 0; i < rx_queue->rx_ring_size; i++) {
1706 if (rx_queue->rx_skbuff[i]) { 1715 if (rx_queue->rx_skbuff[i]) {
1707 dma_unmap_single(&priv->ofdev->dev, 1716 dma_unmap_single(&priv->ofdev->dev,
1708 rxbdp->bufPtr, priv->rx_buffer_size, 1717 rxbdp->bufPtr, priv->rx_buffer_size,
1709 DMA_FROM_DEVICE); 1718 DMA_FROM_DEVICE);
1710 dev_kfree_skb_any(rx_queue->rx_skbuff[i]); 1719 dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
1711 rx_queue->rx_skbuff[i] = NULL; 1720 rx_queue->rx_skbuff[i] = NULL;
1712 } 1721 }
@@ -1718,7 +1727,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1718} 1727}
1719 1728
1720/* If there are any tx skbs or rx skbs still around, free them. 1729/* If there are any tx skbs or rx skbs still around, free them.
1721 * Then free tx_skbuff and rx_skbuff */ 1730 * Then free tx_skbuff and rx_skbuff
1731 */
1722static void free_skb_resources(struct gfar_private *priv) 1732static void free_skb_resources(struct gfar_private *priv)
1723{ 1733{
1724 struct gfar_priv_tx_q *tx_queue = NULL; 1734 struct gfar_priv_tx_q *tx_queue = NULL;
@@ -1728,24 +1738,25 @@ static void free_skb_resources(struct gfar_private *priv)
1728 /* Go through all the buffer descriptors and free their data buffers */ 1738 /* Go through all the buffer descriptors and free their data buffers */
1729 for (i = 0; i < priv->num_tx_queues; i++) { 1739 for (i = 0; i < priv->num_tx_queues; i++) {
1730 struct netdev_queue *txq; 1740 struct netdev_queue *txq;
1741
1731 tx_queue = priv->tx_queue[i]; 1742 tx_queue = priv->tx_queue[i];
1732 txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex); 1743 txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
1733 if(tx_queue->tx_skbuff) 1744 if (tx_queue->tx_skbuff)
1734 free_skb_tx_queue(tx_queue); 1745 free_skb_tx_queue(tx_queue);
1735 netdev_tx_reset_queue(txq); 1746 netdev_tx_reset_queue(txq);
1736 } 1747 }
1737 1748
1738 for (i = 0; i < priv->num_rx_queues; i++) { 1749 for (i = 0; i < priv->num_rx_queues; i++) {
1739 rx_queue = priv->rx_queue[i]; 1750 rx_queue = priv->rx_queue[i];
1740 if(rx_queue->rx_skbuff) 1751 if (rx_queue->rx_skbuff)
1741 free_skb_rx_queue(rx_queue); 1752 free_skb_rx_queue(rx_queue);
1742 } 1753 }
1743 1754
1744 dma_free_coherent(&priv->ofdev->dev, 1755 dma_free_coherent(&priv->ofdev->dev,
1745 sizeof(struct txbd8) * priv->total_tx_ring_size + 1756 sizeof(struct txbd8) * priv->total_tx_ring_size +
1746 sizeof(struct rxbd8) * priv->total_rx_ring_size, 1757 sizeof(struct rxbd8) * priv->total_rx_ring_size,
1747 priv->tx_queue[0]->tx_bd_base, 1758 priv->tx_queue[0]->tx_bd_base,
1748 priv->tx_queue[0]->tx_bd_dma_base); 1759 priv->tx_queue[0]->tx_bd_dma_base);
1749 skb_queue_purge(&priv->rx_recycle); 1760 skb_queue_purge(&priv->rx_recycle);
1750} 1761}
1751 1762
@@ -1784,7 +1795,7 @@ void gfar_start(struct net_device *dev)
1784} 1795}
1785 1796
1786void gfar_configure_coalescing(struct gfar_private *priv, 1797void gfar_configure_coalescing(struct gfar_private *priv,
1787 unsigned long tx_mask, unsigned long rx_mask) 1798 unsigned long tx_mask, unsigned long rx_mask)
1788{ 1799{
1789 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1800 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1790 u32 __iomem *baddr; 1801 u32 __iomem *baddr;
@@ -1794,11 +1805,11 @@ void gfar_configure_coalescing(struct gfar_private *priv,
1794 * multiple queues, there's only single reg to program 1805 * multiple queues, there's only single reg to program
1795 */ 1806 */
1796 gfar_write(&regs->txic, 0); 1807 gfar_write(&regs->txic, 0);
1797 if(likely(priv->tx_queue[0]->txcoalescing)) 1808 if (likely(priv->tx_queue[0]->txcoalescing))
1798 gfar_write(&regs->txic, priv->tx_queue[0]->txic); 1809 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
1799 1810
1800 gfar_write(&regs->rxic, 0); 1811 gfar_write(&regs->rxic, 0);
1801 if(unlikely(priv->rx_queue[0]->rxcoalescing)) 1812 if (unlikely(priv->rx_queue[0]->rxcoalescing))
1802 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic); 1813 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
1803 1814
1804 if (priv->mode == MQ_MG_MODE) { 1815 if (priv->mode == MQ_MG_MODE) {
@@ -1825,12 +1836,14 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
1825 int err; 1836 int err;
1826 1837
1827 /* If the device has multiple interrupts, register for 1838 /* If the device has multiple interrupts, register for
1828 * them. Otherwise, only register for the one */ 1839 * them. Otherwise, only register for the one
1840 */
1829 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 1841 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1830 /* Install our interrupt handlers for Error, 1842 /* Install our interrupt handlers for Error,
1831 * Transmit, and Receive */ 1843 * Transmit, and Receive
1832 if ((err = request_irq(grp->interruptError, gfar_error, 0, 1844 */
1833 grp->int_name_er,grp)) < 0) { 1845 if ((err = request_irq(grp->interruptError, gfar_error,
1846 0, grp->int_name_er, grp)) < 0) {
1834 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 1847 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1835 grp->interruptError); 1848 grp->interruptError);
1836 1849
@@ -1838,21 +1851,21 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
1838 } 1851 }
1839 1852
1840 if ((err = request_irq(grp->interruptTransmit, gfar_transmit, 1853 if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
1841 0, grp->int_name_tx, grp)) < 0) { 1854 0, grp->int_name_tx, grp)) < 0) {
1842 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 1855 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1843 grp->interruptTransmit); 1856 grp->interruptTransmit);
1844 goto tx_irq_fail; 1857 goto tx_irq_fail;
1845 } 1858 }
1846 1859
1847 if ((err = request_irq(grp->interruptReceive, gfar_receive, 0, 1860 if ((err = request_irq(grp->interruptReceive, gfar_receive,
1848 grp->int_name_rx, grp)) < 0) { 1861 0, grp->int_name_rx, grp)) < 0) {
1849 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 1862 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1850 grp->interruptReceive); 1863 grp->interruptReceive);
1851 goto rx_irq_fail; 1864 goto rx_irq_fail;
1852 } 1865 }
1853 } else { 1866 } else {
1854 if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0, 1867 if ((err = request_irq(grp->interruptTransmit, gfar_interrupt,
1855 grp->int_name_tx, grp)) < 0) { 1868 0, grp->int_name_tx, grp)) < 0) {
1856 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 1869 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1857 grp->interruptTransmit); 1870 grp->interruptTransmit);
1858 goto err_irq_fail; 1871 goto err_irq_fail;
@@ -1912,8 +1925,9 @@ irq_fail:
1912 return err; 1925 return err;
1913} 1926}
1914 1927
1915/* Called when something needs to use the ethernet device */ 1928/* Called when something needs to use the ethernet device
1916/* Returns 0 for success. */ 1929 * Returns 0 for success.
1930 */
1917static int gfar_enet_open(struct net_device *dev) 1931static int gfar_enet_open(struct net_device *dev)
1918{ 1932{
1919 struct gfar_private *priv = netdev_priv(dev); 1933 struct gfar_private *priv = netdev_priv(dev);
@@ -1958,18 +1972,17 @@ static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
1958} 1972}
1959 1973
1960static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb, 1974static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
1961 int fcb_length) 1975 int fcb_length)
1962{ 1976{
1963 u8 flags = 0;
1964
1965 /* If we're here, it's a IP packet with a TCP or UDP 1977 /* If we're here, it's a IP packet with a TCP or UDP
1966 * payload. We set it to checksum, using a pseudo-header 1978 * payload. We set it to checksum, using a pseudo-header
1967 * we provide 1979 * we provide
1968 */ 1980 */
1969 flags = TXFCB_DEFAULT; 1981 u8 flags = TXFCB_DEFAULT;
1970 1982
1971 /* Tell the controller what the protocol is */ 1983 /* Tell the controller what the protocol is
1972 /* And provide the already calculated phcs */ 1984 * And provide the already calculated phcs
1985 */
1973 if (ip_hdr(skb)->protocol == IPPROTO_UDP) { 1986 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
1974 flags |= TXFCB_UDP; 1987 flags |= TXFCB_UDP;
1975 fcb->phcs = udp_hdr(skb)->check; 1988 fcb->phcs = udp_hdr(skb)->check;
@@ -1979,7 +1992,8 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
1979 /* l3os is the distance between the start of the 1992 /* l3os is the distance between the start of the
1980 * frame (skb->data) and the start of the IP hdr. 1993 * frame (skb->data) and the start of the IP hdr.
1981 * l4os is the distance between the start of the 1994 * l4os is the distance between the start of the
1982 * l3 hdr and the l4 hdr */ 1995 * l3 hdr and the l4 hdr
1996 */
1983 fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length); 1997 fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
1984 fcb->l4os = skb_network_header_len(skb); 1998 fcb->l4os = skb_network_header_len(skb);
1985 1999
@@ -1993,7 +2007,7 @@ void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
1993} 2007}
1994 2008
1995static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride, 2009static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
1996 struct txbd8 *base, int ring_size) 2010 struct txbd8 *base, int ring_size)
1997{ 2011{
1998 struct txbd8 *new_bd = bdp + stride; 2012 struct txbd8 *new_bd = bdp + stride;
1999 2013
@@ -2001,13 +2015,14 @@ static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
2001} 2015}
2002 2016
2003static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base, 2017static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
2004 int ring_size) 2018 int ring_size)
2005{ 2019{
2006 return skip_txbd(bdp, 1, base, ring_size); 2020 return skip_txbd(bdp, 1, base, ring_size);
2007} 2021}
2008 2022
2009/* This is called by the kernel when a frame is ready for transmission. */ 2023/* This is called by the kernel when a frame is ready for transmission.
2010/* It is pointed to by the dev->hard_start_xmit function pointer */ 2024 * It is pointed to by the dev->hard_start_xmit function pointer
2025 */
2011static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) 2026static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2012{ 2027{
2013 struct gfar_private *priv = netdev_priv(dev); 2028 struct gfar_private *priv = netdev_priv(dev);
@@ -2022,13 +2037,12 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2022 unsigned long flags; 2037 unsigned long flags;
2023 unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN; 2038 unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN;
2024 2039
2025 /* 2040 /* TOE=1 frames larger than 2500 bytes may see excess delays
2026 * TOE=1 frames larger than 2500 bytes may see excess delays
2027 * before start of transmission. 2041 * before start of transmission.
2028 */ 2042 */
2029 if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) && 2043 if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
2030 skb->ip_summed == CHECKSUM_PARTIAL && 2044 skb->ip_summed == CHECKSUM_PARTIAL &&
2031 skb->len > 2500)) { 2045 skb->len > 2500)) {
2032 int ret; 2046 int ret;
2033 2047
2034 ret = skb_checksum_help(skb); 2048 ret = skb_checksum_help(skb);
@@ -2044,16 +2058,16 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2044 2058
2045 /* check if time stamp should be generated */ 2059 /* check if time stamp should be generated */
2046 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && 2060 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
2047 priv->hwts_tx_en)) { 2061 priv->hwts_tx_en)) {
2048 do_tstamp = 1; 2062 do_tstamp = 1;
2049 fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN; 2063 fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2050 } 2064 }
2051 2065
2052 /* make space for additional header when fcb is needed */ 2066 /* make space for additional header when fcb is needed */
2053 if (((skb->ip_summed == CHECKSUM_PARTIAL) || 2067 if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
2054 vlan_tx_tag_present(skb) || 2068 vlan_tx_tag_present(skb) ||
2055 unlikely(do_tstamp)) && 2069 unlikely(do_tstamp)) &&
2056 (skb_headroom(skb) < fcb_length)) { 2070 (skb_headroom(skb) < fcb_length)) {
2057 struct sk_buff *skb_new; 2071 struct sk_buff *skb_new;
2058 2072
2059 skb_new = skb_realloc_headroom(skb, fcb_length); 2073 skb_new = skb_realloc_headroom(skb, fcb_length);
@@ -2096,12 +2110,12 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2096 /* Time stamp insertion requires one additional TxBD */ 2110 /* Time stamp insertion requires one additional TxBD */
2097 if (unlikely(do_tstamp)) 2111 if (unlikely(do_tstamp))
2098 txbdp_tstamp = txbdp = next_txbd(txbdp, base, 2112 txbdp_tstamp = txbdp = next_txbd(txbdp, base,
2099 tx_queue->tx_ring_size); 2113 tx_queue->tx_ring_size);
2100 2114
2101 if (nr_frags == 0) { 2115 if (nr_frags == 0) {
2102 if (unlikely(do_tstamp)) 2116 if (unlikely(do_tstamp))
2103 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST | 2117 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
2104 TXBD_INTERRUPT); 2118 TXBD_INTERRUPT);
2105 else 2119 else
2106 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); 2120 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2107 } else { 2121 } else {
@@ -2113,7 +2127,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2113 length = skb_shinfo(skb)->frags[i].size; 2127 length = skb_shinfo(skb)->frags[i].size;
2114 2128
2115 lstatus = txbdp->lstatus | length | 2129 lstatus = txbdp->lstatus | length |
2116 BD_LFLAG(TXBD_READY); 2130 BD_LFLAG(TXBD_READY);
2117 2131
2118 /* Handle the last BD specially */ 2132 /* Handle the last BD specially */
2119 if (i == nr_frags - 1) 2133 if (i == nr_frags - 1)
@@ -2143,8 +2157,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2143 if (CHECKSUM_PARTIAL == skb->ip_summed) { 2157 if (CHECKSUM_PARTIAL == skb->ip_summed) {
2144 fcb = gfar_add_fcb(skb); 2158 fcb = gfar_add_fcb(skb);
2145 /* as specified by errata */ 2159 /* as specified by errata */
2146 if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12) 2160 if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12) &&
2147 && ((unsigned long)fcb % 0x20) > 0x18)) { 2161 ((unsigned long)fcb % 0x20) > 0x18)) {
2148 __skb_pull(skb, GMAC_FCB_LEN); 2162 __skb_pull(skb, GMAC_FCB_LEN);
2149 skb_checksum_help(skb); 2163 skb_checksum_help(skb);
2150 } else { 2164 } else {
@@ -2172,10 +2186,9 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2172 } 2186 }
2173 2187
2174 txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data, 2188 txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
2175 skb_headlen(skb), DMA_TO_DEVICE); 2189 skb_headlen(skb), DMA_TO_DEVICE);
2176 2190
2177 /* 2191 /* If time stamping is requested one additional TxBD must be set up. The
2178 * If time stamping is requested one additional TxBD must be set up. The
2179 * first TxBD points to the FCB and must have a data length of 2192 * first TxBD points to the FCB and must have a data length of
2180 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with 2193 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
2181 * the full frame length. 2194 * the full frame length.
@@ -2183,7 +2196,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2183 if (unlikely(do_tstamp)) { 2196 if (unlikely(do_tstamp)) {
2184 txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length; 2197 txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length;
2185 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) | 2198 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
2186 (skb_headlen(skb) - fcb_length); 2199 (skb_headlen(skb) - fcb_length);
2187 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN; 2200 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
2188 } else { 2201 } else {
2189 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); 2202 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
@@ -2191,8 +2204,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2191 2204
2192 netdev_tx_sent_queue(txq, skb->len); 2205 netdev_tx_sent_queue(txq, skb->len);
2193 2206
2194 /* 2207 /* We can work in parallel with gfar_clean_tx_ring(), except
2195 * We can work in parallel with gfar_clean_tx_ring(), except
2196 * when modifying num_txbdfree. Note that we didn't grab the lock 2208 * when modifying num_txbdfree. Note that we didn't grab the lock
2197 * when we were reading the num_txbdfree and checking for available 2209 * when we were reading the num_txbdfree and checking for available
2198 * space, that's because outside of this function it can only grow, 2210 * space, that's because outside of this function it can only grow,
@@ -2205,8 +2217,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2205 */ 2217 */
2206 spin_lock_irqsave(&tx_queue->txlock, flags); 2218 spin_lock_irqsave(&tx_queue->txlock, flags);
2207 2219
2208 /* 2220 /* The powerpc-specific eieio() is used, as wmb() has too strong
2209 * The powerpc-specific eieio() is used, as wmb() has too strong
2210 * semantics (it requires synchronization between cacheable and 2221 * semantics (it requires synchronization between cacheable and
2211 * uncacheable mappings, which eieio doesn't provide and which we 2222 * uncacheable mappings, which eieio doesn't provide and which we
2212 * don't need), thus requiring a more expensive sync instruction. At 2223 * don't need), thus requiring a more expensive sync instruction. At
@@ -2222,9 +2233,10 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2222 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb; 2233 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
2223 2234
2224 /* Update the current skb pointer to the next entry we will use 2235 /* Update the current skb pointer to the next entry we will use
2225 * (wrapping if necessary) */ 2236 * (wrapping if necessary)
2237 */
2226 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) & 2238 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
2227 TX_RING_MOD_MASK(tx_queue->tx_ring_size); 2239 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
2228 2240
2229 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); 2241 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2230 2242
@@ -2232,7 +2244,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2232 tx_queue->num_txbdfree -= (nr_txbds); 2244 tx_queue->num_txbdfree -= (nr_txbds);
2233 2245
2234 /* If the next BD still needs to be cleaned up, then the bds 2246 /* If the next BD still needs to be cleaned up, then the bds
2235 are full. We need to tell the kernel to stop sending us stuff. */ 2247 * are full. We need to tell the kernel to stop sending us stuff.
2248 */
2236 if (!tx_queue->num_txbdfree) { 2249 if (!tx_queue->num_txbdfree) {
2237 netif_tx_stop_queue(txq); 2250 netif_tx_stop_queue(txq);
2238 2251
@@ -2357,12 +2370,12 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2357 2370
2358 frame_size += priv->padding; 2371 frame_size += priv->padding;
2359 2372
2360 tempsize = 2373 tempsize = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
2361 (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) + 2374 INCREMENTAL_BUFFER_SIZE;
2362 INCREMENTAL_BUFFER_SIZE;
2363 2375
2364 /* Only stop and start the controller if it isn't already 2376 /* Only stop and start the controller if it isn't already
2365 * stopped, and we changed something */ 2377 * stopped, and we changed something
2378 */
2366 if ((oldsize != tempsize) && (dev->flags & IFF_UP)) 2379 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
2367 stop_gfar(dev); 2380 stop_gfar(dev);
2368 2381
@@ -2375,11 +2388,12 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2375 2388
2376 /* If the mtu is larger than the max size for standard 2389 /* If the mtu is larger than the max size for standard
2377 * ethernet frames (ie, a jumbo frame), then set maccfg2 2390 * ethernet frames (ie, a jumbo frame), then set maccfg2
2378 * to allow huge frames, and to check the length */ 2391 * to allow huge frames, and to check the length
2392 */
2379 tempval = gfar_read(&regs->maccfg2); 2393 tempval = gfar_read(&regs->maccfg2);
2380 2394
2381 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE || 2395 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
2382 gfar_has_errata(priv, GFAR_ERRATA_74)) 2396 gfar_has_errata(priv, GFAR_ERRATA_74))
2383 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); 2397 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2384 else 2398 else
2385 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); 2399 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
@@ -2400,7 +2414,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2400static void gfar_reset_task(struct work_struct *work) 2414static void gfar_reset_task(struct work_struct *work)
2401{ 2415{
2402 struct gfar_private *priv = container_of(work, struct gfar_private, 2416 struct gfar_private *priv = container_of(work, struct gfar_private,
2403 reset_task); 2417 reset_task);
2404 struct net_device *dev = priv->ndev; 2418 struct net_device *dev = priv->ndev;
2405 2419
2406 if (dev->flags & IFF_UP) { 2420 if (dev->flags & IFF_UP) {
@@ -2427,7 +2441,7 @@ static void gfar_align_skb(struct sk_buff *skb)
2427 * as many bytes as needed to align the data properly 2441 * as many bytes as needed to align the data properly
2428 */ 2442 */
2429 skb_reserve(skb, RXBUF_ALIGNMENT - 2443 skb_reserve(skb, RXBUF_ALIGNMENT -
2430 (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1))); 2444 (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
2431} 2445}
2432 2446
2433/* Interrupt Handler for Transmit complete */ 2447/* Interrupt Handler for Transmit complete */
@@ -2461,8 +2475,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2461 2475
2462 frags = skb_shinfo(skb)->nr_frags; 2476 frags = skb_shinfo(skb)->nr_frags;
2463 2477
2464 /* 2478 /* When time stamping, one additional TxBD must be freed.
2465 * When time stamping, one additional TxBD must be freed.
2466 * Also, we need to dma_unmap_single() the TxPAL. 2479 * Also, we need to dma_unmap_single() the TxPAL.
2467 */ 2480 */
2468 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) 2481 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
@@ -2476,7 +2489,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2476 2489
2477 /* Only clean completed frames */ 2490 /* Only clean completed frames */
2478 if ((lstatus & BD_LFLAG(TXBD_READY)) && 2491 if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2479 (lstatus & BD_LENGTH_MASK)) 2492 (lstatus & BD_LENGTH_MASK))
2480 break; 2493 break;
2481 2494
2482 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { 2495 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
@@ -2486,11 +2499,12 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2486 buflen = bdp->length; 2499 buflen = bdp->length;
2487 2500
2488 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, 2501 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
2489 buflen, DMA_TO_DEVICE); 2502 buflen, DMA_TO_DEVICE);
2490 2503
2491 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { 2504 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2492 struct skb_shared_hwtstamps shhwtstamps; 2505 struct skb_shared_hwtstamps shhwtstamps;
2493 u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7); 2506 u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
2507
2494 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 2508 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2495 shhwtstamps.hwtstamp = ns_to_ktime(*ns); 2509 shhwtstamps.hwtstamp = ns_to_ktime(*ns);
2496 skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN); 2510 skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
@@ -2503,23 +2517,20 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2503 bdp = next_txbd(bdp, base, tx_ring_size); 2517 bdp = next_txbd(bdp, base, tx_ring_size);
2504 2518
2505 for (i = 0; i < frags; i++) { 2519 for (i = 0; i < frags; i++) {
2506 dma_unmap_page(&priv->ofdev->dev, 2520 dma_unmap_page(&priv->ofdev->dev, bdp->bufPtr,
2507 bdp->bufPtr, 2521 bdp->length, DMA_TO_DEVICE);
2508 bdp->length,
2509 DMA_TO_DEVICE);
2510 bdp->lstatus &= BD_LFLAG(TXBD_WRAP); 2522 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2511 bdp = next_txbd(bdp, base, tx_ring_size); 2523 bdp = next_txbd(bdp, base, tx_ring_size);
2512 } 2524 }
2513 2525
2514 bytes_sent += skb->len; 2526 bytes_sent += skb->len;
2515 2527
2516 /* 2528 /* If there's room in the queue (limit it to rx_buffer_size)
2517 * If there's room in the queue (limit it to rx_buffer_size)
2518 * we add this skb back into the pool, if it's the right size 2529 * we add this skb back into the pool, if it's the right size
2519 */ 2530 */
2520 if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size && 2531 if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
2521 skb_recycle_check(skb, priv->rx_buffer_size + 2532 skb_recycle_check(skb, priv->rx_buffer_size +
2522 RXBUF_ALIGNMENT)) { 2533 RXBUF_ALIGNMENT)) {
2523 gfar_align_skb(skb); 2534 gfar_align_skb(skb);
2524 skb_queue_head(&priv->rx_recycle, skb); 2535 skb_queue_head(&priv->rx_recycle, skb);
2525 } else 2536 } else
@@ -2528,7 +2539,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2528 tx_queue->tx_skbuff[skb_dirtytx] = NULL; 2539 tx_queue->tx_skbuff[skb_dirtytx] = NULL;
2529 2540
2530 skb_dirtytx = (skb_dirtytx + 1) & 2541 skb_dirtytx = (skb_dirtytx + 1) &
2531 TX_RING_MOD_MASK(tx_ring_size); 2542 TX_RING_MOD_MASK(tx_ring_size);
2532 2543
2533 howmany++; 2544 howmany++;
2534 spin_lock_irqsave(&tx_queue->txlock, flags); 2545 spin_lock_irqsave(&tx_queue->txlock, flags);
@@ -2558,8 +2569,7 @@ static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
2558 gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED); 2569 gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
2559 __napi_schedule(&gfargrp->napi); 2570 __napi_schedule(&gfargrp->napi);
2560 } else { 2571 } else {
2561 /* 2572 /* Clear IEVENT, so interrupts aren't called again
2562 * Clear IEVENT, so interrupts aren't called again
2563 * because of the packets that have already arrived. 2573 * because of the packets that have already arrived.
2564 */ 2574 */
2565 gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK); 2575 gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
@@ -2576,7 +2586,7 @@ static irqreturn_t gfar_transmit(int irq, void *grp_id)
2576} 2586}
2577 2587
2578static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, 2588static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
2579 struct sk_buff *skb) 2589 struct sk_buff *skb)
2580{ 2590{
2581 struct net_device *dev = rx_queue->dev; 2591 struct net_device *dev = rx_queue->dev;
2582 struct gfar_private *priv = netdev_priv(dev); 2592 struct gfar_private *priv = netdev_priv(dev);
@@ -2587,7 +2597,7 @@ static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
2587 gfar_init_rxbdp(rx_queue, bdp, buf); 2597 gfar_init_rxbdp(rx_queue, bdp, buf);
2588} 2598}
2589 2599
2590static struct sk_buff * gfar_alloc_skb(struct net_device *dev) 2600static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
2591{ 2601{
2592 struct gfar_private *priv = netdev_priv(dev); 2602 struct gfar_private *priv = netdev_priv(dev);
2593 struct sk_buff *skb = NULL; 2603 struct sk_buff *skb = NULL;
@@ -2601,7 +2611,7 @@ static struct sk_buff * gfar_alloc_skb(struct net_device *dev)
2601 return skb; 2611 return skb;
2602} 2612}
2603 2613
2604struct sk_buff * gfar_new_skb(struct net_device *dev) 2614struct sk_buff *gfar_new_skb(struct net_device *dev)
2605{ 2615{
2606 struct gfar_private *priv = netdev_priv(dev); 2616 struct gfar_private *priv = netdev_priv(dev);
2607 struct sk_buff *skb = NULL; 2617 struct sk_buff *skb = NULL;
@@ -2619,8 +2629,7 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
2619 struct net_device_stats *stats = &dev->stats; 2629 struct net_device_stats *stats = &dev->stats;
2620 struct gfar_extra_stats *estats = &priv->extra_stats; 2630 struct gfar_extra_stats *estats = &priv->extra_stats;
2621 2631
2622 /* If the packet was truncated, none of the other errors 2632 /* If the packet was truncated, none of the other errors matter */
2623 * matter */
2624 if (status & RXBD_TRUNCATED) { 2633 if (status & RXBD_TRUNCATED) {
2625 stats->rx_length_errors++; 2634 stats->rx_length_errors++;
2626 2635
@@ -2661,7 +2670,8 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2661{ 2670{
2662 /* If valid headers were found, and valid sums 2671 /* If valid headers were found, and valid sums
2663 * were verified, then we tell the kernel that no 2672 * were verified, then we tell the kernel that no
2664 * checksumming is necessary. Otherwise, it is */ 2673 * checksumming is necessary. Otherwise, it is [FIXME]
2674 */
2665 if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU)) 2675 if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
2666 skb->ip_summed = CHECKSUM_UNNECESSARY; 2676 skb->ip_summed = CHECKSUM_UNNECESSARY;
2667 else 2677 else
@@ -2669,8 +2679,7 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2669} 2679}
2670 2680
2671 2681
2672/* gfar_process_frame() -- handle one incoming packet if skb 2682/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
2673 * isn't NULL. */
2674static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, 2683static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2675 int amount_pull, struct napi_struct *napi) 2684 int amount_pull, struct napi_struct *napi)
2676{ 2685{
@@ -2682,8 +2691,9 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2682 /* fcb is at the beginning if exists */ 2691 /* fcb is at the beginning if exists */
2683 fcb = (struct rxfcb *)skb->data; 2692 fcb = (struct rxfcb *)skb->data;
2684 2693
2685 /* Remove the FCB from the skb */ 2694 /* Remove the FCB from the skb
2686 /* Remove the padded bytes, if there are any */ 2695 * Remove the padded bytes, if there are any
2696 */
2687 if (amount_pull) { 2697 if (amount_pull) {
2688 skb_record_rx_queue(skb, fcb->rq); 2698 skb_record_rx_queue(skb, fcb->rq);
2689 skb_pull(skb, amount_pull); 2699 skb_pull(skb, amount_pull);
@@ -2693,6 +2703,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2693 if (priv->hwts_rx_en) { 2703 if (priv->hwts_rx_en) {
2694 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); 2704 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2695 u64 *ns = (u64 *) skb->data; 2705 u64 *ns = (u64 *) skb->data;
2706
2696 memset(shhwtstamps, 0, sizeof(*shhwtstamps)); 2707 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2697 shhwtstamps->hwtstamp = ns_to_ktime(*ns); 2708 shhwtstamps->hwtstamp = ns_to_ktime(*ns);
2698 } 2709 }
@@ -2706,8 +2717,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2706 /* Tell the skb what kind of packet this is */ 2717 /* Tell the skb what kind of packet this is */
2707 skb->protocol = eth_type_trans(skb, dev); 2718 skb->protocol = eth_type_trans(skb, dev);
2708 2719
2709 /* 2720 /* There's need to check for NETIF_F_HW_VLAN_RX here.
2710 * There's need to check for NETIF_F_HW_VLAN_RX here.
2711 * Even if vlan rx accel is disabled, on some chips 2721 * Even if vlan rx accel is disabled, on some chips
2712 * RXFCB_VLN is pseudo randomly set. 2722 * RXFCB_VLN is pseudo randomly set.
2713 */ 2723 */
@@ -2725,8 +2735,8 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2725} 2735}
2726 2736
2727/* gfar_clean_rx_ring() -- Processes each frame in the rx ring 2737/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2728 * until the budget/quota has been reached. Returns the number 2738 * until the budget/quota has been reached. Returns the number
2729 * of frames handled 2739 * of frames handled
2730 */ 2740 */
2731int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) 2741int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2732{ 2742{
@@ -2746,6 +2756,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2746 2756
2747 while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) { 2757 while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
2748 struct sk_buff *newskb; 2758 struct sk_buff *newskb;
2759
2749 rmb(); 2760 rmb();
2750 2761
2751 /* Add another skb for the future */ 2762 /* Add another skb for the future */
@@ -2754,15 +2765,15 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2754 skb = rx_queue->rx_skbuff[rx_queue->skb_currx]; 2765 skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
2755 2766
2756 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, 2767 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
2757 priv->rx_buffer_size, DMA_FROM_DEVICE); 2768 priv->rx_buffer_size, DMA_FROM_DEVICE);
2758 2769
2759 if (unlikely(!(bdp->status & RXBD_ERR) && 2770 if (unlikely(!(bdp->status & RXBD_ERR) &&
2760 bdp->length > priv->rx_buffer_size)) 2771 bdp->length > priv->rx_buffer_size))
2761 bdp->status = RXBD_LARGE; 2772 bdp->status = RXBD_LARGE;
2762 2773
2763 /* We drop the frame if we failed to allocate a new buffer */ 2774 /* We drop the frame if we failed to allocate a new buffer */
2764 if (unlikely(!newskb || !(bdp->status & RXBD_LAST) || 2775 if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
2765 bdp->status & RXBD_ERR)) { 2776 bdp->status & RXBD_ERR)) {
2766 count_errors(bdp->status, dev); 2777 count_errors(bdp->status, dev);
2767 2778
2768 if (unlikely(!newskb)) 2779 if (unlikely(!newskb))
@@ -2781,7 +2792,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2781 rx_queue->stats.rx_bytes += pkt_len; 2792 rx_queue->stats.rx_bytes += pkt_len;
2782 skb_record_rx_queue(skb, rx_queue->qindex); 2793 skb_record_rx_queue(skb, rx_queue->qindex);
2783 gfar_process_frame(dev, skb, amount_pull, 2794 gfar_process_frame(dev, skb, amount_pull,
2784 &rx_queue->grp->napi); 2795 &rx_queue->grp->napi);
2785 2796
2786 } else { 2797 } else {
2787 netif_warn(priv, rx_err, dev, "Missing skb!\n"); 2798 netif_warn(priv, rx_err, dev, "Missing skb!\n");
@@ -2800,9 +2811,8 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2800 bdp = next_bd(bdp, base, rx_queue->rx_ring_size); 2811 bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
2801 2812
2802 /* update to point at the next skb */ 2813 /* update to point at the next skb */
2803 rx_queue->skb_currx = 2814 rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
2804 (rx_queue->skb_currx + 1) & 2815 RX_RING_MOD_MASK(rx_queue->rx_ring_size);
2805 RX_RING_MOD_MASK(rx_queue->rx_ring_size);
2806 } 2816 }
2807 2817
2808 /* Update the current rxbd pointer to be the next one */ 2818 /* Update the current rxbd pointer to be the next one */
@@ -2813,8 +2823,8 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2813 2823
2814static int gfar_poll(struct napi_struct *napi, int budget) 2824static int gfar_poll(struct napi_struct *napi, int budget)
2815{ 2825{
2816 struct gfar_priv_grp *gfargrp = container_of(napi, 2826 struct gfar_priv_grp *gfargrp =
2817 struct gfar_priv_grp, napi); 2827 container_of(napi, struct gfar_priv_grp, napi);
2818 struct gfar_private *priv = gfargrp->priv; 2828 struct gfar_private *priv = gfargrp->priv;
2819 struct gfar __iomem *regs = gfargrp->regs; 2829 struct gfar __iomem *regs = gfargrp->regs;
2820 struct gfar_priv_tx_q *tx_queue = NULL; 2830 struct gfar_priv_tx_q *tx_queue = NULL;
@@ -2828,11 +2838,11 @@ static int gfar_poll(struct napi_struct *napi, int budget)
2828 budget_per_queue = budget/num_queues; 2838 budget_per_queue = budget/num_queues;
2829 2839
2830 /* Clear IEVENT, so interrupts aren't called again 2840 /* Clear IEVENT, so interrupts aren't called again
2831 * because of the packets that have already arrived */ 2841 * because of the packets that have already arrived
2842 */
2832 gfar_write(&regs->ievent, IEVENT_RTX_MASK); 2843 gfar_write(&regs->ievent, IEVENT_RTX_MASK);
2833 2844
2834 while (num_queues && left_over_budget) { 2845 while (num_queues && left_over_budget) {
2835
2836 budget_per_queue = left_over_budget/num_queues; 2846 budget_per_queue = left_over_budget/num_queues;
2837 left_over_budget = 0; 2847 left_over_budget = 0;
2838 2848
@@ -2843,12 +2853,13 @@ static int gfar_poll(struct napi_struct *napi, int budget)
2843 tx_queue = priv->tx_queue[rx_queue->qindex]; 2853 tx_queue = priv->tx_queue[rx_queue->qindex];
2844 2854
2845 tx_cleaned += gfar_clean_tx_ring(tx_queue); 2855 tx_cleaned += gfar_clean_tx_ring(tx_queue);
2846 rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue, 2856 rx_cleaned_per_queue =
2847 budget_per_queue); 2857 gfar_clean_rx_ring(rx_queue, budget_per_queue);
2848 rx_cleaned += rx_cleaned_per_queue; 2858 rx_cleaned += rx_cleaned_per_queue;
2849 if(rx_cleaned_per_queue < budget_per_queue) { 2859 if (rx_cleaned_per_queue < budget_per_queue) {
2850 left_over_budget = left_over_budget + 2860 left_over_budget = left_over_budget +
2851 (budget_per_queue - rx_cleaned_per_queue); 2861 (budget_per_queue -
2862 rx_cleaned_per_queue);
2852 set_bit(i, &serviced_queues); 2863 set_bit(i, &serviced_queues);
2853 num_queues--; 2864 num_queues--;
2854 } 2865 }
@@ -2866,25 +2877,25 @@ static int gfar_poll(struct napi_struct *napi, int budget)
2866 2877
2867 gfar_write(&regs->imask, IMASK_DEFAULT); 2878 gfar_write(&regs->imask, IMASK_DEFAULT);
2868 2879
2869 /* If we are coalescing interrupts, update the timer */ 2880 /* If we are coalescing interrupts, update the timer
2870 /* Otherwise, clear it */ 2881 * Otherwise, clear it
2871 gfar_configure_coalescing(priv, 2882 */
2872 gfargrp->rx_bit_map, gfargrp->tx_bit_map); 2883 gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
2884 gfargrp->tx_bit_map);
2873 } 2885 }
2874 2886
2875 return rx_cleaned; 2887 return rx_cleaned;
2876} 2888}
2877 2889
2878#ifdef CONFIG_NET_POLL_CONTROLLER 2890#ifdef CONFIG_NET_POLL_CONTROLLER
2879/* 2891/* Polling 'interrupt' - used by things like netconsole to send skbs
2880 * Polling 'interrupt' - used by things like netconsole to send skbs
2881 * without having to re-enable interrupts. It's not called while 2892 * without having to re-enable interrupts. It's not called while
2882 * the interrupt routine is executing. 2893 * the interrupt routine is executing.
2883 */ 2894 */
2884static void gfar_netpoll(struct net_device *dev) 2895static void gfar_netpoll(struct net_device *dev)
2885{ 2896{
2886 struct gfar_private *priv = netdev_priv(dev); 2897 struct gfar_private *priv = netdev_priv(dev);
2887 int i = 0; 2898 int i;
2888 2899
2889 /* If the device has multiple interrupts, run tx/rx */ 2900 /* If the device has multiple interrupts, run tx/rx */
2890 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 2901 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
@@ -2893,7 +2904,7 @@ static void gfar_netpoll(struct net_device *dev)
2893 disable_irq(priv->gfargrp[i].interruptReceive); 2904 disable_irq(priv->gfargrp[i].interruptReceive);
2894 disable_irq(priv->gfargrp[i].interruptError); 2905 disable_irq(priv->gfargrp[i].interruptError);
2895 gfar_interrupt(priv->gfargrp[i].interruptTransmit, 2906 gfar_interrupt(priv->gfargrp[i].interruptTransmit,
2896 &priv->gfargrp[i]); 2907 &priv->gfargrp[i]);
2897 enable_irq(priv->gfargrp[i].interruptError); 2908 enable_irq(priv->gfargrp[i].interruptError);
2898 enable_irq(priv->gfargrp[i].interruptReceive); 2909 enable_irq(priv->gfargrp[i].interruptReceive);
2899 enable_irq(priv->gfargrp[i].interruptTransmit); 2910 enable_irq(priv->gfargrp[i].interruptTransmit);
@@ -2902,7 +2913,7 @@ static void gfar_netpoll(struct net_device *dev)
2902 for (i = 0; i < priv->num_grps; i++) { 2913 for (i = 0; i < priv->num_grps; i++) {
2903 disable_irq(priv->gfargrp[i].interruptTransmit); 2914 disable_irq(priv->gfargrp[i].interruptTransmit);
2904 gfar_interrupt(priv->gfargrp[i].interruptTransmit, 2915 gfar_interrupt(priv->gfargrp[i].interruptTransmit,
2905 &priv->gfargrp[i]); 2916 &priv->gfargrp[i]);
2906 enable_irq(priv->gfargrp[i].interruptTransmit); 2917 enable_irq(priv->gfargrp[i].interruptTransmit);
2907 } 2918 }
2908 } 2919 }
@@ -2954,7 +2965,8 @@ static void adjust_link(struct net_device *dev)
2954 u32 ecntrl = gfar_read(&regs->ecntrl); 2965 u32 ecntrl = gfar_read(&regs->ecntrl);
2955 2966
2956 /* Now we make sure that we can be in full duplex mode. 2967 /* Now we make sure that we can be in full duplex mode.
2957 * If not, we operate in half-duplex mode. */ 2968 * If not, we operate in half-duplex mode.
2969 */
2958 if (phydev->duplex != priv->oldduplex) { 2970 if (phydev->duplex != priv->oldduplex) {
2959 new_state = 1; 2971 new_state = 1;
2960 if (!(phydev->duplex)) 2972 if (!(phydev->duplex))
@@ -2980,7 +2992,8 @@ static void adjust_link(struct net_device *dev)
2980 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); 2992 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
2981 2993
2982 /* Reduced mode distinguishes 2994 /* Reduced mode distinguishes
2983 * between 10 and 100 */ 2995 * between 10 and 100
2996 */
2984 if (phydev->speed == SPEED_100) 2997 if (phydev->speed == SPEED_100)
2985 ecntrl |= ECNTRL_R100; 2998 ecntrl |= ECNTRL_R100;
2986 else 2999 else
@@ -3019,7 +3032,8 @@ static void adjust_link(struct net_device *dev)
3019/* Update the hash table based on the current list of multicast 3032/* Update the hash table based on the current list of multicast
3020 * addresses we subscribe to. Also, change the promiscuity of 3033 * addresses we subscribe to. Also, change the promiscuity of
3021 * the device based on the flags (this function is called 3034 * the device based on the flags (this function is called
3022 * whenever dev->flags is changed */ 3035 * whenever dev->flags is changed
3036 */
3023static void gfar_set_multi(struct net_device *dev) 3037static void gfar_set_multi(struct net_device *dev)
3024{ 3038{
3025 struct netdev_hw_addr *ha; 3039 struct netdev_hw_addr *ha;
@@ -3081,7 +3095,8 @@ static void gfar_set_multi(struct net_device *dev)
3081 3095
3082 /* If we have extended hash tables, we need to 3096 /* If we have extended hash tables, we need to
3083 * clear the exact match registers to prepare for 3097 * clear the exact match registers to prepare for
3084 * setting them */ 3098 * setting them
3099 */
3085 if (priv->extended_hash) { 3100 if (priv->extended_hash) {
3086 em_num = GFAR_EM_NUM + 1; 3101 em_num = GFAR_EM_NUM + 1;
3087 gfar_clear_exact_match(dev); 3102 gfar_clear_exact_match(dev);
@@ -3107,13 +3122,14 @@ static void gfar_set_multi(struct net_device *dev)
3107 3122
3108 3123
3109/* Clears each of the exact match registers to zero, so they 3124/* Clears each of the exact match registers to zero, so they
3110 * don't interfere with normal reception */ 3125 * don't interfere with normal reception
3126 */
3111static void gfar_clear_exact_match(struct net_device *dev) 3127static void gfar_clear_exact_match(struct net_device *dev)
3112{ 3128{
3113 int idx; 3129 int idx;
3114 static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; 3130 static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
3115 3131
3116 for(idx = 1;idx < GFAR_EM_NUM + 1;idx++) 3132 for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
3117 gfar_set_mac_for_addr(dev, idx, zero_arr); 3133 gfar_set_mac_for_addr(dev, idx, zero_arr);
3118} 3134}
3119 3135
@@ -3129,7 +3145,8 @@ static void gfar_clear_exact_match(struct net_device *dev)
3129 * hash index which gaddr register to use, and the 5 other bits 3145 * hash index which gaddr register to use, and the 5 other bits
3130 * indicate which bit (assuming an IBM numbering scheme, which 3146 * indicate which bit (assuming an IBM numbering scheme, which
3131 * for PowerPC (tm) is usually the case) in the register holds 3147 * for PowerPC (tm) is usually the case) in the register holds
3132 * the entry. */ 3148 * the entry.
3149 */
3133static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr) 3150static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
3134{ 3151{
3135 u32 tempval; 3152 u32 tempval;
@@ -3161,8 +3178,9 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num,
3161 3178
3162 macptr += num*2; 3179 macptr += num*2;
3163 3180
3164 /* Now copy it into the mac registers backwards, cuz */ 3181 /* Now copy it into the mac registers backwards, cuz
3165 /* little endian is silly */ 3182 * little endian is silly
3183 */
3166 for (idx = 0; idx < ETH_ALEN; idx++) 3184 for (idx = 0; idx < ETH_ALEN; idx++)
3167 tmpbuf[ETH_ALEN - 1 - idx] = addr[idx]; 3185 tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];
3168 3186
@@ -3194,7 +3212,8 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
3194 3212
3195 /* Hmm... */ 3213 /* Hmm... */
3196 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv)) 3214 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
3197 netdev_dbg(dev, "error interrupt (ievent=0x%08x imask=0x%08x)\n", 3215 netdev_dbg(dev,
3216 "error interrupt (ievent=0x%08x imask=0x%08x)\n",
3198 events, gfar_read(&regs->imask)); 3217 events, gfar_read(&regs->imask));
3199 3218
3200 /* Update the error counters */ 3219 /* Update the error counters */
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 8a025570d97e..8971921cc1c8 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -46,18 +46,24 @@
46#include "gianfar.h" 46#include "gianfar.h"
47 47
48extern void gfar_start(struct net_device *dev); 48extern void gfar_start(struct net_device *dev);
49extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); 49extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
50 int rx_work_limit);
50 51
51#define GFAR_MAX_COAL_USECS 0xffff 52#define GFAR_MAX_COAL_USECS 0xffff
52#define GFAR_MAX_COAL_FRAMES 0xff 53#define GFAR_MAX_COAL_FRAMES 0xff
53static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, 54static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
54 u64 * buf); 55 u64 *buf);
55static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf); 56static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf);
56static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals); 57static int gfar_gcoalesce(struct net_device *dev,
57static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals); 58 struct ethtool_coalesce *cvals);
58static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals); 59static int gfar_scoalesce(struct net_device *dev,
59static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals); 60 struct ethtool_coalesce *cvals);
60static void gfar_gdrvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo); 61static void gfar_gringparam(struct net_device *dev,
62 struct ethtool_ringparam *rvals);
63static int gfar_sringparam(struct net_device *dev,
64 struct ethtool_ringparam *rvals);
65static void gfar_gdrvinfo(struct net_device *dev,
66 struct ethtool_drvinfo *drvinfo);
61 67
62static const char stat_gstrings[][ETH_GSTRING_LEN] = { 68static const char stat_gstrings[][ETH_GSTRING_LEN] = {
63 "rx-dropped-by-kernel", 69 "rx-dropped-by-kernel",
@@ -130,14 +136,15 @@ static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf)
130 memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN); 136 memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
131 else 137 else
132 memcpy(buf, stat_gstrings, 138 memcpy(buf, stat_gstrings,
133 GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN); 139 GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
134} 140}
135 141
136/* Fill in an array of 64-bit statistics from various sources. 142/* Fill in an array of 64-bit statistics from various sources.
137 * This array will be appended to the end of the ethtool_stats 143 * This array will be appended to the end of the ethtool_stats
138 * structure, and returned to user space 144 * structure, and returned to user space
139 */ 145 */
140static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 * buf) 146static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
147 u64 *buf)
141{ 148{
142 int i; 149 int i;
143 struct gfar_private *priv = netdev_priv(dev); 150 struct gfar_private *priv = netdev_priv(dev);
@@ -174,8 +181,8 @@ static int gfar_sset_count(struct net_device *dev, int sset)
174} 181}
175 182
176/* Fills in the drvinfo structure with some basic info */ 183/* Fills in the drvinfo structure with some basic info */
177static void gfar_gdrvinfo(struct net_device *dev, struct 184static void gfar_gdrvinfo(struct net_device *dev,
178 ethtool_drvinfo *drvinfo) 185 struct ethtool_drvinfo *drvinfo)
179{ 186{
180 strncpy(drvinfo->driver, DRV_NAME, GFAR_INFOSTR_LEN); 187 strncpy(drvinfo->driver, DRV_NAME, GFAR_INFOSTR_LEN);
181 strncpy(drvinfo->version, gfar_driver_version, GFAR_INFOSTR_LEN); 188 strncpy(drvinfo->version, gfar_driver_version, GFAR_INFOSTR_LEN);
@@ -226,7 +233,8 @@ static int gfar_reglen(struct net_device *dev)
226} 233}
227 234
228/* Return a dump of the GFAR register space */ 235/* Return a dump of the GFAR register space */
229static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf) 236static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs,
237 void *regbuf)
230{ 238{
231 int i; 239 int i;
232 struct gfar_private *priv = netdev_priv(dev); 240 struct gfar_private *priv = netdev_priv(dev);
@@ -239,7 +247,8 @@ static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, voi
239 247
240/* Convert microseconds to ethernet clock ticks, which changes 248/* Convert microseconds to ethernet clock ticks, which changes
241 * depending on what speed the controller is running at */ 249 * depending on what speed the controller is running at */
242static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int usecs) 250static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
251 unsigned int usecs)
243{ 252{
244 unsigned int count; 253 unsigned int count;
245 254
@@ -263,7 +272,8 @@ static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int use
263} 272}
264 273
265/* Convert ethernet clock ticks to microseconds */ 274/* Convert ethernet clock ticks to microseconds */
266static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int ticks) 275static unsigned int gfar_ticks2usecs(struct gfar_private *priv,
276 unsigned int ticks)
267{ 277{
268 unsigned int count; 278 unsigned int count;
269 279
@@ -288,7 +298,8 @@ static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int tic
288 298
289/* Get the coalescing parameters, and put them in the cvals 299/* Get the coalescing parameters, and put them in the cvals
290 * structure. */ 300 * structure. */
291static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals) 301static int gfar_gcoalesce(struct net_device *dev,
302 struct ethtool_coalesce *cvals)
292{ 303{
293 struct gfar_private *priv = netdev_priv(dev); 304 struct gfar_private *priv = netdev_priv(dev);
294 struct gfar_priv_rx_q *rx_queue = NULL; 305 struct gfar_priv_rx_q *rx_queue = NULL;
@@ -353,7 +364,8 @@ static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
353 * Both cvals->*_usecs and cvals->*_frames have to be > 0 364 * Both cvals->*_usecs and cvals->*_frames have to be > 0
354 * in order for coalescing to be active 365 * in order for coalescing to be active
355 */ 366 */
356static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals) 367static int gfar_scoalesce(struct net_device *dev,
368 struct ethtool_coalesce *cvals)
357{ 369{
358 struct gfar_private *priv = netdev_priv(dev); 370 struct gfar_private *priv = netdev_priv(dev);
359 int i = 0; 371 int i = 0;
@@ -364,7 +376,8 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
364 /* Set up rx coalescing */ 376 /* Set up rx coalescing */
365 /* As of now, we will enable/disable coalescing for all 377 /* As of now, we will enable/disable coalescing for all
366 * queues together in case of eTSEC2, this will be modified 378 * queues together in case of eTSEC2, this will be modified
367 * along with the ethtool interface */ 379 * along with the ethtool interface
380 */
368 if ((cvals->rx_coalesce_usecs == 0) || 381 if ((cvals->rx_coalesce_usecs == 0) ||
369 (cvals->rx_max_coalesced_frames == 0)) { 382 (cvals->rx_max_coalesced_frames == 0)) {
370 for (i = 0; i < priv->num_rx_queues; i++) 383 for (i = 0; i < priv->num_rx_queues; i++)
@@ -433,7 +446,8 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
433/* Fills in rvals with the current ring parameters. Currently, 446/* Fills in rvals with the current ring parameters. Currently,
434 * rx, rx_mini, and rx_jumbo rings are the same size, as mini and 447 * rx, rx_mini, and rx_jumbo rings are the same size, as mini and
435 * jumbo are ignored by the driver */ 448 * jumbo are ignored by the driver */
436static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals) 449static void gfar_gringparam(struct net_device *dev,
450 struct ethtool_ringparam *rvals)
437{ 451{
438 struct gfar_private *priv = netdev_priv(dev); 452 struct gfar_private *priv = netdev_priv(dev);
439 struct gfar_priv_tx_q *tx_queue = NULL; 453 struct gfar_priv_tx_q *tx_queue = NULL;
@@ -459,8 +473,10 @@ static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rv
459/* Change the current ring parameters, stopping the controller if 473/* Change the current ring parameters, stopping the controller if
460 * necessary so that we don't mess things up while we're in 474 * necessary so that we don't mess things up while we're in
461 * motion. We wait for the ring to be clean before reallocating 475 * motion. We wait for the ring to be clean before reallocating
462 * the rings. */ 476 * the rings.
463static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals) 477 */
478static int gfar_sringparam(struct net_device *dev,
479 struct ethtool_ringparam *rvals)
464{ 480{
465 struct gfar_private *priv = netdev_priv(dev); 481 struct gfar_private *priv = netdev_priv(dev);
466 int err = 0, i = 0; 482 int err = 0, i = 0;
@@ -486,7 +502,8 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
486 unsigned long flags; 502 unsigned long flags;
487 503
488 /* Halt TX and RX, and process the frames which 504 /* Halt TX and RX, and process the frames which
489 * have already been received */ 505 * have already been received
506 */
490 local_irq_save(flags); 507 local_irq_save(flags);
491 lock_tx_qs(priv); 508 lock_tx_qs(priv);
492 lock_rx_qs(priv); 509 lock_rx_qs(priv);
@@ -499,7 +516,7 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
499 516
500 for (i = 0; i < priv->num_rx_queues; i++) 517 for (i = 0; i < priv->num_rx_queues; i++)
501 gfar_clean_rx_ring(priv->rx_queue[i], 518 gfar_clean_rx_ring(priv->rx_queue[i],
502 priv->rx_queue[i]->rx_ring_size); 519 priv->rx_queue[i]->rx_ring_size);
503 520
504 /* Now we take down the rings to rebuild them */ 521 /* Now we take down the rings to rebuild them */
505 stop_gfar(dev); 522 stop_gfar(dev);
@@ -509,7 +526,8 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
509 for (i = 0; i < priv->num_rx_queues; i++) { 526 for (i = 0; i < priv->num_rx_queues; i++) {
510 priv->rx_queue[i]->rx_ring_size = rvals->rx_pending; 527 priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
511 priv->tx_queue[i]->tx_ring_size = rvals->tx_pending; 528 priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
512 priv->tx_queue[i]->num_txbdfree = priv->tx_queue[i]->tx_ring_size; 529 priv->tx_queue[i]->num_txbdfree =
530 priv->tx_queue[i]->tx_ring_size;
513 } 531 }
514 532
515 /* Rebuild the rings with the new size */ 533 /* Rebuild the rings with the new size */
@@ -535,7 +553,8 @@ int gfar_set_features(struct net_device *dev, netdev_features_t features)
535 553
536 if (dev->flags & IFF_UP) { 554 if (dev->flags & IFF_UP) {
537 /* Halt TX and RX, and process the frames which 555 /* Halt TX and RX, and process the frames which
538 * have already been received */ 556 * have already been received
557 */
539 local_irq_save(flags); 558 local_irq_save(flags);
540 lock_tx_qs(priv); 559 lock_tx_qs(priv);
541 lock_rx_qs(priv); 560 lock_rx_qs(priv);
@@ -548,7 +567,7 @@ int gfar_set_features(struct net_device *dev, netdev_features_t features)
548 567
549 for (i = 0; i < priv->num_rx_queues; i++) 568 for (i = 0; i < priv->num_rx_queues; i++)
550 gfar_clean_rx_ring(priv->rx_queue[i], 569 gfar_clean_rx_ring(priv->rx_queue[i],
551 priv->rx_queue[i]->rx_ring_size); 570 priv->rx_queue[i]->rx_ring_size);
552 571
553 /* Now we take down the rings to rebuild them */ 572 /* Now we take down the rings to rebuild them */
554 stop_gfar(dev); 573 stop_gfar(dev);
@@ -564,12 +583,14 @@ int gfar_set_features(struct net_device *dev, netdev_features_t features)
564static uint32_t gfar_get_msglevel(struct net_device *dev) 583static uint32_t gfar_get_msglevel(struct net_device *dev)
565{ 584{
566 struct gfar_private *priv = netdev_priv(dev); 585 struct gfar_private *priv = netdev_priv(dev);
586
567 return priv->msg_enable; 587 return priv->msg_enable;
568} 588}
569 589
570static void gfar_set_msglevel(struct net_device *dev, uint32_t data) 590static void gfar_set_msglevel(struct net_device *dev, uint32_t data)
571{ 591{
572 struct gfar_private *priv = netdev_priv(dev); 592 struct gfar_private *priv = netdev_priv(dev);
593
573 priv->msg_enable = data; 594 priv->msg_enable = data;
574} 595}
575 596
@@ -614,14 +635,14 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
614 635
615 if (ethflow & RXH_L2DA) { 636 if (ethflow & RXH_L2DA) {
616 fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH | 637 fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH |
617 RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0; 638 RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
618 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; 639 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
619 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; 640 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
620 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); 641 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
621 priv->cur_filer_idx = priv->cur_filer_idx - 1; 642 priv->cur_filer_idx = priv->cur_filer_idx - 1;
622 643
623 fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH | 644 fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
624 RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0; 645 RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
625 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; 646 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
626 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; 647 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
627 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); 648 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -630,7 +651,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
630 651
631 if (ethflow & RXH_VLAN) { 652 if (ethflow & RXH_VLAN) {
632 fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH | 653 fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
633 RQFCR_AND | RQFCR_HASHTBL_0; 654 RQFCR_AND | RQFCR_HASHTBL_0;
634 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); 655 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
635 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; 656 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
636 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; 657 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
@@ -639,7 +660,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
639 660
640 if (ethflow & RXH_IP_SRC) { 661 if (ethflow & RXH_IP_SRC) {
641 fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH | 662 fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
642 RQFCR_AND | RQFCR_HASHTBL_0; 663 RQFCR_AND | RQFCR_HASHTBL_0;
643 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; 664 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
644 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; 665 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
645 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); 666 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -648,7 +669,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
648 669
649 if (ethflow & (RXH_IP_DST)) { 670 if (ethflow & (RXH_IP_DST)) {
650 fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH | 671 fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
651 RQFCR_AND | RQFCR_HASHTBL_0; 672 RQFCR_AND | RQFCR_HASHTBL_0;
652 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; 673 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
653 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; 674 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
654 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); 675 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -657,7 +678,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
657 678
658 if (ethflow & RXH_L3_PROTO) { 679 if (ethflow & RXH_L3_PROTO) {
659 fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH | 680 fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
660 RQFCR_AND | RQFCR_HASHTBL_0; 681 RQFCR_AND | RQFCR_HASHTBL_0;
661 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; 682 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
662 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; 683 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
663 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); 684 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -666,7 +687,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
666 687
667 if (ethflow & RXH_L4_B_0_1) { 688 if (ethflow & RXH_L4_B_0_1) {
668 fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH | 689 fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
669 RQFCR_AND | RQFCR_HASHTBL_0; 690 RQFCR_AND | RQFCR_HASHTBL_0;
670 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; 691 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
671 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; 692 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
672 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); 693 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -675,7 +696,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
675 696
676 if (ethflow & RXH_L4_B_2_3) { 697 if (ethflow & RXH_L4_B_2_3) {
677 fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH | 698 fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
678 RQFCR_AND | RQFCR_HASHTBL_0; 699 RQFCR_AND | RQFCR_HASHTBL_0;
679 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; 700 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
680 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; 701 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
681 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); 702 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -683,7 +704,8 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
683 } 704 }
684} 705}
685 706
686static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u64 class) 707static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
708 u64 class)
687{ 709{
688 unsigned int last_rule_idx = priv->cur_filer_idx; 710 unsigned int last_rule_idx = priv->cur_filer_idx;
689 unsigned int cmp_rqfpr; 711 unsigned int cmp_rqfpr;
@@ -694,9 +716,9 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
694 int ret = 1; 716 int ret = 1;
695 717
696 local_rqfpr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1), 718 local_rqfpr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
697 GFP_KERNEL); 719 GFP_KERNEL);
698 local_rqfcr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1), 720 local_rqfcr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
699 GFP_KERNEL); 721 GFP_KERNEL);
700 if (!local_rqfpr || !local_rqfcr) { 722 if (!local_rqfpr || !local_rqfcr) {
701 pr_err("Out of memory\n"); 723 pr_err("Out of memory\n");
702 ret = 0; 724 ret = 0;
@@ -726,9 +748,9 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
726 local_rqfpr[j] = priv->ftp_rqfpr[i]; 748 local_rqfpr[j] = priv->ftp_rqfpr[i];
727 local_rqfcr[j] = priv->ftp_rqfcr[i]; 749 local_rqfcr[j] = priv->ftp_rqfcr[i];
728 j--; 750 j--;
729 if ((priv->ftp_rqfcr[i] == (RQFCR_PID_PARSE | 751 if ((priv->ftp_rqfcr[i] ==
730 RQFCR_CLE |RQFCR_AND)) && 752 (RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND)) &&
731 (priv->ftp_rqfpr[i] == cmp_rqfpr)) 753 (priv->ftp_rqfpr[i] == cmp_rqfpr))
732 break; 754 break;
733 } 755 }
734 756
@@ -743,12 +765,12 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
743 */ 765 */
744 for (l = i+1; l < MAX_FILER_IDX; l++) { 766 for (l = i+1; l < MAX_FILER_IDX; l++) {
745 if ((priv->ftp_rqfcr[l] & RQFCR_CLE) && 767 if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
746 !(priv->ftp_rqfcr[l] & RQFCR_AND)) { 768 !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
747 priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT | 769 priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
748 RQFCR_HASHTBL_0 | RQFCR_PID_MASK; 770 RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
749 priv->ftp_rqfpr[l] = FPR_FILER_MASK; 771 priv->ftp_rqfpr[l] = FPR_FILER_MASK;
750 gfar_write_filer(priv, l, priv->ftp_rqfcr[l], 772 gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
751 priv->ftp_rqfpr[l]); 773 priv->ftp_rqfpr[l]);
752 break; 774 break;
753 } 775 }
754 776
@@ -773,7 +795,7 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
773 priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k]; 795 priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
774 priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k]; 796 priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
775 gfar_write_filer(priv, priv->cur_filer_idx, 797 gfar_write_filer(priv, priv->cur_filer_idx,
776 local_rqfcr[k], local_rqfpr[k]); 798 local_rqfcr[k], local_rqfpr[k]);
777 if (!priv->cur_filer_idx) 799 if (!priv->cur_filer_idx)
778 break; 800 break;
779 priv->cur_filer_idx = priv->cur_filer_idx - 1; 801 priv->cur_filer_idx = priv->cur_filer_idx - 1;
@@ -785,7 +807,8 @@ err:
785 return ret; 807 return ret;
786} 808}
787 809
788static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd) 810static int gfar_set_hash_opts(struct gfar_private *priv,
811 struct ethtool_rxnfc *cmd)
789{ 812{
790 /* write the filer rules here */ 813 /* write the filer rules here */
791 if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type)) 814 if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
@@ -810,10 +833,10 @@ static int gfar_check_filer_hardware(struct gfar_private *priv)
810 i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM; 833 i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM;
811 if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) { 834 if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) {
812 netdev_info(priv->ndev, 835 netdev_info(priv->ndev,
813 "Receive Queue Filtering enabled\n"); 836 "Receive Queue Filtering enabled\n");
814 } else { 837 } else {
815 netdev_warn(priv->ndev, 838 netdev_warn(priv->ndev,
816 "Receive Queue Filtering disabled\n"); 839 "Receive Queue Filtering disabled\n");
817 return -EOPNOTSUPP; 840 return -EOPNOTSUPP;
818 } 841 }
819 } 842 }
@@ -823,16 +846,17 @@ static int gfar_check_filer_hardware(struct gfar_private *priv)
823 i &= RCTRL_PRSDEP_MASK; 846 i &= RCTRL_PRSDEP_MASK;
824 if (i == RCTRL_PRSDEP_MASK) { 847 if (i == RCTRL_PRSDEP_MASK) {
825 netdev_info(priv->ndev, 848 netdev_info(priv->ndev,
826 "Receive Queue Filtering enabled\n"); 849 "Receive Queue Filtering enabled\n");
827 } else { 850 } else {
828 netdev_warn(priv->ndev, 851 netdev_warn(priv->ndev,
829 "Receive Queue Filtering disabled\n"); 852 "Receive Queue Filtering disabled\n");
830 return -EOPNOTSUPP; 853 return -EOPNOTSUPP;
831 } 854 }
832 } 855 }
833 856
834 /* Sets the properties for arbitrary filer rule 857 /* Sets the properties for arbitrary filer rule
835 * to the first 4 Layer 4 Bytes */ 858 * to the first 4 Layer 4 Bytes
859 */
836 regs->rbifx = 0xC0C1C2C3; 860 regs->rbifx = 0xC0C1C2C3;
837 return 0; 861 return 0;
838} 862}
@@ -870,14 +894,14 @@ static void gfar_set_mask(u32 mask, struct filer_table *tab)
870static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab) 894static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab)
871{ 895{
872 gfar_set_mask(mask, tab); 896 gfar_set_mask(mask, tab);
873 tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE 897 tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE |
874 | RQFCR_AND; 898 RQFCR_AND;
875 tab->fe[tab->index].prop = value; 899 tab->fe[tab->index].prop = value;
876 tab->index++; 900 tab->index++;
877} 901}
878 902
879static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag, 903static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
880 struct filer_table *tab) 904 struct filer_table *tab)
881{ 905{
882 gfar_set_mask(mask, tab); 906 gfar_set_mask(mask, tab);
883 tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag; 907 tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag;
@@ -885,8 +909,7 @@ static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
885 tab->index++; 909 tab->index++;
886} 910}
887 911
888/* 912/* For setting a tuple of value and mask of type flag
889 * For setting a tuple of value and mask of type flag
890 * Example: 913 * Example:
891 * IP-Src = 10.0.0.0/255.0.0.0 914 * IP-Src = 10.0.0.0/255.0.0.0
892 * value: 0x0A000000 mask: FF000000 flag: RQFPR_IPV4 915 * value: 0x0A000000 mask: FF000000 flag: RQFPR_IPV4
@@ -901,7 +924,7 @@ static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
901 * Further the all masks are one-padded for better hardware efficiency. 924 * Further the all masks are one-padded for better hardware efficiency.
902 */ 925 */
903static void gfar_set_attribute(u32 value, u32 mask, u32 flag, 926static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
904 struct filer_table *tab) 927 struct filer_table *tab)
905{ 928{
906 switch (flag) { 929 switch (flag) {
907 /* 3bit */ 930 /* 3bit */
@@ -959,7 +982,8 @@ static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
959 982
960/* Translates value and mask for UDP, TCP or SCTP */ 983/* Translates value and mask for UDP, TCP or SCTP */
961static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value, 984static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
962 struct ethtool_tcpip4_spec *mask, struct filer_table *tab) 985 struct ethtool_tcpip4_spec *mask,
986 struct filer_table *tab)
963{ 987{
964 gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab); 988 gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
965 gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab); 989 gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
@@ -970,97 +994,92 @@ static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
970 994
971/* Translates value and mask for RAW-IP4 */ 995/* Translates value and mask for RAW-IP4 */
972static void gfar_set_user_ip(struct ethtool_usrip4_spec *value, 996static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
973 struct ethtool_usrip4_spec *mask, struct filer_table *tab) 997 struct ethtool_usrip4_spec *mask,
998 struct filer_table *tab)
974{ 999{
975 gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab); 1000 gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
976 gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab); 1001 gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
977 gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab); 1002 gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
978 gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab); 1003 gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
979 gfar_set_attribute(value->l4_4_bytes, mask->l4_4_bytes, RQFCR_PID_ARB, 1004 gfar_set_attribute(value->l4_4_bytes, mask->l4_4_bytes, RQFCR_PID_ARB,
980 tab); 1005 tab);
981 1006
982} 1007}
983 1008
984/* Translates value and mask for ETHER spec */ 1009/* Translates value and mask for ETHER spec */
985static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask, 1010static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
986 struct filer_table *tab) 1011 struct filer_table *tab)
987{ 1012{
988 u32 upper_temp_mask = 0; 1013 u32 upper_temp_mask = 0;
989 u32 lower_temp_mask = 0; 1014 u32 lower_temp_mask = 0;
1015
990 /* Source address */ 1016 /* Source address */
991 if (!is_broadcast_ether_addr(mask->h_source)) { 1017 if (!is_broadcast_ether_addr(mask->h_source)) {
992
993 if (is_zero_ether_addr(mask->h_source)) { 1018 if (is_zero_ether_addr(mask->h_source)) {
994 upper_temp_mask = 0xFFFFFFFF; 1019 upper_temp_mask = 0xFFFFFFFF;
995 lower_temp_mask = 0xFFFFFFFF; 1020 lower_temp_mask = 0xFFFFFFFF;
996 } else { 1021 } else {
997 upper_temp_mask = mask->h_source[0] << 16 1022 upper_temp_mask = mask->h_source[0] << 16 |
998 | mask->h_source[1] << 8 1023 mask->h_source[1] << 8 |
999 | mask->h_source[2]; 1024 mask->h_source[2];
1000 lower_temp_mask = mask->h_source[3] << 16 1025 lower_temp_mask = mask->h_source[3] << 16 |
1001 | mask->h_source[4] << 8 1026 mask->h_source[4] << 8 |
1002 | mask->h_source[5]; 1027 mask->h_source[5];
1003 } 1028 }
1004 /* Upper 24bit */ 1029 /* Upper 24bit */
1005 gfar_set_attribute( 1030 gfar_set_attribute(value->h_source[0] << 16 |
1006 value->h_source[0] << 16 | value->h_source[1] 1031 value->h_source[1] << 8 |
1007 << 8 | value->h_source[2], 1032 value->h_source[2],
1008 upper_temp_mask, RQFCR_PID_SAH, tab); 1033 upper_temp_mask, RQFCR_PID_SAH, tab);
1009 /* And the same for the lower part */ 1034 /* And the same for the lower part */
1010 gfar_set_attribute( 1035 gfar_set_attribute(value->h_source[3] << 16 |
1011 value->h_source[3] << 16 | value->h_source[4] 1036 value->h_source[4] << 8 |
1012 << 8 | value->h_source[5], 1037 value->h_source[5],
1013 lower_temp_mask, RQFCR_PID_SAL, tab); 1038 lower_temp_mask, RQFCR_PID_SAL, tab);
1014 } 1039 }
1015 /* Destination address */ 1040 /* Destination address */
1016 if (!is_broadcast_ether_addr(mask->h_dest)) { 1041 if (!is_broadcast_ether_addr(mask->h_dest)) {
1017
1018 /* Special for destination is limited broadcast */ 1042 /* Special for destination is limited broadcast */
1019 if ((is_broadcast_ether_addr(value->h_dest) 1043 if ((is_broadcast_ether_addr(value->h_dest) &&
1020 && is_zero_ether_addr(mask->h_dest))) { 1044 is_zero_ether_addr(mask->h_dest))) {
1021 gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab); 1045 gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab);
1022 } else { 1046 } else {
1023
1024 if (is_zero_ether_addr(mask->h_dest)) { 1047 if (is_zero_ether_addr(mask->h_dest)) {
1025 upper_temp_mask = 0xFFFFFFFF; 1048 upper_temp_mask = 0xFFFFFFFF;
1026 lower_temp_mask = 0xFFFFFFFF; 1049 lower_temp_mask = 0xFFFFFFFF;
1027 } else { 1050 } else {
1028 upper_temp_mask = mask->h_dest[0] << 16 1051 upper_temp_mask = mask->h_dest[0] << 16 |
1029 | mask->h_dest[1] << 8 1052 mask->h_dest[1] << 8 |
1030 | mask->h_dest[2]; 1053 mask->h_dest[2];
1031 lower_temp_mask = mask->h_dest[3] << 16 1054 lower_temp_mask = mask->h_dest[3] << 16 |
1032 | mask->h_dest[4] << 8 1055 mask->h_dest[4] << 8 |
1033 | mask->h_dest[5]; 1056 mask->h_dest[5];
1034 } 1057 }
1035 1058
1036 /* Upper 24bit */ 1059 /* Upper 24bit */
1037 gfar_set_attribute( 1060 gfar_set_attribute(value->h_dest[0] << 16 |
1038 value->h_dest[0] << 16 1061 value->h_dest[1] << 8 |
1039 | value->h_dest[1] << 8 1062 value->h_dest[2],
1040 | value->h_dest[2], 1063 upper_temp_mask, RQFCR_PID_DAH, tab);
1041 upper_temp_mask, RQFCR_PID_DAH, tab);
1042 /* And the same for the lower part */ 1064 /* And the same for the lower part */
1043 gfar_set_attribute( 1065 gfar_set_attribute(value->h_dest[3] << 16 |
1044 value->h_dest[3] << 16 1066 value->h_dest[4] << 8 |
1045 | value->h_dest[4] << 8 1067 value->h_dest[5],
1046 | value->h_dest[5], 1068 lower_temp_mask, RQFCR_PID_DAL, tab);
1047 lower_temp_mask, RQFCR_PID_DAL, tab);
1048 } 1069 }
1049 } 1070 }
1050 1071
1051 gfar_set_attribute(value->h_proto, mask->h_proto, RQFCR_PID_ETY, tab); 1072 gfar_set_attribute(value->h_proto, mask->h_proto, RQFCR_PID_ETY, tab);
1052
1053} 1073}
1054 1074
1055/* Convert a rule to binary filter format of gianfar */ 1075/* Convert a rule to binary filter format of gianfar */
1056static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule, 1076static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
1057 struct filer_table *tab) 1077 struct filer_table *tab)
1058{ 1078{
1059 u32 vlan = 0, vlan_mask = 0; 1079 u32 vlan = 0, vlan_mask = 0;
1060 u32 id = 0, id_mask = 0; 1080 u32 id = 0, id_mask = 0;
1061 u32 cfi = 0, cfi_mask = 0; 1081 u32 cfi = 0, cfi_mask = 0;
1062 u32 prio = 0, prio_mask = 0; 1082 u32 prio = 0, prio_mask = 0;
1063
1064 u32 old_index = tab->index; 1083 u32 old_index = tab->index;
1065 1084
1066 /* Check if vlan is wanted */ 1085 /* Check if vlan is wanted */
@@ -1076,13 +1095,16 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
1076 id_mask = rule->m_ext.vlan_tci & VLAN_VID_MASK; 1095 id_mask = rule->m_ext.vlan_tci & VLAN_VID_MASK;
1077 cfi = rule->h_ext.vlan_tci & VLAN_CFI_MASK; 1096 cfi = rule->h_ext.vlan_tci & VLAN_CFI_MASK;
1078 cfi_mask = rule->m_ext.vlan_tci & VLAN_CFI_MASK; 1097 cfi_mask = rule->m_ext.vlan_tci & VLAN_CFI_MASK;
1079 prio = (rule->h_ext.vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; 1098 prio = (rule->h_ext.vlan_tci & VLAN_PRIO_MASK) >>
1080 prio_mask = (rule->m_ext.vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; 1099 VLAN_PRIO_SHIFT;
1100 prio_mask = (rule->m_ext.vlan_tci & VLAN_PRIO_MASK) >>
1101 VLAN_PRIO_SHIFT;
1081 1102
1082 if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) { 1103 if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
1083 vlan |= RQFPR_CFI; 1104 vlan |= RQFPR_CFI;
1084 vlan_mask |= RQFPR_CFI; 1105 vlan_mask |= RQFPR_CFI;
1085 } else if (cfi != VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) { 1106 } else if (cfi != VLAN_TAG_PRESENT &&
1107 cfi_mask == VLAN_TAG_PRESENT) {
1086 vlan_mask |= RQFPR_CFI; 1108 vlan_mask |= RQFPR_CFI;
1087 } 1109 }
1088 } 1110 }
@@ -1090,34 +1112,36 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
1090 switch (rule->flow_type & ~FLOW_EXT) { 1112 switch (rule->flow_type & ~FLOW_EXT) {
1091 case TCP_V4_FLOW: 1113 case TCP_V4_FLOW:
1092 gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan, 1114 gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan,
1093 RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab); 1115 RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
1094 gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec, 1116 gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec,
1095 &rule->m_u.tcp_ip4_spec, tab); 1117 &rule->m_u.tcp_ip4_spec, tab);
1096 break; 1118 break;
1097 case UDP_V4_FLOW: 1119 case UDP_V4_FLOW:
1098 gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan, 1120 gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan,
1099 RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab); 1121 RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
1100 gfar_set_basic_ip(&rule->h_u.udp_ip4_spec, 1122 gfar_set_basic_ip(&rule->h_u.udp_ip4_spec,
1101 &rule->m_u.udp_ip4_spec, tab); 1123 &rule->m_u.udp_ip4_spec, tab);
1102 break; 1124 break;
1103 case SCTP_V4_FLOW: 1125 case SCTP_V4_FLOW:
1104 gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask, 1126 gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
1105 tab); 1127 tab);
1106 gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab); 1128 gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab);
1107 gfar_set_basic_ip((struct ethtool_tcpip4_spec *) &rule->h_u, 1129 gfar_set_basic_ip((struct ethtool_tcpip4_spec *)&rule->h_u,
1108 (struct ethtool_tcpip4_spec *) &rule->m_u, tab); 1130 (struct ethtool_tcpip4_spec *)&rule->m_u,
1131 tab);
1109 break; 1132 break;
1110 case IP_USER_FLOW: 1133 case IP_USER_FLOW:
1111 gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask, 1134 gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
1112 tab); 1135 tab);
1113 gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u, 1136 gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u,
1114 (struct ethtool_usrip4_spec *) &rule->m_u, tab); 1137 (struct ethtool_usrip4_spec *) &rule->m_u,
1138 tab);
1115 break; 1139 break;
1116 case ETHER_FLOW: 1140 case ETHER_FLOW:
1117 if (vlan) 1141 if (vlan)
1118 gfar_set_parse_bits(vlan, vlan_mask, tab); 1142 gfar_set_parse_bits(vlan, vlan_mask, tab);
1119 gfar_set_ether((struct ethhdr *) &rule->h_u, 1143 gfar_set_ether((struct ethhdr *) &rule->h_u,
1120 (struct ethhdr *) &rule->m_u, tab); 1144 (struct ethhdr *) &rule->m_u, tab);
1121 break; 1145 break;
1122 default: 1146 default:
1123 return -1; 1147 return -1;
@@ -1152,7 +1176,9 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
1152 tab->fe[tab->index - 1].ctrl |= RQFCR_CLE; 1176 tab->fe[tab->index - 1].ctrl |= RQFCR_CLE;
1153 } 1177 }
1154 1178
1155 /* In rare cases the cache can be full while there is free space in hw */ 1179 /* In rare cases the cache can be full while there is
1180 * free space in hw
1181 */
1156 if (tab->index > MAX_FILER_CACHE_IDX - 1) 1182 if (tab->index > MAX_FILER_CACHE_IDX - 1)
1157 return -EBUSY; 1183 return -EBUSY;
1158 1184
@@ -1161,7 +1187,7 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
1161 1187
1162/* Copy size filer entries */ 1188/* Copy size filer entries */
1163static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0], 1189static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
1164 struct gfar_filer_entry src[0], s32 size) 1190 struct gfar_filer_entry src[0], s32 size)
1165{ 1191{
1166 while (size > 0) { 1192 while (size > 0) {
1167 size--; 1193 size--;
@@ -1171,10 +1197,12 @@ static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
1171} 1197}
1172 1198
1173/* Delete the contents of the filer-table between start and end 1199/* Delete the contents of the filer-table between start and end
1174 * and collapse them */ 1200 * and collapse them
1201 */
1175static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab) 1202static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
1176{ 1203{
1177 int length; 1204 int length;
1205
1178 if (end > MAX_FILER_CACHE_IDX || end < begin) 1206 if (end > MAX_FILER_CACHE_IDX || end < begin)
1179 return -EINVAL; 1207 return -EINVAL;
1180 1208
@@ -1200,14 +1228,14 @@ static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
1200 1228
1201/* Make space on the wanted location */ 1229/* Make space on the wanted location */
1202static int gfar_expand_filer_entries(u32 begin, u32 length, 1230static int gfar_expand_filer_entries(u32 begin, u32 length,
1203 struct filer_table *tab) 1231 struct filer_table *tab)
1204{ 1232{
1205 if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX || begin 1233 if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
1206 > MAX_FILER_CACHE_IDX) 1234 begin > MAX_FILER_CACHE_IDX)
1207 return -EINVAL; 1235 return -EINVAL;
1208 1236
1209 gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]), 1237 gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
1210 tab->index - length + 1); 1238 tab->index - length + 1);
1211 1239
1212 tab->index += length; 1240 tab->index += length;
1213 return 0; 1241 return 0;
@@ -1215,9 +1243,10 @@ static int gfar_expand_filer_entries(u32 begin, u32 length,
1215 1243
1216static int gfar_get_next_cluster_start(int start, struct filer_table *tab) 1244static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
1217{ 1245{
1218 for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); start++) { 1246 for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
1219 if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) 1247 start++) {
1220 == (RQFCR_AND | RQFCR_CLE)) 1248 if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
1249 (RQFCR_AND | RQFCR_CLE))
1221 return start; 1250 return start;
1222 } 1251 }
1223 return -1; 1252 return -1;
@@ -1225,16 +1254,16 @@ static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
1225 1254
1226static int gfar_get_next_cluster_end(int start, struct filer_table *tab) 1255static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
1227{ 1256{
1228 for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); start++) { 1257 for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
1229 if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) 1258 start++) {
1230 == (RQFCR_CLE)) 1259 if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
1260 (RQFCR_CLE))
1231 return start; 1261 return start;
1232 } 1262 }
1233 return -1; 1263 return -1;
1234} 1264}
1235 1265
1236/* 1266/* Uses hardwares clustering option to reduce
1237 * Uses hardwares clustering option to reduce
1238 * the number of filer table entries 1267 * the number of filer table entries
1239 */ 1268 */
1240static void gfar_cluster_filer(struct filer_table *tab) 1269static void gfar_cluster_filer(struct filer_table *tab)
@@ -1244,8 +1273,7 @@ static void gfar_cluster_filer(struct filer_table *tab)
1244 while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) { 1273 while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
1245 j = i; 1274 j = i;
1246 while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) { 1275 while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
1247 /* 1276 /* The cluster entries self and the previous one
1248 * The cluster entries self and the previous one
1249 * (a mask) must be identical! 1277 * (a mask) must be identical!
1250 */ 1278 */
1251 if (tab->fe[i].ctrl != tab->fe[j].ctrl) 1279 if (tab->fe[i].ctrl != tab->fe[j].ctrl)
@@ -1260,21 +1288,21 @@ static void gfar_cluster_filer(struct filer_table *tab)
1260 jend = gfar_get_next_cluster_end(j, tab); 1288 jend = gfar_get_next_cluster_end(j, tab);
1261 if (jend == -1 || iend == -1) 1289 if (jend == -1 || iend == -1)
1262 break; 1290 break;
1263 /* 1291
1264 * First we make some free space, where our cluster 1292 /* First we make some free space, where our cluster
1265 * element should be. Then we copy it there and finally 1293 * element should be. Then we copy it there and finally
1266 * delete in from its old location. 1294 * delete in from its old location.
1267 */ 1295 */
1268 1296 if (gfar_expand_filer_entries(iend, (jend - j), tab) ==
1269 if (gfar_expand_filer_entries(iend, (jend - j), tab) 1297 -EINVAL)
1270 == -EINVAL)
1271 break; 1298 break;
1272 1299
1273 gfar_copy_filer_entries(&(tab->fe[iend + 1]), 1300 gfar_copy_filer_entries(&(tab->fe[iend + 1]),
1274 &(tab->fe[jend + 1]), jend - j); 1301 &(tab->fe[jend + 1]), jend - j);
1275 1302
1276 if (gfar_trim_filer_entries(jend - 1, 1303 if (gfar_trim_filer_entries(jend - 1,
1277 jend + (jend - j), tab) == -EINVAL) 1304 jend + (jend - j),
1305 tab) == -EINVAL)
1278 return; 1306 return;
1279 1307
1280 /* Mask out cluster bit */ 1308 /* Mask out cluster bit */
@@ -1285,8 +1313,9 @@ static void gfar_cluster_filer(struct filer_table *tab)
1285 1313
1286/* Swaps the masked bits of a1<>a2 and b1<>b2 */ 1314/* Swaps the masked bits of a1<>a2 and b1<>b2 */
1287static void gfar_swap_bits(struct gfar_filer_entry *a1, 1315static void gfar_swap_bits(struct gfar_filer_entry *a1,
1288 struct gfar_filer_entry *a2, struct gfar_filer_entry *b1, 1316 struct gfar_filer_entry *a2,
1289 struct gfar_filer_entry *b2, u32 mask) 1317 struct gfar_filer_entry *b1,
1318 struct gfar_filer_entry *b2, u32 mask)
1290{ 1319{
1291 u32 temp[4]; 1320 u32 temp[4];
1292 temp[0] = a1->ctrl & mask; 1321 temp[0] = a1->ctrl & mask;
@@ -1305,13 +1334,12 @@ static void gfar_swap_bits(struct gfar_filer_entry *a1,
1305 b2->ctrl |= temp[2]; 1334 b2->ctrl |= temp[2];
1306} 1335}
1307 1336
1308/* 1337/* Generate a list consisting of masks values with their start and
1309 * Generate a list consisting of masks values with their start and
1310 * end of validity and block as indicator for parts belonging 1338 * end of validity and block as indicator for parts belonging
1311 * together (glued by ANDs) in mask_table 1339 * together (glued by ANDs) in mask_table
1312 */ 1340 */
1313static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table, 1341static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
1314 struct filer_table *tab) 1342 struct filer_table *tab)
1315{ 1343{
1316 u32 i, and_index = 0, block_index = 1; 1344 u32 i, and_index = 0, block_index = 1;
1317 1345
@@ -1327,13 +1355,13 @@ static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
1327 and_index++; 1355 and_index++;
1328 } 1356 }
1329 /* cluster starts and ends will be separated because they should 1357 /* cluster starts and ends will be separated because they should
1330 * hold their position */ 1358 * hold their position
1359 */
1331 if (tab->fe[i].ctrl & RQFCR_CLE) 1360 if (tab->fe[i].ctrl & RQFCR_CLE)
1332 block_index++; 1361 block_index++;
1333 /* A not set AND indicates the end of a depended block */ 1362 /* A not set AND indicates the end of a depended block */
1334 if (!(tab->fe[i].ctrl & RQFCR_AND)) 1363 if (!(tab->fe[i].ctrl & RQFCR_AND))
1335 block_index++; 1364 block_index++;
1336
1337 } 1365 }
1338 1366
1339 mask_table[and_index - 1].end = i - 1; 1367 mask_table[and_index - 1].end = i - 1;
@@ -1341,14 +1369,13 @@ static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
1341 return and_index; 1369 return and_index;
1342} 1370}
1343 1371
1344/* 1372/* Sorts the entries of mask_table by the values of the masks.
1345 * Sorts the entries of mask_table by the values of the masks.
1346 * Important: The 0xFF80 flags of the first and last entry of a 1373 * Important: The 0xFF80 flags of the first and last entry of a
1347 * block must hold their position (which queue, CLusterEnable, ReJEct, 1374 * block must hold their position (which queue, CLusterEnable, ReJEct,
1348 * AND) 1375 * AND)
1349 */ 1376 */
1350static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table, 1377static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
1351 struct filer_table *temp_table, u32 and_index) 1378 struct filer_table *temp_table, u32 and_index)
1352{ 1379{
1353 /* Pointer to compare function (_asc or _desc) */ 1380 /* Pointer to compare function (_asc or _desc) */
1354 int (*gfar_comp)(const void *, const void *); 1381 int (*gfar_comp)(const void *, const void *);
@@ -1359,16 +1386,16 @@ static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
1359 gfar_comp = &gfar_comp_desc; 1386 gfar_comp = &gfar_comp_desc;
1360 1387
1361 for (i = 0; i < and_index; i++) { 1388 for (i = 0; i < and_index; i++) {
1362
1363 if (prev != mask_table[i].block) { 1389 if (prev != mask_table[i].block) {
1364 old_first = mask_table[start].start + 1; 1390 old_first = mask_table[start].start + 1;
1365 old_last = mask_table[i - 1].end; 1391 old_last = mask_table[i - 1].end;
1366 sort(mask_table + start, size, 1392 sort(mask_table + start, size,
1367 sizeof(struct gfar_mask_entry), 1393 sizeof(struct gfar_mask_entry),
1368 gfar_comp, &gfar_swap); 1394 gfar_comp, &gfar_swap);
1369 1395
1370 /* Toggle order for every block. This makes the 1396 /* Toggle order for every block. This makes the
1371 * thing more efficient! */ 1397 * thing more efficient!
1398 */
1372 if (gfar_comp == gfar_comp_desc) 1399 if (gfar_comp == gfar_comp_desc)
1373 gfar_comp = &gfar_comp_asc; 1400 gfar_comp = &gfar_comp_asc;
1374 else 1401 else
@@ -1378,12 +1405,11 @@ static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
1378 new_last = mask_table[i - 1].end; 1405 new_last = mask_table[i - 1].end;
1379 1406
1380 gfar_swap_bits(&temp_table->fe[new_first], 1407 gfar_swap_bits(&temp_table->fe[new_first],
1381 &temp_table->fe[old_first], 1408 &temp_table->fe[old_first],
1382 &temp_table->fe[new_last], 1409 &temp_table->fe[new_last],
1383 &temp_table->fe[old_last], 1410 &temp_table->fe[old_last],
1384 RQFCR_QUEUE | RQFCR_CLE | 1411 RQFCR_QUEUE | RQFCR_CLE |
1385 RQFCR_RJE | RQFCR_AND 1412 RQFCR_RJE | RQFCR_AND);
1386 );
1387 1413
1388 start = i; 1414 start = i;
1389 size = 0; 1415 size = 0;
@@ -1391,11 +1417,9 @@ static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
1391 size++; 1417 size++;
1392 prev = mask_table[i].block; 1418 prev = mask_table[i].block;
1393 } 1419 }
1394
1395} 1420}
1396 1421
1397/* 1422/* Reduces the number of masks needed in the filer table to save entries
1398 * Reduces the number of masks needed in the filer table to save entries
1399 * This is done by sorting the masks of a depended block. A depended block is 1423 * This is done by sorting the masks of a depended block. A depended block is
1400 * identified by gluing ANDs or CLE. The sorting order toggles after every 1424 * identified by gluing ANDs or CLE. The sorting order toggles after every
1401 * block. Of course entries in scope of a mask must change their location with 1425 * block. Of course entries in scope of a mask must change their location with
@@ -1410,13 +1434,14 @@ static int gfar_optimize_filer_masks(struct filer_table *tab)
1410 s32 ret = 0; 1434 s32 ret = 0;
1411 1435
1412 /* We need a copy of the filer table because 1436 /* We need a copy of the filer table because
1413 * we want to change its order */ 1437 * we want to change its order
1438 */
1414 temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL); 1439 temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
1415 if (temp_table == NULL) 1440 if (temp_table == NULL)
1416 return -ENOMEM; 1441 return -ENOMEM;
1417 1442
1418 mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1, 1443 mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
1419 sizeof(struct gfar_mask_entry), GFP_KERNEL); 1444 sizeof(struct gfar_mask_entry), GFP_KERNEL);
1420 1445
1421 if (mask_table == NULL) { 1446 if (mask_table == NULL) {
1422 ret = -ENOMEM; 1447 ret = -ENOMEM;
@@ -1428,7 +1453,8 @@ static int gfar_optimize_filer_masks(struct filer_table *tab)
1428 gfar_sort_mask_table(mask_table, temp_table, and_index); 1453 gfar_sort_mask_table(mask_table, temp_table, and_index);
1429 1454
1430 /* Now we can copy the data from our duplicated filer table to 1455 /* Now we can copy the data from our duplicated filer table to
1431 * the real one in the order the mask table says */ 1456 * the real one in the order the mask table says
1457 */
1432 for (i = 0; i < and_index; i++) { 1458 for (i = 0; i < and_index; i++) {
1433 size = mask_table[i].end - mask_table[i].start + 1; 1459 size = mask_table[i].end - mask_table[i].start + 1;
1434 gfar_copy_filer_entries(&(tab->fe[j]), 1460 gfar_copy_filer_entries(&(tab->fe[j]),
@@ -1437,7 +1463,8 @@ static int gfar_optimize_filer_masks(struct filer_table *tab)
1437 } 1463 }
1438 1464
1439 /* And finally we just have to check for duplicated masks and drop the 1465 /* And finally we just have to check for duplicated masks and drop the
1440 * second ones */ 1466 * second ones
1467 */
1441 for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) { 1468 for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
1442 if (tab->fe[i].ctrl == 0x80) { 1469 if (tab->fe[i].ctrl == 0x80) {
1443 previous_mask = i++; 1470 previous_mask = i++;
@@ -1448,7 +1475,8 @@ static int gfar_optimize_filer_masks(struct filer_table *tab)
1448 if (tab->fe[i].ctrl == 0x80) { 1475 if (tab->fe[i].ctrl == 0x80) {
1449 if (tab->fe[i].prop == tab->fe[previous_mask].prop) { 1476 if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
1450 /* Two identical ones found! 1477 /* Two identical ones found!
1451 * So drop the second one! */ 1478 * So drop the second one!
1479 */
1452 gfar_trim_filer_entries(i, i, tab); 1480 gfar_trim_filer_entries(i, i, tab);
1453 } else 1481 } else
1454 /* Not identical! */ 1482 /* Not identical! */
@@ -1463,7 +1491,7 @@ end: kfree(temp_table);
1463 1491
1464/* Write the bit-pattern from software's buffer to hardware registers */ 1492/* Write the bit-pattern from software's buffer to hardware registers */
1465static int gfar_write_filer_table(struct gfar_private *priv, 1493static int gfar_write_filer_table(struct gfar_private *priv,
1466 struct filer_table *tab) 1494 struct filer_table *tab)
1467{ 1495{
1468 u32 i = 0; 1496 u32 i = 0;
1469 if (tab->index > MAX_FILER_IDX - 1) 1497 if (tab->index > MAX_FILER_IDX - 1)
@@ -1473,13 +1501,15 @@ static int gfar_write_filer_table(struct gfar_private *priv,
1473 lock_rx_qs(priv); 1501 lock_rx_qs(priv);
1474 1502
1475 /* Fill regular entries */ 1503 /* Fill regular entries */
1476 for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl); i++) 1504 for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl);
1505 i++)
1477 gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop); 1506 gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
1478 /* Fill the rest with fall-troughs */ 1507 /* Fill the rest with fall-troughs */
1479 for (; i < MAX_FILER_IDX - 1; i++) 1508 for (; i < MAX_FILER_IDX - 1; i++)
1480 gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF); 1509 gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
1481 /* Last entry must be default accept 1510 /* Last entry must be default accept
1482 * because that's what people expect */ 1511 * because that's what people expect
1512 */
1483 gfar_write_filer(priv, i, 0x20, 0x0); 1513 gfar_write_filer(priv, i, 0x20, 0x0);
1484 1514
1485 unlock_rx_qs(priv); 1515 unlock_rx_qs(priv);
@@ -1488,21 +1518,21 @@ static int gfar_write_filer_table(struct gfar_private *priv,
1488} 1518}
1489 1519
1490static int gfar_check_capability(struct ethtool_rx_flow_spec *flow, 1520static int gfar_check_capability(struct ethtool_rx_flow_spec *flow,
1491 struct gfar_private *priv) 1521 struct gfar_private *priv)
1492{ 1522{
1493 1523
1494 if (flow->flow_type & FLOW_EXT) { 1524 if (flow->flow_type & FLOW_EXT) {
1495 if (~flow->m_ext.data[0] || ~flow->m_ext.data[1]) 1525 if (~flow->m_ext.data[0] || ~flow->m_ext.data[1])
1496 netdev_warn(priv->ndev, 1526 netdev_warn(priv->ndev,
1497 "User-specific data not supported!\n"); 1527 "User-specific data not supported!\n");
1498 if (~flow->m_ext.vlan_etype) 1528 if (~flow->m_ext.vlan_etype)
1499 netdev_warn(priv->ndev, 1529 netdev_warn(priv->ndev,
1500 "VLAN-etype not supported!\n"); 1530 "VLAN-etype not supported!\n");
1501 } 1531 }
1502 if (flow->flow_type == IP_USER_FLOW) 1532 if (flow->flow_type == IP_USER_FLOW)
1503 if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4) 1533 if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
1504 netdev_warn(priv->ndev, 1534 netdev_warn(priv->ndev,
1505 "IP-Version differing from IPv4 not supported!\n"); 1535 "IP-Version differing from IPv4 not supported!\n");
1506 1536
1507 return 0; 1537 return 0;
1508} 1538}
@@ -1520,15 +1550,18 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
1520 return -ENOMEM; 1550 return -ENOMEM;
1521 1551
1522 /* Now convert the existing filer data from flow_spec into 1552 /* Now convert the existing filer data from flow_spec into
1523 * filer tables binary format */ 1553 * filer tables binary format
1554 */
1524 list_for_each_entry(j, &priv->rx_list.list, list) { 1555 list_for_each_entry(j, &priv->rx_list.list, list) {
1525 ret = gfar_convert_to_filer(&j->fs, tab); 1556 ret = gfar_convert_to_filer(&j->fs, tab);
1526 if (ret == -EBUSY) { 1557 if (ret == -EBUSY) {
1527 netdev_err(priv->ndev, "Rule not added: No free space!\n"); 1558 netdev_err(priv->ndev,
1559 "Rule not added: No free space!\n");
1528 goto end; 1560 goto end;
1529 } 1561 }
1530 if (ret == -1) { 1562 if (ret == -1) {
1531 netdev_err(priv->ndev, "Rule not added: Unsupported Flow-type!\n"); 1563 netdev_err(priv->ndev,
1564 "Rule not added: Unsupported Flow-type!\n");
1532 goto end; 1565 goto end;
1533 } 1566 }
1534 } 1567 }
@@ -1540,9 +1573,9 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
1540 gfar_optimize_filer_masks(tab); 1573 gfar_optimize_filer_masks(tab);
1541 1574
1542 pr_debug("\n\tSummary:\n" 1575 pr_debug("\n\tSummary:\n"
1543 "\tData on hardware: %d\n" 1576 "\tData on hardware: %d\n"
1544 "\tCompression rate: %d%%\n", 1577 "\tCompression rate: %d%%\n",
1545 tab->index, 100 - (100 * tab->index) / i); 1578 tab->index, 100 - (100 * tab->index) / i);
1546 1579
1547 /* Write everything to hardware */ 1580 /* Write everything to hardware */
1548 ret = gfar_write_filer_table(priv, tab); 1581 ret = gfar_write_filer_table(priv, tab);
@@ -1551,7 +1584,8 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
1551 goto end; 1584 goto end;
1552 } 1585 }
1553 1586
1554end: kfree(tab); 1587end:
1588 kfree(tab);
1555 return ret; 1589 return ret;
1556} 1590}
1557 1591
@@ -1569,7 +1603,7 @@ static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
1569} 1603}
1570 1604
1571static int gfar_add_cls(struct gfar_private *priv, 1605static int gfar_add_cls(struct gfar_private *priv,
1572 struct ethtool_rx_flow_spec *flow) 1606 struct ethtool_rx_flow_spec *flow)
1573{ 1607{
1574 struct ethtool_flow_spec_container *temp, *comp; 1608 struct ethtool_flow_spec_container *temp, *comp;
1575 int ret = 0; 1609 int ret = 0;
@@ -1591,7 +1625,6 @@ static int gfar_add_cls(struct gfar_private *priv,
1591 list_add(&temp->list, &priv->rx_list.list); 1625 list_add(&temp->list, &priv->rx_list.list);
1592 goto process; 1626 goto process;
1593 } else { 1627 } else {
1594
1595 list_for_each_entry(comp, &priv->rx_list.list, list) { 1628 list_for_each_entry(comp, &priv->rx_list.list, list) {
1596 if (comp->fs.location > flow->location) { 1629 if (comp->fs.location > flow->location) {
1597 list_add_tail(&temp->list, &comp->list); 1630 list_add_tail(&temp->list, &comp->list);
@@ -1599,8 +1632,8 @@ static int gfar_add_cls(struct gfar_private *priv,
1599 } 1632 }
1600 if (comp->fs.location == flow->location) { 1633 if (comp->fs.location == flow->location) {
1601 netdev_err(priv->ndev, 1634 netdev_err(priv->ndev,
1602 "Rule not added: ID %d not free!\n", 1635 "Rule not added: ID %d not free!\n",
1603 flow->location); 1636 flow->location);
1604 ret = -EBUSY; 1637 ret = -EBUSY;
1605 goto clean_mem; 1638 goto clean_mem;
1606 } 1639 }
@@ -1642,7 +1675,6 @@ static int gfar_del_cls(struct gfar_private *priv, u32 loc)
1642 } 1675 }
1643 1676
1644 return ret; 1677 return ret;
1645
1646} 1678}
1647 1679
1648static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd) 1680static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
@@ -1663,7 +1695,7 @@ static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
1663} 1695}
1664 1696
1665static int gfar_get_cls_all(struct gfar_private *priv, 1697static int gfar_get_cls_all(struct gfar_private *priv,
1666 struct ethtool_rxnfc *cmd, u32 *rule_locs) 1698 struct ethtool_rxnfc *cmd, u32 *rule_locs)
1667{ 1699{
1668 struct ethtool_flow_spec_container *comp; 1700 struct ethtool_flow_spec_container *comp;
1669 u32 i = 0; 1701 u32 i = 0;
@@ -1714,7 +1746,7 @@ static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
1714} 1746}
1715 1747
1716static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, 1748static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
1717 u32 *rule_locs) 1749 u32 *rule_locs)
1718{ 1750{
1719 struct gfar_private *priv = netdev_priv(dev); 1751 struct gfar_private *priv = netdev_priv(dev);
1720 int ret = 0; 1752 int ret = 0;
@@ -1748,23 +1780,19 @@ static int gfar_get_ts_info(struct net_device *dev,
1748 struct gfar_private *priv = netdev_priv(dev); 1780 struct gfar_private *priv = netdev_priv(dev);
1749 1781
1750 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) { 1782 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
1751 info->so_timestamping = 1783 info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
1752 SOF_TIMESTAMPING_RX_SOFTWARE | 1784 SOF_TIMESTAMPING_SOFTWARE;
1753 SOF_TIMESTAMPING_SOFTWARE;
1754 info->phc_index = -1; 1785 info->phc_index = -1;
1755 return 0; 1786 return 0;
1756 } 1787 }
1757 info->so_timestamping = 1788 info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
1758 SOF_TIMESTAMPING_TX_HARDWARE | 1789 SOF_TIMESTAMPING_RX_HARDWARE |
1759 SOF_TIMESTAMPING_RX_HARDWARE | 1790 SOF_TIMESTAMPING_RAW_HARDWARE;
1760 SOF_TIMESTAMPING_RAW_HARDWARE;
1761 info->phc_index = gfar_phc_index; 1791 info->phc_index = gfar_phc_index;
1762 info->tx_types = 1792 info->tx_types = (1 << HWTSTAMP_TX_OFF) |
1763 (1 << HWTSTAMP_TX_OFF) | 1793 (1 << HWTSTAMP_TX_ON);
1764 (1 << HWTSTAMP_TX_ON); 1794 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1765 info->rx_filters = 1795 (1 << HWTSTAMP_FILTER_ALL);
1766 (1 << HWTSTAMP_FILTER_NONE) |
1767 (1 << HWTSTAMP_FILTER_ALL);
1768 return 0; 1796 return 0;
1769} 1797}
1770 1798
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 9ac14f804851..21c6574c5f15 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -185,7 +185,7 @@ static void mem_disp(u8 *addr, int size)
185 for (; (u32) i < (u32) addr + size4Aling; i += 4) 185 for (; (u32) i < (u32) addr + size4Aling; i += 4)
186 printk("%08x ", *((u32 *) (i))); 186 printk("%08x ", *((u32 *) (i)));
187 for (; (u32) i < (u32) addr + size; i++) 187 for (; (u32) i < (u32) addr + size; i++)
188 printk("%02x", *((u8 *) (i))); 188 printk("%02x", *((i)));
189 if (notAlign == 1) 189 if (notAlign == 1)
190 printk("\r\n"); 190 printk("\r\n");
191} 191}
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index d496673f0908..3f4391bede81 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -1217,7 +1217,7 @@ static int hp100_init_rxpdl(struct net_device *dev,
1217 1217
1218 ringptr->pdl = pdlptr + 1; 1218 ringptr->pdl = pdlptr + 1;
1219 ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr + 1); 1219 ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr + 1);
1220 ringptr->skb = (void *) NULL; 1220 ringptr->skb = NULL;
1221 1221
1222 /* 1222 /*
1223 * Write address and length of first PDL Fragment (which is used for 1223 * Write address and length of first PDL Fragment (which is used for
@@ -1243,7 +1243,7 @@ static int hp100_init_txpdl(struct net_device *dev,
1243 1243
1244 ringptr->pdl = pdlptr; /* +1; */ 1244 ringptr->pdl = pdlptr; /* +1; */
1245 ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr); /* +1 */ 1245 ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr); /* +1 */
1246 ringptr->skb = (void *) NULL; 1246 ringptr->skb = NULL;
1247 1247
1248 return roundup(MAX_TX_FRAG * 2 + 2, 4); 1248 return roundup(MAX_TX_FRAG * 2 + 2, 4);
1249} 1249}
@@ -1628,7 +1628,7 @@ static void hp100_clean_txring(struct net_device *dev)
1628 /* Conversion to new PCI API : NOP */ 1628 /* Conversion to new PCI API : NOP */
1629 pci_unmap_single(lp->pci_dev, (dma_addr_t) lp->txrhead->pdl[1], lp->txrhead->pdl[2], PCI_DMA_TODEVICE); 1629 pci_unmap_single(lp->pci_dev, (dma_addr_t) lp->txrhead->pdl[1], lp->txrhead->pdl[2], PCI_DMA_TODEVICE);
1630 dev_kfree_skb_any(lp->txrhead->skb); 1630 dev_kfree_skb_any(lp->txrhead->skb);
1631 lp->txrhead->skb = (void *) NULL; 1631 lp->txrhead->skb = NULL;
1632 lp->txrhead = lp->txrhead->next; 1632 lp->txrhead = lp->txrhead->next;
1633 lp->txrcommit--; 1633 lp->txrcommit--;
1634 } 1634 }
diff --git a/drivers/net/ethernet/i825xx/lp486e.c b/drivers/net/ethernet/i825xx/lp486e.c
index 6c2952c8ea15..3735bfa53600 100644
--- a/drivers/net/ethernet/i825xx/lp486e.c
+++ b/drivers/net/ethernet/i825xx/lp486e.c
@@ -629,10 +629,10 @@ init_i596(struct net_device *dev) {
629 629
630 memcpy ((void *)lp->eth_addr, dev->dev_addr, 6); 630 memcpy ((void *)lp->eth_addr, dev->dev_addr, 6);
631 lp->set_add.command = CmdIASetup; 631 lp->set_add.command = CmdIASetup;
632 i596_add_cmd(dev, (struct i596_cmd *)&lp->set_add); 632 i596_add_cmd(dev, &lp->set_add);
633 633
634 lp->tdr.command = CmdTDR; 634 lp->tdr.command = CmdTDR;
635 i596_add_cmd(dev, (struct i596_cmd *)&lp->tdr); 635 i596_add_cmd(dev, &lp->tdr);
636 636
637 if (lp->scb.command && i596_timeout(dev, "i82596 init", 200)) 637 if (lp->scb.command && i596_timeout(dev, "i82596 init", 200))
638 return 1; 638 return 1;
@@ -737,7 +737,7 @@ i596_cleanup_cmd(struct net_device *dev) {
737 737
738 lp = netdev_priv(dev); 738 lp = netdev_priv(dev);
739 while (lp->cmd_head) { 739 while (lp->cmd_head) {
740 cmd = (struct i596_cmd *)lp->cmd_head; 740 cmd = lp->cmd_head;
741 741
742 lp->cmd_head = pa_to_va(lp->cmd_head->pa_next); 742 lp->cmd_head = pa_to_va(lp->cmd_head->pa_next);
743 lp->cmd_backlog--; 743 lp->cmd_backlog--;
@@ -1281,7 +1281,7 @@ static void set_multicast_list(struct net_device *dev) {
1281 lp->i596_config[8] |= 0x01; 1281 lp->i596_config[8] |= 0x01;
1282 } 1282 }
1283 1283
1284 i596_add_cmd(dev, (struct i596_cmd *) &lp->set_conf); 1284 i596_add_cmd(dev, &lp->set_conf);
1285 } 1285 }
1286} 1286}
1287 1287
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index cae17f4bc93e..353f57f675d0 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -571,7 +571,7 @@ static int init586(struct net_device *dev)
571 } 571 }
572#endif 572#endif
573 573
574 ptr = alloc_rfa(dev,(void *)ptr); /* init receive-frame-area */ 574 ptr = alloc_rfa(dev,ptr); /* init receive-frame-area */
575 575
576 /* 576 /*
577 * alloc xmit-buffs / init xmit_cmds 577 * alloc xmit-buffs / init xmit_cmds
@@ -584,7 +584,7 @@ static int init586(struct net_device *dev)
584 ptr = (char *) ptr + XMIT_BUFF_SIZE; 584 ptr = (char *) ptr + XMIT_BUFF_SIZE;
585 p->xmit_buffs[i] = (struct tbd_struct *)ptr; /* TBD */ 585 p->xmit_buffs[i] = (struct tbd_struct *)ptr; /* TBD */
586 ptr = (char *) ptr + sizeof(struct tbd_struct); 586 ptr = (char *) ptr + sizeof(struct tbd_struct);
587 if((void *)ptr > (void *)dev->mem_end) 587 if(ptr > (void *)dev->mem_end)
588 { 588 {
589 printk("%s: not enough shared-mem for your configuration!\n",dev->name); 589 printk("%s: not enough shared-mem for your configuration!\n",dev->name);
590 return 1; 590 return 1;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
index 4fb47f14dbfe..cb66f574dc97 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
@@ -376,9 +376,7 @@ int ehea_destroy_eq(struct ehea_eq *eq)
376 return 0; 376 return 0;
377} 377}
378 378
379/** 379/* allocates memory for a queue and registers pages in phyp */
380 * allocates memory for a queue and registers pages in phyp
381 */
382static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue, 380static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
383 int nr_pages, int wqe_size, int act_nr_sges, 381 int nr_pages, int wqe_size, int act_nr_sges,
384 struct ehea_adapter *adapter, int h_call_q_selector) 382 struct ehea_adapter *adapter, int h_call_q_selector)
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index ada720b42ff6..535f94fac4a1 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1249,20 +1249,35 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
1249 const struct firmware *fw = nic->fw; 1249 const struct firmware *fw = nic->fw;
1250 u8 timer, bundle, min_size; 1250 u8 timer, bundle, min_size;
1251 int err = 0; 1251 int err = 0;
1252 bool required = false;
1252 1253
1253 /* do not load u-code for ICH devices */ 1254 /* do not load u-code for ICH devices */
1254 if (nic->flags & ich) 1255 if (nic->flags & ich)
1255 return NULL; 1256 return NULL;
1256 1257
1257 /* Search for ucode match against h/w revision */ 1258 /* Search for ucode match against h/w revision
1258 if (nic->mac == mac_82559_D101M) 1259 *
1260 * Based on comments in the source code for the FreeBSD fxp
1261 * driver, the FIRMWARE_D102E ucode includes both CPUSaver and
1262 *
1263 * "fixes for bugs in the B-step hardware (specifically, bugs
1264 * with Inline Receive)."
1265 *
1266 * So we must fail if it cannot be loaded.
1267 *
1268 * The other microcode files are only required for the optional
1269 * CPUSaver feature. Nice to have, but no reason to fail.
1270 */
1271 if (nic->mac == mac_82559_D101M) {
1259 fw_name = FIRMWARE_D101M; 1272 fw_name = FIRMWARE_D101M;
1260 else if (nic->mac == mac_82559_D101S) 1273 } else if (nic->mac == mac_82559_D101S) {
1261 fw_name = FIRMWARE_D101S; 1274 fw_name = FIRMWARE_D101S;
1262 else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) 1275 } else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) {
1263 fw_name = FIRMWARE_D102E; 1276 fw_name = FIRMWARE_D102E;
1264 else /* No ucode on other devices */ 1277 required = true;
1278 } else { /* No ucode on other devices */
1265 return NULL; 1279 return NULL;
1280 }
1266 1281
1267 /* If the firmware has not previously been loaded, request a pointer 1282 /* If the firmware has not previously been loaded, request a pointer
1268 * to it. If it was previously loaded, we are reinitializing the 1283 * to it. If it was previously loaded, we are reinitializing the
@@ -1273,10 +1288,17 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
1273 err = request_firmware(&fw, fw_name, &nic->pdev->dev); 1288 err = request_firmware(&fw, fw_name, &nic->pdev->dev);
1274 1289
1275 if (err) { 1290 if (err) {
1276 netif_err(nic, probe, nic->netdev, 1291 if (required) {
1277 "Failed to load firmware \"%s\": %d\n", 1292 netif_err(nic, probe, nic->netdev,
1278 fw_name, err); 1293 "Failed to load firmware \"%s\": %d\n",
1279 return ERR_PTR(err); 1294 fw_name, err);
1295 return ERR_PTR(err);
1296 } else {
1297 netif_info(nic, probe, nic->netdev,
1298 "CPUSaver disabled. Needs \"%s\": %d\n",
1299 fw_name, err);
1300 return NULL;
1301 }
1280 } 1302 }
1281 1303
1282 /* Firmware should be precisely UCODE_SIZE (words) plus three bytes 1304 /* Firmware should be precisely UCODE_SIZE (words) plus three bytes
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index c526279e4927..3d6839528761 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -399,7 +399,7 @@ void e1000_set_media_type(struct e1000_hw *hw)
399} 399}
400 400
401/** 401/**
402 * e1000_reset_hw: reset the hardware completely 402 * e1000_reset_hw - reset the hardware completely
403 * @hw: Struct containing variables accessed by shared code 403 * @hw: Struct containing variables accessed by shared code
404 * 404 *
405 * Reset the transmit and receive units; mask and clear all interrupts. 405 * Reset the transmit and receive units; mask and clear all interrupts.
@@ -546,7 +546,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
546} 546}
547 547
548/** 548/**
549 * e1000_init_hw: Performs basic configuration of the adapter. 549 * e1000_init_hw - Performs basic configuration of the adapter.
550 * @hw: Struct containing variables accessed by shared code 550 * @hw: Struct containing variables accessed by shared code
551 * 551 *
552 * Assumes that the controller has previously been reset and is in a 552 * Assumes that the controller has previously been reset and is in a
@@ -2591,7 +2591,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
2591 * @hw: Struct containing variables accessed by shared code 2591 * @hw: Struct containing variables accessed by shared code
2592 * @speed: Speed of the connection 2592 * @speed: Speed of the connection
2593 * @duplex: Duplex setting of the connection 2593 * @duplex: Duplex setting of the connection
2594 2594 *
2595 * Detects the current speed and duplex settings of the hardware. 2595 * Detects the current speed and duplex settings of the hardware.
2596 */ 2596 */
2597s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) 2597s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
@@ -2959,7 +2959,7 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
2959 * @hw: Struct containing variables accessed by shared code 2959 * @hw: Struct containing variables accessed by shared code
2960 * @reg_addr: address of the PHY register to write 2960 * @reg_addr: address of the PHY register to write
2961 * @data: data to write to the PHY 2961 * @data: data to write to the PHY
2962 2962 *
2963 * Writes a value to a PHY register 2963 * Writes a value to a PHY register
2964 */ 2964 */
2965s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data) 2965s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 7483ca0a6282..3bfbb8df8989 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -721,9 +721,7 @@ void e1000_reset(struct e1000_adapter *adapter)
721 e1000_release_manageability(adapter); 721 e1000_release_manageability(adapter);
722} 722}
723 723
724/** 724/* Dump the eeprom for users having checksum issues */
725 * Dump the eeprom for users having checksum issues
726 **/
727static void e1000_dump_eeprom(struct e1000_adapter *adapter) 725static void e1000_dump_eeprom(struct e1000_adapter *adapter)
728{ 726{
729 struct net_device *netdev = adapter->netdev; 727 struct net_device *netdev = adapter->netdev;
@@ -1078,18 +1076,18 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
1078 netdev->priv_flags |= IFF_SUPP_NOFCS; 1076 netdev->priv_flags |= IFF_SUPP_NOFCS;
1079 1077
1080 netdev->features |= netdev->hw_features; 1078 netdev->features |= netdev->hw_features;
1081 netdev->hw_features |= NETIF_F_RXCSUM; 1079 netdev->hw_features |= (NETIF_F_RXCSUM |
1082 netdev->hw_features |= NETIF_F_RXALL; 1080 NETIF_F_RXALL |
1083 netdev->hw_features |= NETIF_F_RXFCS; 1081 NETIF_F_RXFCS);
1084 1082
1085 if (pci_using_dac) { 1083 if (pci_using_dac) {
1086 netdev->features |= NETIF_F_HIGHDMA; 1084 netdev->features |= NETIF_F_HIGHDMA;
1087 netdev->vlan_features |= NETIF_F_HIGHDMA; 1085 netdev->vlan_features |= NETIF_F_HIGHDMA;
1088 } 1086 }
1089 1087
1090 netdev->vlan_features |= NETIF_F_TSO; 1088 netdev->vlan_features |= (NETIF_F_TSO |
1091 netdev->vlan_features |= NETIF_F_HW_CSUM; 1089 NETIF_F_HW_CSUM |
1092 netdev->vlan_features |= NETIF_F_SG; 1090 NETIF_F_SG);
1093 1091
1094 netdev->priv_flags |= IFF_UNICAST_FLT; 1092 netdev->priv_flags |= IFF_UNICAST_FLT;
1095 1093
@@ -3056,14 +3054,13 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
3056 mmiowb(); 3054 mmiowb();
3057} 3055}
3058 3056
3059/** 3057/* 82547 workaround to avoid controller hang in half-duplex environment.
3060 * 82547 workaround to avoid controller hang in half-duplex environment.
3061 * The workaround is to avoid queuing a large packet that would span 3058 * The workaround is to avoid queuing a large packet that would span
3062 * the internal Tx FIFO ring boundary by notifying the stack to resend 3059 * the internal Tx FIFO ring boundary by notifying the stack to resend
3063 * the packet at a later time. This gives the Tx FIFO an opportunity to 3060 * the packet at a later time. This gives the Tx FIFO an opportunity to
3064 * flush all packets. When that occurs, we reset the Tx FIFO pointers 3061 * flush all packets. When that occurs, we reset the Tx FIFO pointers
3065 * to the beginning of the Tx FIFO. 3062 * to the beginning of the Tx FIFO.
3066 **/ 3063 */
3067 3064
3068#define E1000_FIFO_HDR 0x10 3065#define E1000_FIFO_HDR 0x10
3069#define E1000_82547_PAD_LEN 0x3E0 3066#define E1000_82547_PAD_LEN 0x3E0
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 1f063dcd8f85..0b3bade957fd 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1680,16 +1680,18 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1680 e_dbg("ANYSTATE -> DOWN\n"); 1680 e_dbg("ANYSTATE -> DOWN\n");
1681 } else { 1681 } else {
1682 /* 1682 /*
1683 * Check several times, if Sync and Config 1683 * Check several times, if SYNCH bit and CONFIG
1684 * both are consistently 1 then simply ignore 1684 * bit both are consistently 1 then simply ignore
1685 * the Invalid bit and restart Autoneg 1685 * the IV bit and restart Autoneg
1686 */ 1686 */
1687 for (i = 0; i < AN_RETRY_COUNT; i++) { 1687 for (i = 0; i < AN_RETRY_COUNT; i++) {
1688 udelay(10); 1688 udelay(10);
1689 rxcw = er32(RXCW); 1689 rxcw = er32(RXCW);
1690 if ((rxcw & E1000_RXCW_IV) && 1690 if ((rxcw & E1000_RXCW_SYNCH) &&
1691 !((rxcw & E1000_RXCW_SYNCH) && 1691 (rxcw & E1000_RXCW_C))
1692 (rxcw & E1000_RXCW_C))) { 1692 continue;
1693
1694 if (rxcw & E1000_RXCW_IV) {
1693 mac->serdes_has_link = false; 1695 mac->serdes_has_link = false;
1694 mac->serdes_link_state = 1696 mac->serdes_link_state =
1695 e1000_serdes_link_down; 1697 e1000_serdes_link_down;
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 6e6fffb34581..cd153326c3cf 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -514,6 +514,7 @@ extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
514extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); 514extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
515extern void e1000e_get_hw_control(struct e1000_adapter *adapter); 515extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
516extern void e1000e_release_hw_control(struct e1000_adapter *adapter); 516extern void e1000e_release_hw_control(struct e1000_adapter *adapter);
517extern void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr);
517 518
518extern unsigned int copybreak; 519extern unsigned int copybreak;
519 520
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 905e2147d918..105d554ea9db 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -1897,7 +1897,6 @@ static int e1000_set_coalesce(struct net_device *netdev,
1897 struct ethtool_coalesce *ec) 1897 struct ethtool_coalesce *ec)
1898{ 1898{
1899 struct e1000_adapter *adapter = netdev_priv(netdev); 1899 struct e1000_adapter *adapter = netdev_priv(netdev);
1900 struct e1000_hw *hw = &adapter->hw;
1901 1900
1902 if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) || 1901 if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
1903 ((ec->rx_coalesce_usecs > 4) && 1902 ((ec->rx_coalesce_usecs > 4) &&
@@ -1916,9 +1915,9 @@ static int e1000_set_coalesce(struct net_device *netdev,
1916 } 1915 }
1917 1916
1918 if (adapter->itr_setting != 0) 1917 if (adapter->itr_setting != 0)
1919 ew32(ITR, 1000000000 / (adapter->itr * 256)); 1918 e1000e_write_itr(adapter, adapter->itr);
1920 else 1919 else
1921 ew32(ITR, 0); 1920 e1000e_write_itr(adapter, 0);
1922 1921
1923 return 0; 1922 return 0;
1924} 1923}
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 623e30b9964d..95b245310f17 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -2159,7 +2159,7 @@ void e1000e_release_hw_control(struct e1000_adapter *adapter)
2159} 2159}
2160 2160
2161/** 2161/**
2162 * @e1000_alloc_ring - allocate memory for a ring structure 2162 * e1000_alloc_ring_dma - allocate memory for a ring structure
2163 **/ 2163 **/
2164static int e1000_alloc_ring_dma(struct e1000_adapter *adapter, 2164static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
2165 struct e1000_ring *ring) 2165 struct e1000_ring *ring)
@@ -2474,6 +2474,30 @@ set_itr_now:
2474} 2474}
2475 2475
2476/** 2476/**
2477 * e1000e_write_itr - write the ITR value to the appropriate registers
2478 * @adapter: address of board private structure
2479 * @itr: new ITR value to program
2480 *
2481 * e1000e_write_itr determines if the adapter is in MSI-X mode
2482 * and, if so, writes the EITR registers with the ITR value.
2483 * Otherwise, it writes the ITR value into the ITR register.
2484 **/
2485void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr)
2486{
2487 struct e1000_hw *hw = &adapter->hw;
2488 u32 new_itr = itr ? 1000000000 / (itr * 256) : 0;
2489
2490 if (adapter->msix_entries) {
2491 int vector;
2492
2493 for (vector = 0; vector < adapter->num_vectors; vector++)
2494 writel(new_itr, hw->hw_addr + E1000_EITR_82574(vector));
2495 } else {
2496 ew32(ITR, new_itr);
2497 }
2498}
2499
2500/**
2477 * e1000_alloc_queues - Allocate memory for all rings 2501 * e1000_alloc_queues - Allocate memory for all rings
2478 * @adapter: board private structure to initialize 2502 * @adapter: board private structure to initialize
2479 **/ 2503 **/
@@ -3059,7 +3083,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
3059 /* irq moderation */ 3083 /* irq moderation */
3060 ew32(RADV, adapter->rx_abs_int_delay); 3084 ew32(RADV, adapter->rx_abs_int_delay);
3061 if ((adapter->itr_setting != 0) && (adapter->itr != 0)) 3085 if ((adapter->itr_setting != 0) && (adapter->itr != 0))
3062 ew32(ITR, 1000000000 / (adapter->itr * 256)); 3086 e1000e_write_itr(adapter, adapter->itr);
3063 3087
3064 ctrl_ext = er32(CTRL_EXT); 3088 ctrl_ext = er32(CTRL_EXT);
3065 /* Auto-Mask interrupts upon ICR access */ 3089 /* Auto-Mask interrupts upon ICR access */
@@ -3486,14 +3510,14 @@ void e1000e_reset(struct e1000_adapter *adapter)
3486 dev_info(&adapter->pdev->dev, 3510 dev_info(&adapter->pdev->dev,
3487 "Interrupt Throttle Rate turned off\n"); 3511 "Interrupt Throttle Rate turned off\n");
3488 adapter->flags2 |= FLAG2_DISABLE_AIM; 3512 adapter->flags2 |= FLAG2_DISABLE_AIM;
3489 ew32(ITR, 0); 3513 e1000e_write_itr(adapter, 0);
3490 } 3514 }
3491 } else if (adapter->flags2 & FLAG2_DISABLE_AIM) { 3515 } else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
3492 dev_info(&adapter->pdev->dev, 3516 dev_info(&adapter->pdev->dev,
3493 "Interrupt Throttle Rate turned on\n"); 3517 "Interrupt Throttle Rate turned on\n");
3494 adapter->flags2 &= ~FLAG2_DISABLE_AIM; 3518 adapter->flags2 &= ~FLAG2_DISABLE_AIM;
3495 adapter->itr = 20000; 3519 adapter->itr = 20000;
3496 ew32(ITR, 1000000000 / (adapter->itr * 256)); 3520 e1000e_write_itr(adapter, adapter->itr);
3497 } 3521 }
3498 } 3522 }
3499 3523
@@ -4576,7 +4600,7 @@ link_up:
4576 adapter->gorc - adapter->gotc) / 10000; 4600 adapter->gorc - adapter->gotc) / 10000;
4577 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; 4601 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
4578 4602
4579 ew32(ITR, 1000000000 / (itr * 256)); 4603 e1000e_write_itr(adapter, itr);
4580 } 4604 }
4581 4605
4582 /* Cause software interrupt to ensure Rx ring is cleaned */ 4606 /* Cause software interrupt to ensure Rx ring is cleaned */
@@ -6191,7 +6215,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
6191 } 6215 }
6192 6216
6193 if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) 6217 if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
6194 e_info("PHY reset is blocked due to SOL/IDER session.\n"); 6218 dev_info(&pdev->dev,
6219 "PHY reset is blocked due to SOL/IDER session.\n");
6195 6220
6196 /* Set initial default active device features */ 6221 /* Set initial default active device features */
6197 netdev->features = (NETIF_F_SG | 6222 netdev->features = (NETIF_F_SG |
@@ -6241,7 +6266,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
6241 if (e1000_validate_nvm_checksum(&adapter->hw) >= 0) 6266 if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
6242 break; 6267 break;
6243 if (i == 2) { 6268 if (i == 2) {
6244 e_err("The NVM Checksum Is Not Valid\n"); 6269 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
6245 err = -EIO; 6270 err = -EIO;
6246 goto err_eeprom; 6271 goto err_eeprom;
6247 } 6272 }
@@ -6251,13 +6276,15 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
6251 6276
6252 /* copy the MAC address */ 6277 /* copy the MAC address */
6253 if (e1000e_read_mac_addr(&adapter->hw)) 6278 if (e1000e_read_mac_addr(&adapter->hw))
6254 e_err("NVM Read Error while reading MAC address\n"); 6279 dev_err(&pdev->dev,
6280 "NVM Read Error while reading MAC address\n");
6255 6281
6256 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); 6282 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
6257 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); 6283 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
6258 6284
6259 if (!is_valid_ether_addr(netdev->perm_addr)) { 6285 if (!is_valid_ether_addr(netdev->perm_addr)) {
6260 e_err("Invalid MAC Address: %pM\n", netdev->perm_addr); 6286 dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
6287 netdev->perm_addr);
6261 err = -EIO; 6288 err = -EIO;
6262 goto err_eeprom; 6289 goto err_eeprom;
6263 } 6290 }
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index 55cc1565bc2f..dfbfa7fd98c3 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -199,16 +199,19 @@ static int __devinit e1000_validate_option(unsigned int *value,
199 case enable_option: 199 case enable_option:
200 switch (*value) { 200 switch (*value) {
201 case OPTION_ENABLED: 201 case OPTION_ENABLED:
202 e_info("%s Enabled\n", opt->name); 202 dev_info(&adapter->pdev->dev, "%s Enabled\n",
203 opt->name);
203 return 0; 204 return 0;
204 case OPTION_DISABLED: 205 case OPTION_DISABLED:
205 e_info("%s Disabled\n", opt->name); 206 dev_info(&adapter->pdev->dev, "%s Disabled\n",
207 opt->name);
206 return 0; 208 return 0;
207 } 209 }
208 break; 210 break;
209 case range_option: 211 case range_option:
210 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { 212 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
211 e_info("%s set to %i\n", opt->name, *value); 213 dev_info(&adapter->pdev->dev, "%s set to %i\n",
214 opt->name, *value);
212 return 0; 215 return 0;
213 } 216 }
214 break; 217 break;
@@ -220,7 +223,8 @@ static int __devinit e1000_validate_option(unsigned int *value,
220 ent = &opt->arg.l.p[i]; 223 ent = &opt->arg.l.p[i];
221 if (*value == ent->i) { 224 if (*value == ent->i) {
222 if (ent->str[0] != '\0') 225 if (ent->str[0] != '\0')
223 e_info("%s\n", ent->str); 226 dev_info(&adapter->pdev->dev, "%s\n",
227 ent->str);
224 return 0; 228 return 0;
225 } 229 }
226 } 230 }
@@ -230,8 +234,8 @@ static int __devinit e1000_validate_option(unsigned int *value,
230 BUG(); 234 BUG();
231 } 235 }
232 236
233 e_info("Invalid %s value specified (%i) %s\n", opt->name, *value, 237 dev_info(&adapter->pdev->dev, "Invalid %s value specified (%i) %s\n",
234 opt->err); 238 opt->name, *value, opt->err);
235 *value = opt->def; 239 *value = opt->def;
236 return -1; 240 return -1;
237} 241}
@@ -251,8 +255,10 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
251 int bd = adapter->bd_number; 255 int bd = adapter->bd_number;
252 256
253 if (bd >= E1000_MAX_NIC) { 257 if (bd >= E1000_MAX_NIC) {
254 e_notice("Warning: no configuration for board #%i\n", bd); 258 dev_notice(&adapter->pdev->dev,
255 e_notice("Using defaults for all values\n"); 259 "Warning: no configuration for board #%i\n", bd);
260 dev_notice(&adapter->pdev->dev,
261 "Using defaults for all values\n");
256 } 262 }
257 263
258 { /* Transmit Interrupt Delay */ 264 { /* Transmit Interrupt Delay */
@@ -366,27 +372,32 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
366 * default values 372 * default values
367 */ 373 */
368 if (adapter->itr > 4) 374 if (adapter->itr > 4)
369 e_info("%s set to default %d\n", opt.name, 375 dev_info(&adapter->pdev->dev,
370 adapter->itr); 376 "%s set to default %d\n", opt.name,
377 adapter->itr);
371 } 378 }
372 379
373 adapter->itr_setting = adapter->itr; 380 adapter->itr_setting = adapter->itr;
374 switch (adapter->itr) { 381 switch (adapter->itr) {
375 case 0: 382 case 0:
376 e_info("%s turned off\n", opt.name); 383 dev_info(&adapter->pdev->dev, "%s turned off\n",
384 opt.name);
377 break; 385 break;
378 case 1: 386 case 1:
379 e_info("%s set to dynamic mode\n", opt.name); 387 dev_info(&adapter->pdev->dev,
388 "%s set to dynamic mode\n", opt.name);
380 adapter->itr = 20000; 389 adapter->itr = 20000;
381 break; 390 break;
382 case 3: 391 case 3:
383 e_info("%s set to dynamic conservative mode\n", 392 dev_info(&adapter->pdev->dev,
384 opt.name); 393 "%s set to dynamic conservative mode\n",
394 opt.name);
385 adapter->itr = 20000; 395 adapter->itr = 20000;
386 break; 396 break;
387 case 4: 397 case 4:
388 e_info("%s set to simplified (2000-8000 ints) mode\n", 398 dev_info(&adapter->pdev->dev,
389 opt.name); 399 "%s set to simplified (2000-8000 ints) mode\n",
400 opt.name);
390 break; 401 break;
391 default: 402 default:
392 /* 403 /*
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 35d1e4f2c92c..10efcd88dca0 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -117,6 +117,7 @@
117 117
118/* TX Rate Limit Registers */ 118/* TX Rate Limit Registers */
119#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */ 119#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */
120#define E1000_RTTBCNRM 0x3690 /* Tx BCN Rate-scheduler MMW */
120#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */ 121#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */
121 122
122/* Split and Replication RX Control - RW */ 123/* Split and Replication RX Control - RW */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index ae6d3f393a54..9e572dd29ab2 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -65,19 +65,30 @@ struct igb_adapter;
65#define MAX_Q_VECTORS 8 65#define MAX_Q_VECTORS 8
66 66
67/* Transmit and receive queues */ 67/* Transmit and receive queues */
68#define IGB_MAX_RX_QUEUES ((adapter->vfs_allocated_count ? 2 : \ 68#define IGB_MAX_RX_QUEUES 8
69 (hw->mac.type > e1000_82575 ? 8 : 4))) 69#define IGB_MAX_RX_QUEUES_82575 4
70#define IGB_MAX_RX_QUEUES_I210 4
71#define IGB_MAX_RX_QUEUES_I211 2 70#define IGB_MAX_RX_QUEUES_I211 2
72#define IGB_MAX_TX_QUEUES 16 71#define IGB_MAX_TX_QUEUES 8
73#define IGB_MAX_TX_QUEUES_I210 4
74#define IGB_MAX_TX_QUEUES_I211 2
75#define IGB_MAX_VF_MC_ENTRIES 30 72#define IGB_MAX_VF_MC_ENTRIES 30
76#define IGB_MAX_VF_FUNCTIONS 8 73#define IGB_MAX_VF_FUNCTIONS 8
77#define IGB_MAX_VFTA_ENTRIES 128 74#define IGB_MAX_VFTA_ENTRIES 128
78#define IGB_82576_VF_DEV_ID 0x10CA 75#define IGB_82576_VF_DEV_ID 0x10CA
79#define IGB_I350_VF_DEV_ID 0x1520 76#define IGB_I350_VF_DEV_ID 0x1520
80 77
78/* NVM version defines */
79#define IGB_MAJOR_MASK 0xF000
80#define IGB_MINOR_MASK 0x0FF0
81#define IGB_BUILD_MASK 0x000F
82#define IGB_COMB_VER_MASK 0x00FF
83#define IGB_MAJOR_SHIFT 12
84#define IGB_MINOR_SHIFT 4
85#define IGB_COMB_VER_SHFT 8
86#define IGB_NVM_VER_INVALID 0xFFFF
87#define IGB_ETRACK_SHIFT 16
88#define NVM_ETRACK_WORD 0x0042
89#define NVM_COMB_VER_OFF 0x0083
90#define NVM_COMB_VER_PTR 0x003d
91
81struct vf_data_storage { 92struct vf_data_storage {
82 unsigned char vf_mac_addresses[ETH_ALEN]; 93 unsigned char vf_mac_addresses[ETH_ALEN];
83 u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES]; 94 u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
@@ -371,6 +382,7 @@ struct igb_adapter {
371 spinlock_t tmreg_lock; 382 spinlock_t tmreg_lock;
372 struct cyclecounter cc; 383 struct cyclecounter cc;
373 struct timecounter tc; 384 struct timecounter tc;
385 char fw_version[32];
374}; 386};
375 387
376#define IGB_FLAG_HAS_MSI (1 << 0) 388#define IGB_FLAG_HAS_MSI (1 << 0)
@@ -420,6 +432,7 @@ extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
420extern bool igb_has_link(struct igb_adapter *adapter); 432extern bool igb_has_link(struct igb_adapter *adapter);
421extern void igb_set_ethtool_ops(struct net_device *); 433extern void igb_set_ethtool_ops(struct net_device *);
422extern void igb_power_up_link(struct igb_adapter *); 434extern void igb_power_up_link(struct igb_adapter *);
435extern void igb_set_fw_version(struct igb_adapter *);
423#ifdef CONFIG_IGB_PTP 436#ifdef CONFIG_IGB_PTP
424extern void igb_ptp_init(struct igb_adapter *adapter); 437extern void igb_ptp_init(struct igb_adapter *adapter);
425extern void igb_ptp_remove(struct igb_adapter *adapter); 438extern void igb_ptp_remove(struct igb_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 812d4f963bd1..a19c84cad0e9 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -710,6 +710,7 @@ static int igb_set_eeprom(struct net_device *netdev,
710 if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG))) 710 if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG)))
711 hw->nvm.ops.update(hw); 711 hw->nvm.ops.update(hw);
712 712
713 igb_set_fw_version(adapter);
713 kfree(eeprom_buff); 714 kfree(eeprom_buff);
714 return ret_val; 715 return ret_val;
715} 716}
@@ -718,20 +719,16 @@ static void igb_get_drvinfo(struct net_device *netdev,
718 struct ethtool_drvinfo *drvinfo) 719 struct ethtool_drvinfo *drvinfo)
719{ 720{
720 struct igb_adapter *adapter = netdev_priv(netdev); 721 struct igb_adapter *adapter = netdev_priv(netdev);
721 u16 eeprom_data;
722 722
723 strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver)); 723 strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver));
724 strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version)); 724 strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version));
725 725
726 /* EEPROM image version # is reported as firmware version # for 726 /*
727 * 82575 controllers */ 727 * EEPROM image version # is reported as firmware version # for
728 adapter->hw.nvm.ops.read(&adapter->hw, 5, 1, &eeprom_data); 728 * 82575 controllers
729 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), 729 */
730 "%d.%d-%d", 730 strlcpy(drvinfo->fw_version, adapter->fw_version,
731 (eeprom_data & 0xF000) >> 12, 731 sizeof(drvinfo->fw_version));
732 (eeprom_data & 0x0FF0) >> 4,
733 eeprom_data & 0x000F);
734
735 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 732 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
736 sizeof(drvinfo->bus_info)); 733 sizeof(drvinfo->bus_info));
737 drvinfo->n_stats = IGB_STATS_LEN; 734 drvinfo->n_stats = IGB_STATS_LEN;
@@ -2271,6 +2268,38 @@ static void igb_ethtool_complete(struct net_device *netdev)
2271 pm_runtime_put(&adapter->pdev->dev); 2268 pm_runtime_put(&adapter->pdev->dev);
2272} 2269}
2273 2270
2271#ifdef CONFIG_IGB_PTP
2272static int igb_ethtool_get_ts_info(struct net_device *dev,
2273 struct ethtool_ts_info *info)
2274{
2275 struct igb_adapter *adapter = netdev_priv(dev);
2276
2277 info->so_timestamping =
2278 SOF_TIMESTAMPING_TX_HARDWARE |
2279 SOF_TIMESTAMPING_RX_HARDWARE |
2280 SOF_TIMESTAMPING_RAW_HARDWARE;
2281
2282 if (adapter->ptp_clock)
2283 info->phc_index = ptp_clock_index(adapter->ptp_clock);
2284 else
2285 info->phc_index = -1;
2286
2287 info->tx_types =
2288 (1 << HWTSTAMP_TX_OFF) |
2289 (1 << HWTSTAMP_TX_ON);
2290
2291 info->rx_filters =
2292 (1 << HWTSTAMP_FILTER_NONE) |
2293 (1 << HWTSTAMP_FILTER_ALL) |
2294 (1 << HWTSTAMP_FILTER_SOME) |
2295 (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
2296 (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
2297 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
2298
2299 return 0;
2300}
2301
2302#endif
2274static const struct ethtool_ops igb_ethtool_ops = { 2303static const struct ethtool_ops igb_ethtool_ops = {
2275 .get_settings = igb_get_settings, 2304 .get_settings = igb_get_settings,
2276 .set_settings = igb_set_settings, 2305 .set_settings = igb_set_settings,
@@ -2299,6 +2328,9 @@ static const struct ethtool_ops igb_ethtool_ops = {
2299 .set_coalesce = igb_set_coalesce, 2328 .set_coalesce = igb_set_coalesce,
2300 .begin = igb_ethtool_begin, 2329 .begin = igb_ethtool_begin,
2301 .complete = igb_ethtool_complete, 2330 .complete = igb_ethtool_complete,
2331#ifdef CONFIG_IGB_PTP
2332 .get_ts_info = igb_ethtool_get_ts_info,
2333#endif
2302}; 2334};
2303 2335
2304void igb_set_ethtool_ops(struct net_device *netdev) 2336void igb_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index dd3bfe8cd36c..8adeca9787ca 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -59,9 +59,9 @@
59#endif 59#endif
60#include "igb.h" 60#include "igb.h"
61 61
62#define MAJ 3 62#define MAJ 4
63#define MIN 4 63#define MIN 0
64#define BUILD 7 64#define BUILD 1
65#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ 65#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
66__stringify(BUILD) "-k" 66__stringify(BUILD) "-k"
67char igb_driver_name[] = "igb"; 67char igb_driver_name[] = "igb";
@@ -1048,11 +1048,6 @@ static int igb_set_interrupt_capability(struct igb_adapter *adapter)
1048 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) 1048 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
1049 numvecs += adapter->num_tx_queues; 1049 numvecs += adapter->num_tx_queues;
1050 1050
1051 /* i210 and i211 can only have 4 MSIX vectors for rx/tx queues. */
1052 if ((adapter->hw.mac.type == e1000_i210)
1053 || (adapter->hw.mac.type == e1000_i211))
1054 numvecs = 4;
1055
1056 /* store the number of vectors reserved for queues */ 1051 /* store the number of vectors reserved for queues */
1057 adapter->num_q_vectors = numvecs; 1052 adapter->num_q_vectors = numvecs;
1058 1053
@@ -1821,6 +1816,69 @@ static const struct net_device_ops igb_netdev_ops = {
1821}; 1816};
1822 1817
1823/** 1818/**
1819 * igb_set_fw_version - Configure version string for ethtool
1820 * @adapter: adapter struct
1821 *
1822 **/
1823void igb_set_fw_version(struct igb_adapter *adapter)
1824{
1825 struct e1000_hw *hw = &adapter->hw;
1826 u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset;
1827 u16 major, build, patch, fw_version;
1828 u32 etrack_id;
1829
1830 hw->nvm.ops.read(hw, 5, 1, &fw_version);
1831 if (adapter->hw.mac.type != e1000_i211) {
1832 hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh);
1833 hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl);
1834 etrack_id = (eeprom_verh << IGB_ETRACK_SHIFT) | eeprom_verl;
1835
1836 /* combo image version needs to be found */
1837 hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
1838 if ((comb_offset != 0x0) &&
1839 (comb_offset != IGB_NVM_VER_INVALID)) {
1840 hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
1841 + 1), 1, &comb_verh);
1842 hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
1843 1, &comb_verl);
1844
1845 /* Only display Option Rom if it exists and is valid */
1846 if ((comb_verh && comb_verl) &&
1847 ((comb_verh != IGB_NVM_VER_INVALID) &&
1848 (comb_verl != IGB_NVM_VER_INVALID))) {
1849 major = comb_verl >> IGB_COMB_VER_SHFT;
1850 build = (comb_verl << IGB_COMB_VER_SHFT) |
1851 (comb_verh >> IGB_COMB_VER_SHFT);
1852 patch = comb_verh & IGB_COMB_VER_MASK;
1853 snprintf(adapter->fw_version,
1854 sizeof(adapter->fw_version),
1855 "%d.%d%d, 0x%08x, %d.%d.%d",
1856 (fw_version & IGB_MAJOR_MASK) >>
1857 IGB_MAJOR_SHIFT,
1858 (fw_version & IGB_MINOR_MASK) >>
1859 IGB_MINOR_SHIFT,
1860 (fw_version & IGB_BUILD_MASK),
1861 etrack_id, major, build, patch);
1862 goto out;
1863 }
1864 }
1865 snprintf(adapter->fw_version, sizeof(adapter->fw_version),
1866 "%d.%d%d, 0x%08x",
1867 (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
1868 (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
1869 (fw_version & IGB_BUILD_MASK), etrack_id);
1870 } else {
1871 snprintf(adapter->fw_version, sizeof(adapter->fw_version),
1872 "%d.%d%d",
1873 (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
1874 (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
1875 (fw_version & IGB_BUILD_MASK));
1876 }
1877out:
1878 return;
1879}
1880
1881/**
1824 * igb_probe - Device Initialization Routine 1882 * igb_probe - Device Initialization Routine
1825 * @pdev: PCI device information struct 1883 * @pdev: PCI device information struct
1826 * @ent: entry in igb_pci_tbl 1884 * @ent: entry in igb_pci_tbl
@@ -2030,6 +2088,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
2030 goto err_eeprom; 2088 goto err_eeprom;
2031 } 2089 }
2032 2090
2091 /* get firmware version for ethtool -i */
2092 igb_set_fw_version(adapter);
2093
2033 setup_timer(&adapter->watchdog_timer, igb_watchdog, 2094 setup_timer(&adapter->watchdog_timer, igb_watchdog,
2034 (unsigned long) adapter); 2095 (unsigned long) adapter);
2035 setup_timer(&adapter->phy_info_timer, igb_update_phy_info, 2096 setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
@@ -2338,6 +2399,7 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
2338 struct e1000_hw *hw = &adapter->hw; 2399 struct e1000_hw *hw = &adapter->hw;
2339 struct net_device *netdev = adapter->netdev; 2400 struct net_device *netdev = adapter->netdev;
2340 struct pci_dev *pdev = adapter->pdev; 2401 struct pci_dev *pdev = adapter->pdev;
2402 u32 max_rss_queues;
2341 2403
2342 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); 2404 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
2343 2405
@@ -2370,40 +2432,69 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
2370 } else 2432 } else
2371 adapter->vfs_allocated_count = max_vfs; 2433 adapter->vfs_allocated_count = max_vfs;
2372 break; 2434 break;
2373 case e1000_i210:
2374 case e1000_i211:
2375 adapter->vfs_allocated_count = 0;
2376 break;
2377 default: 2435 default:
2378 break; 2436 break;
2379 } 2437 }
2380#endif /* CONFIG_PCI_IOV */ 2438#endif /* CONFIG_PCI_IOV */
2439
2440 /* Determine the maximum number of RSS queues supported. */
2381 switch (hw->mac.type) { 2441 switch (hw->mac.type) {
2442 case e1000_i211:
2443 max_rss_queues = IGB_MAX_RX_QUEUES_I211;
2444 break;
2445 case e1000_82575:
2382 case e1000_i210: 2446 case e1000_i210:
2383 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES_I210, 2447 max_rss_queues = IGB_MAX_RX_QUEUES_82575;
2384 num_online_cpus());
2385 break; 2448 break;
2449 case e1000_i350:
2450 /* I350 cannot do RSS and SR-IOV at the same time */
2451 if (!!adapter->vfs_allocated_count) {
2452 max_rss_queues = 1;
2453 break;
2454 }
2455 /* fall through */
2456 case e1000_82576:
2457 if (!!adapter->vfs_allocated_count) {
2458 max_rss_queues = 2;
2459 break;
2460 }
2461 /* fall through */
2462 case e1000_82580:
2463 default:
2464 max_rss_queues = IGB_MAX_RX_QUEUES;
2465 break;
2466 }
2467
2468 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
2469
2470 /* Determine if we need to pair queues. */
2471 switch (hw->mac.type) {
2472 case e1000_82575:
2386 case e1000_i211: 2473 case e1000_i211:
2387 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES_I211, 2474 /* Device supports enough interrupts without queue pairing. */
2388 num_online_cpus());
2389 break; 2475 break;
2476 case e1000_82576:
2477 /*
2478 * If VFs are going to be allocated with RSS queues then we
2479 * should pair the queues in order to conserve interrupts due
2480 * to limited supply.
2481 */
2482 if ((adapter->rss_queues > 1) &&
2483 (adapter->vfs_allocated_count > 6))
2484 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
2485 /* fall through */
2486 case e1000_82580:
2487 case e1000_i350:
2488 case e1000_i210:
2390 default: 2489 default:
2391 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, 2490 /*
2392 num_online_cpus()); 2491 * If rss_queues > half of max_rss_queues, pair the queues in
2492 * order to conserve interrupts due to limited supply.
2493 */
2494 if (adapter->rss_queues > (max_rss_queues / 2))
2495 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
2393 break; 2496 break;
2394 } 2497 }
2395 /* i350 cannot do RSS and SR-IOV at the same time */
2396 if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
2397 adapter->rss_queues = 1;
2398
2399 /*
2400 * if rss_queues > 4 or vfs are going to be allocated with rss_queues
2401 * then we should combine the queues into a queue pair in order to
2402 * conserve interrupts due to limited supply
2403 */
2404 if ((adapter->rss_queues > 4) ||
2405 ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
2406 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
2407 2498
2408 /* Setup and initialize a copy of the hw vlan table array */ 2499 /* Setup and initialize a copy of the hw vlan table array */
2409 adapter->shadow_vfta = kzalloc(sizeof(u32) * 2500 adapter->shadow_vfta = kzalloc(sizeof(u32) *
@@ -4917,7 +5008,7 @@ static int igb_vf_configure(struct igb_adapter *adapter, int vf)
4917 unsigned int device_id; 5008 unsigned int device_id;
4918 u16 thisvf_devfn; 5009 u16 thisvf_devfn;
4919 5010
4920 random_ether_addr(mac_addr); 5011 eth_random_addr(mac_addr);
4921 igb_set_vf_mac(adapter, vf, mac_addr); 5012 igb_set_vf_mac(adapter, vf, mac_addr);
4922 5013
4923 switch (adapter->hw.mac.type) { 5014 switch (adapter->hw.mac.type) {
@@ -5326,7 +5417,7 @@ static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
5326 5417
5327 /* generate a new mac address as we were hotplug removed/added */ 5418 /* generate a new mac address as we were hotplug removed/added */
5328 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC)) 5419 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
5329 random_ether_addr(vf_mac); 5420 eth_random_addr(vf_mac);
5330 5421
5331 /* process remaining reset events */ 5422 /* process remaining reset events */
5332 igb_vf_reset(adapter, vf); 5423 igb_vf_reset(adapter, vf);
@@ -5686,6 +5777,7 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
5686/** 5777/**
5687 * igb_clean_tx_irq - Reclaim resources after transmit completes 5778 * igb_clean_tx_irq - Reclaim resources after transmit completes
5688 * @q_vector: pointer to q_vector containing needed info 5779 * @q_vector: pointer to q_vector containing needed info
5780 *
5689 * returns true if ring is completely cleaned 5781 * returns true if ring is completely cleaned
5690 **/ 5782 **/
5691static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) 5783static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
@@ -6997,6 +7089,11 @@ static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
6997 } 7089 }
6998 7090
6999 wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */ 7091 wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
7092 /*
7093 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
7094 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
7095 */
7096 wr32(E1000_RTTBCNRM, 0x14);
7000 wr32(E1000_RTTBCNRC, bcnrc_val); 7097 wr32(E1000_RTTBCNRC, bcnrc_val);
7001} 7098}
7002 7099
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index d5ee7fa50723..c846ea9131a3 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -330,7 +330,17 @@ void igb_ptp_init(struct igb_adapter *adapter)
330 330
331void igb_ptp_remove(struct igb_adapter *adapter) 331void igb_ptp_remove(struct igb_adapter *adapter)
332{ 332{
333 cancel_delayed_work_sync(&adapter->overflow_work); 333 switch (adapter->hw.mac.type) {
334 case e1000_i211:
335 case e1000_i210:
336 case e1000_i350:
337 case e1000_82580:
338 case e1000_82576:
339 cancel_delayed_work_sync(&adapter->overflow_work);
340 break;
341 default:
342 return;
343 }
334 344
335 if (adapter->ptp_clock) { 345 if (adapter->ptp_clock) {
336 ptp_clock_unregister(adapter->ptp_clock); 346 ptp_clock_unregister(adapter->ptp_clock);
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 8ec74b07f940..0696abfe9944 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -766,6 +766,7 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter)
766/** 766/**
767 * igbvf_clean_tx_irq - Reclaim resources after transmit completes 767 * igbvf_clean_tx_irq - Reclaim resources after transmit completes
768 * @adapter: board private structure 768 * @adapter: board private structure
769 *
769 * returns true if ring is completely cleaned 770 * returns true if ring is completely cleaned
770 **/ 771 **/
771static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) 772static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
index 30a6cc426037..eea0e10ce12f 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.c
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
@@ -283,7 +283,8 @@ static s32 e1000_set_vfta_vf(struct e1000_hw *hw, u16 vid, bool set)
283 return err; 283 return err;
284} 284}
285 285
286/** e1000_rlpml_set_vf - Set the maximum receive packet length 286/**
287 * e1000_rlpml_set_vf - Set the maximum receive packet length
287 * @hw: pointer to the HW structure 288 * @hw: pointer to the HW structure
288 * @max_size: value to assign to max frame size 289 * @max_size: value to assign to max frame size
289 **/ 290 **/
@@ -302,7 +303,7 @@ void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size)
302 * e1000_rar_set_vf - set device MAC address 303 * e1000_rar_set_vf - set device MAC address
303 * @hw: pointer to the HW structure 304 * @hw: pointer to the HW structure
304 * @addr: pointer to the receive address 305 * @addr: pointer to the receive address
305 * @index receive address array register 306 * @index: receive address array register
306 **/ 307 **/
307static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index) 308static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index)
308{ 309{
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 5fce363d810a..aab649f8c5f0 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -2276,9 +2276,9 @@ static void ixgb_netpoll(struct net_device *dev)
2276#endif 2276#endif
2277 2277
2278/** 2278/**
2279 * ixgb_io_error_detected() - called when PCI error is detected 2279 * ixgb_io_error_detected - called when PCI error is detected
2280 * @pdev pointer to pci device with error 2280 * @pdev: pointer to pci device with error
2281 * @state pci channel state after error 2281 * @state: pci channel state after error
2282 * 2282 *
2283 * This callback is called by the PCI subsystem whenever 2283 * This callback is called by the PCI subsystem whenever
2284 * a PCI bus error is detected. 2284 * a PCI bus error is detected.
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 0bdf06bc5c49..5fd5d04c26c9 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -34,11 +34,11 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
34 34
35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ 35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
36 ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ 36 ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
37 ixgbe_mbx.o ixgbe_x540.o ixgbe_sysfs.o ixgbe_lib.o 37 ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o
38 38
39ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ 39ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
40 ixgbe_dcb_82599.o ixgbe_dcb_nl.o 40 ixgbe_dcb_82599.o ixgbe_dcb_nl.o
41 41
42ixgbe-$(CONFIG_IXGBE_PTP) += ixgbe_ptp.o 42ixgbe-$(CONFIG_IXGBE_PTP) += ixgbe_ptp.o
43 43ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o
44ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o 44ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 7af291e236bf..f7f6fe2255da 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -278,10 +278,16 @@ enum ixgbe_ring_f_enum {
278#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES 278#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
279#endif /* IXGBE_FCOE */ 279#endif /* IXGBE_FCOE */
280struct ixgbe_ring_feature { 280struct ixgbe_ring_feature {
281 int indices; 281 u16 limit; /* upper limit on feature indices */
282 int mask; 282 u16 indices; /* current value of indices */
283 u16 mask; /* Mask used for feature to ring mapping */
284 u16 offset; /* offset to start of feature */
283} ____cacheline_internodealigned_in_smp; 285} ____cacheline_internodealigned_in_smp;
284 286
287#define IXGBE_82599_VMDQ_8Q_MASK 0x78
288#define IXGBE_82599_VMDQ_4Q_MASK 0x7C
289#define IXGBE_82599_VMDQ_2Q_MASK 0x7E
290
285/* 291/*
286 * FCoE requires that all Rx buffers be over 2200 bytes in length. Since 292 * FCoE requires that all Rx buffers be over 2200 bytes in length. Since
287 * this is twice the size of a half page we need to double the page order 293 * this is twice the size of a half page we need to double the page order
@@ -315,7 +321,7 @@ struct ixgbe_ring_container {
315 ? 8 : 1) 321 ? 8 : 1)
316#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS 322#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
317 323
318/* MAX_MSIX_Q_VECTORS of these are allocated, 324/* MAX_Q_VECTORS of these are allocated,
319 * but we only use one per queue-specific vector. 325 * but we only use one per queue-specific vector.
320 */ 326 */
321struct ixgbe_q_vector { 327struct ixgbe_q_vector {
@@ -401,11 +407,11 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
401#define NON_Q_VECTORS (OTHER_VECTOR) 407#define NON_Q_VECTORS (OTHER_VECTOR)
402 408
403#define MAX_MSIX_VECTORS_82599 64 409#define MAX_MSIX_VECTORS_82599 64
404#define MAX_MSIX_Q_VECTORS_82599 64 410#define MAX_Q_VECTORS_82599 64
405#define MAX_MSIX_VECTORS_82598 18 411#define MAX_MSIX_VECTORS_82598 18
406#define MAX_MSIX_Q_VECTORS_82598 16 412#define MAX_Q_VECTORS_82598 16
407 413
408#define MAX_MSIX_Q_VECTORS MAX_MSIX_Q_VECTORS_82599 414#define MAX_Q_VECTORS MAX_Q_VECTORS_82599
409#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599 415#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599
410 416
411#define MIN_MSIX_Q_VECTORS 1 417#define MIN_MSIX_Q_VECTORS 1
@@ -427,35 +433,33 @@ struct ixgbe_adapter {
427 * thus the additional *_CAPABLE flags. 433 * thus the additional *_CAPABLE flags.
428 */ 434 */
429 u32 flags; 435 u32 flags;
430#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 1) 436#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 0)
431#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 2) 437#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1)
432#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 3) 438#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 2)
433#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 4) 439#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 3)
434#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 6) 440#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 4)
435#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 7) 441#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 5)
436#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 8) 442#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 6)
437#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 9) 443#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 7)
438#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 10) 444#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 8)
439#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 11) 445#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 9)
440#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 12) 446#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 10)
441#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 13) 447#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 11)
442#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 14) 448#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 12)
443#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 16) 449#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 13)
444#define IXGBE_FLAG_RSS_CAPABLE (u32)(1 << 17) 450#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 14)
445#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 18) 451#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 15)
446#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 19) 452#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 16)
447#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 20) 453#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 17)
448#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22) 454#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 18)
449#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 23) 455#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 19)
450#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 24) 456#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 20)
451#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 25) 457#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 21)
452#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 26) 458#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 22)
453#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 27) 459#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 23)
454#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 28)
455#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 29)
456 460
457 u32 flags2; 461 u32 flags2;
458#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1) 462#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1 << 0)
459#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1) 463#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1)
460#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 2) 464#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 2)
461#define IXGBE_FLAG2_TEMP_SENSOR_EVENT (u32)(1 << 3) 465#define IXGBE_FLAG2_TEMP_SENSOR_EVENT (u32)(1 << 3)
@@ -496,7 +500,7 @@ struct ixgbe_adapter {
496 u32 alloc_rx_page_failed; 500 u32 alloc_rx_page_failed;
497 u32 alloc_rx_buff_failed; 501 u32 alloc_rx_buff_failed;
498 502
499 struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; 503 struct ixgbe_q_vector *q_vector[MAX_Q_VECTORS];
500 504
501 /* DCB parameters */ 505 /* DCB parameters */
502 struct ieee_pfc *ixgbe_ieee_pfc; 506 struct ieee_pfc *ixgbe_ieee_pfc;
@@ -507,8 +511,8 @@ struct ixgbe_adapter {
507 u8 dcbx_cap; 511 u8 dcbx_cap;
508 enum ixgbe_fc_mode last_lfc_mode; 512 enum ixgbe_fc_mode last_lfc_mode;
509 513
510 int num_msix_vectors; 514 int num_q_vectors; /* current number of q_vectors for device */
511 int max_msix_q_vectors; /* true count of q_vectors for device */ 515 int max_q_vectors; /* true count of q_vectors for device */
512 struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE]; 516 struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
513 struct msix_entry *msix_entries; 517 struct msix_entry *msix_entries;
514 518
@@ -561,6 +565,7 @@ struct ixgbe_adapter {
561 spinlock_t tmreg_lock; 565 spinlock_t tmreg_lock;
562 struct cyclecounter cc; 566 struct cyclecounter cc;
563 struct timecounter tc; 567 struct timecounter tc;
568 int rx_hwtstamp_filter;
564 u32 base_incval; 569 u32 base_incval;
565 u32 cycle_speed; 570 u32 cycle_speed;
566#endif /* CONFIG_IXGBE_PTP */ 571#endif /* CONFIG_IXGBE_PTP */
@@ -704,6 +709,7 @@ extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
704extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type); 709extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
705extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, 710extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
706 struct netdev_fcoe_hbainfo *info); 711 struct netdev_fcoe_hbainfo *info);
712extern u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
707#endif /* IXGBE_FCOE */ 713#endif /* IXGBE_FCOE */
708 714
709static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring) 715static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
@@ -718,6 +724,7 @@ extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
718extern void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector, 724extern void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
719 struct sk_buff *skb); 725 struct sk_buff *skb);
720extern void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, 726extern void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
727 union ixgbe_adv_rx_desc *rx_desc,
721 struct sk_buff *skb); 728 struct sk_buff *skb);
722extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, 729extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
723 struct ifreq *ifr, int cmd); 730 struct ifreq *ifr, int cmd);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index dee64d2703f0..e7dddfd97cb9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -241,7 +241,9 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
241 241
242 /* Determine 1G link capabilities off of SFP+ type */ 242 /* Determine 1G link capabilities off of SFP+ type */
243 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || 243 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
244 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) { 244 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
245 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
246 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
245 *speed = IXGBE_LINK_SPEED_1GB_FULL; 247 *speed = IXGBE_LINK_SPEED_1GB_FULL;
246 *negotiation = true; 248 *negotiation = true;
247 goto out; 249 goto out;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 77ac41feb0fe..bb7fde45c057 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -3132,7 +3132,7 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
3132} 3132}
3133 3133
3134/** 3134/**
3135 * ixgbe_get_wwn_prefix_generic Get alternative WWNN/WWPN prefix from 3135 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
3136 * the EEPROM 3136 * the EEPROM
3137 * @hw: pointer to hardware structure 3137 * @hw: pointer to hardware structure
3138 * @wwnn_prefix: the alternative WWNN prefix 3138 * @wwnn_prefix: the alternative WWNN prefix
@@ -3325,6 +3325,7 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw,
3325 * ixgbe_calculate_checksum - Calculate checksum for buffer 3325 * ixgbe_calculate_checksum - Calculate checksum for buffer
3326 * @buffer: pointer to EEPROM 3326 * @buffer: pointer to EEPROM
3327 * @length: size of EEPROM to calculate a checksum for 3327 * @length: size of EEPROM to calculate a checksum for
3328 *
3328 * Calculates the checksum for some buffer on a specified length. The 3329 * Calculates the checksum for some buffer on a specified length. The
3329 * checksum calculated is returned. 3330 * checksum calculated is returned.
3330 **/ 3331 **/
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index 8bfaaee5ac5b..5442b359141e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -180,67 +180,79 @@ out:
180 180
181void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en) 181void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en)
182{ 182{
183 int i; 183 struct tc_configuration *tc_config = &cfg->tc_config[0];
184 int tc;
184 185
185 *pfc_en = 0; 186 for (*pfc_en = 0, tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
186 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) 187 if (tc_config[tc].dcb_pfc != pfc_disabled)
187 *pfc_en |= !!(cfg->tc_config[i].dcb_pfc & 0xF) << i; 188 *pfc_en |= 1 << tc;
189 }
188} 190}
189 191
190void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction, 192void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction,
191 u16 *refill) 193 u16 *refill)
192{ 194{
193 struct tc_bw_alloc *p; 195 struct tc_configuration *tc_config = &cfg->tc_config[0];
194 int i; 196 int tc;
195 197
196 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 198 for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++)
197 p = &cfg->tc_config[i].path[direction]; 199 refill[tc] = tc_config[tc].path[direction].data_credits_refill;
198 refill[i] = p->data_credits_refill;
199 }
200} 200}
201 201
202void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg, u16 *max) 202void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg, u16 *max)
203{ 203{
204 int i; 204 struct tc_configuration *tc_config = &cfg->tc_config[0];
205 int tc;
205 206
206 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) 207 for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++)
207 max[i] = cfg->tc_config[i].desc_credits_max; 208 max[tc] = tc_config[tc].desc_credits_max;
208} 209}
209 210
210void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg, int direction, 211void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg, int direction,
211 u8 *bwgid) 212 u8 *bwgid)
212{ 213{
213 struct tc_bw_alloc *p; 214 struct tc_configuration *tc_config = &cfg->tc_config[0];
214 int i; 215 int tc;
215 216
216 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 217 for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++)
217 p = &cfg->tc_config[i].path[direction]; 218 bwgid[tc] = tc_config[tc].path[direction].bwg_id;
218 bwgid[i] = p->bwg_id;
219 }
220} 219}
221 220
222void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction, 221void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction,
223 u8 *ptype) 222 u8 *ptype)
224{ 223{
225 struct tc_bw_alloc *p; 224 struct tc_configuration *tc_config = &cfg->tc_config[0];
226 int i; 225 int tc;
227 226
228 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 227 for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++)
229 p = &cfg->tc_config[i].path[direction]; 228 ptype[tc] = tc_config[tc].path[direction].prio_type;
230 ptype[i] = p->prio_type; 229}
230
231u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up)
232{
233 struct tc_configuration *tc_config = &cfg->tc_config[0];
234 u8 prio_mask = 1 << up;
235 u8 tc;
236
237 /*
238 * Test for TCs 7 through 1 and report the first match we find. If
239 * we find no match we can assume that the TC is 0 since the TC must
240 * be set for all user priorities
241 */
242 for (tc = MAX_TRAFFIC_CLASS - 1; tc; tc--) {
243 if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap)
244 break;
231 } 245 }
246
247 return tc;
232} 248}
233 249
234void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map) 250void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map)
235{ 251{
236 int i, up; 252 u8 up;
237 unsigned long bitmap;
238 253
239 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 254 for (up = 0; up < MAX_USER_PRIORITY; up++)
240 bitmap = cfg->tc_config[i].path[direction].up_to_tc_bitmap; 255 map[up] = ixgbe_dcb_get_tc_from_up(cfg, direction, up);
241 for_each_set_bit(up, &bitmap, MAX_USER_PRIORITY)
242 map[up] = i;
243 }
244} 256}
245 257
246/** 258/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
index 24333b718166..1f4108ee154b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
@@ -146,6 +146,7 @@ void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *, u16 *);
146void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *, int, u8 *); 146void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *, int, u8 *);
147void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *, int, u8 *); 147void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *, int, u8 *);
148void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *, int, u8 *); 148void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *, int, u8 *);
149u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *, int, u8);
149 150
150/* DCB credits calculation */ 151/* DCB credits calculation */
151s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *, 152s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 5164a21b13ca..f1e002d5fa8f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -151,34 +151,21 @@ static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
151 151
152static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) 152static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
153{ 153{
154 int err = 0;
155 u8 prio_tc[MAX_USER_PRIORITY] = {0};
156 int i;
157 struct ixgbe_adapter *adapter = netdev_priv(netdev); 154 struct ixgbe_adapter *adapter = netdev_priv(netdev);
155 int err = 0;
158 156
159 /* Fail command if not in CEE mode */ 157 /* Fail command if not in CEE mode */
160 if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) 158 if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
161 return 1; 159 return 1;
162 160
163 /* verify there is something to do, if not then exit */ 161 /* verify there is something to do, if not then exit */
164 if (!!state != !(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) 162 if (!state == !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
165 goto out;
166
167 if (state > 0) {
168 err = ixgbe_setup_tc(netdev, adapter->dcb_cfg.num_tcs.pg_tcs);
169 ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc);
170 } else {
171 err = ixgbe_setup_tc(netdev, 0);
172 }
173
174 if (err)
175 goto out; 163 goto out;
176 164
177 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) 165 err = ixgbe_setup_tc(netdev,
178 netdev_set_prio_tc_map(netdev, i, prio_tc[i]); 166 state ? adapter->dcb_cfg.num_tcs.pg_tcs : 0);
179
180out: 167out:
181 return err ? 1 : 0; 168 return !!err;
182} 169}
183 170
184static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev, 171static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
@@ -584,9 +571,6 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
584 if (err) 571 if (err)
585 goto err_out; 572 goto err_out;
586 573
587 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
588 netdev_set_prio_tc_map(dev, i, ets->prio_tc[i]);
589
590 err = ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame); 574 err = ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame);
591err_out: 575err_out:
592 return err; 576 return err;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 3178f1ec3711..4104ea25d818 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -154,100 +154,60 @@ static int ixgbe_get_settings(struct net_device *netdev,
154{ 154{
155 struct ixgbe_adapter *adapter = netdev_priv(netdev); 155 struct ixgbe_adapter *adapter = netdev_priv(netdev);
156 struct ixgbe_hw *hw = &adapter->hw; 156 struct ixgbe_hw *hw = &adapter->hw;
157 ixgbe_link_speed supported_link;
157 u32 link_speed = 0; 158 u32 link_speed = 0;
159 bool autoneg;
158 bool link_up; 160 bool link_up;
159 161
160 ecmd->supported = SUPPORTED_10000baseT_Full; 162 hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
161 ecmd->autoneg = AUTONEG_ENABLE; 163
162 ecmd->transceiver = XCVR_EXTERNAL; 164 /* set the supported link speeds */
163 if ((hw->phy.media_type == ixgbe_media_type_copper) || 165 if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
164 (hw->phy.multispeed_fiber)) { 166 ecmd->supported |= SUPPORTED_10000baseT_Full;
165 ecmd->supported |= (SUPPORTED_1000baseT_Full | 167 if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
166 SUPPORTED_Autoneg); 168 ecmd->supported |= SUPPORTED_1000baseT_Full;
167 169 if (supported_link & IXGBE_LINK_SPEED_100_FULL)
168 switch (hw->mac.type) { 170 ecmd->supported |= SUPPORTED_100baseT_Full;
169 case ixgbe_mac_X540: 171
170 ecmd->supported |= SUPPORTED_100baseT_Full; 172 /* set the advertised speeds */
171 break; 173 if (hw->phy.autoneg_advertised) {
172 default: 174 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
173 break; 175 ecmd->advertising |= ADVERTISED_100baseT_Full;
174 } 176 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
175 177 ecmd->advertising |= ADVERTISED_10000baseT_Full;
176 ecmd->advertising = ADVERTISED_Autoneg; 178 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
177 if (hw->phy.autoneg_advertised) { 179 ecmd->advertising |= ADVERTISED_1000baseT_Full;
178 if (hw->phy.autoneg_advertised &
179 IXGBE_LINK_SPEED_100_FULL)
180 ecmd->advertising |= ADVERTISED_100baseT_Full;
181 if (hw->phy.autoneg_advertised &
182 IXGBE_LINK_SPEED_10GB_FULL)
183 ecmd->advertising |= ADVERTISED_10000baseT_Full;
184 if (hw->phy.autoneg_advertised &
185 IXGBE_LINK_SPEED_1GB_FULL)
186 ecmd->advertising |= ADVERTISED_1000baseT_Full;
187 } else {
188 /*
189 * Default advertised modes in case
190 * phy.autoneg_advertised isn't set.
191 */
192 ecmd->advertising |= (ADVERTISED_10000baseT_Full |
193 ADVERTISED_1000baseT_Full);
194 if (hw->mac.type == ixgbe_mac_X540)
195 ecmd->advertising |= ADVERTISED_100baseT_Full;
196 }
197
198 if (hw->phy.media_type == ixgbe_media_type_copper) {
199 ecmd->supported |= SUPPORTED_TP;
200 ecmd->advertising |= ADVERTISED_TP;
201 ecmd->port = PORT_TP;
202 } else {
203 ecmd->supported |= SUPPORTED_FIBRE;
204 ecmd->advertising |= ADVERTISED_FIBRE;
205 ecmd->port = PORT_FIBRE;
206 }
207 } else if (hw->phy.media_type == ixgbe_media_type_backplane) {
208 /* Set as FIBRE until SERDES defined in kernel */
209 if (hw->device_id == IXGBE_DEV_ID_82598_BX) {
210 ecmd->supported = (SUPPORTED_1000baseT_Full |
211 SUPPORTED_FIBRE);
212 ecmd->advertising = (ADVERTISED_1000baseT_Full |
213 ADVERTISED_FIBRE);
214 ecmd->port = PORT_FIBRE;
215 ecmd->autoneg = AUTONEG_DISABLE;
216 } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) ||
217 (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) {
218 ecmd->supported |= (SUPPORTED_1000baseT_Full |
219 SUPPORTED_Autoneg |
220 SUPPORTED_FIBRE);
221 ecmd->advertising = (ADVERTISED_10000baseT_Full |
222 ADVERTISED_1000baseT_Full |
223 ADVERTISED_Autoneg |
224 ADVERTISED_FIBRE);
225 ecmd->port = PORT_FIBRE;
226 } else {
227 ecmd->supported |= (SUPPORTED_1000baseT_Full |
228 SUPPORTED_FIBRE);
229 ecmd->advertising = (ADVERTISED_10000baseT_Full |
230 ADVERTISED_1000baseT_Full |
231 ADVERTISED_FIBRE);
232 ecmd->port = PORT_FIBRE;
233 }
234 } else { 180 } else {
235 ecmd->supported |= SUPPORTED_FIBRE; 181 /* default modes in case phy.autoneg_advertised isn't set */
236 ecmd->advertising = (ADVERTISED_10000baseT_Full | 182 if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
237 ADVERTISED_FIBRE); 183 ecmd->advertising |= ADVERTISED_10000baseT_Full;
238 ecmd->port = PORT_FIBRE; 184 if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
239 ecmd->autoneg = AUTONEG_DISABLE; 185 ecmd->advertising |= ADVERTISED_1000baseT_Full;
186 if (supported_link & IXGBE_LINK_SPEED_100_FULL)
187 ecmd->advertising |= ADVERTISED_100baseT_Full;
240 } 188 }
241 189
242 /* Get PHY type */ 190 if (autoneg) {
191 ecmd->supported |= SUPPORTED_Autoneg;
192 ecmd->advertising |= ADVERTISED_Autoneg;
193 ecmd->autoneg = AUTONEG_ENABLE;
194 } else
195 ecmd->autoneg = AUTONEG_DISABLE;
196
197 ecmd->transceiver = XCVR_EXTERNAL;
198
199 /* Determine the remaining settings based on the PHY type. */
243 switch (adapter->hw.phy.type) { 200 switch (adapter->hw.phy.type) {
244 case ixgbe_phy_tn: 201 case ixgbe_phy_tn:
245 case ixgbe_phy_aq: 202 case ixgbe_phy_aq:
246 case ixgbe_phy_cu_unknown: 203 case ixgbe_phy_cu_unknown:
247 /* Copper 10G-BASET */ 204 ecmd->supported |= SUPPORTED_TP;
205 ecmd->advertising |= ADVERTISED_TP;
248 ecmd->port = PORT_TP; 206 ecmd->port = PORT_TP;
249 break; 207 break;
250 case ixgbe_phy_qt: 208 case ixgbe_phy_qt:
209 ecmd->supported |= SUPPORTED_FIBRE;
210 ecmd->advertising |= ADVERTISED_FIBRE;
251 ecmd->port = PORT_FIBRE; 211 ecmd->port = PORT_FIBRE;
252 break; 212 break;
253 case ixgbe_phy_nl: 213 case ixgbe_phy_nl:
@@ -257,42 +217,59 @@ static int ixgbe_get_settings(struct net_device *netdev,
257 case ixgbe_phy_sfp_avago: 217 case ixgbe_phy_sfp_avago:
258 case ixgbe_phy_sfp_intel: 218 case ixgbe_phy_sfp_intel:
259 case ixgbe_phy_sfp_unknown: 219 case ixgbe_phy_sfp_unknown:
260 switch (adapter->hw.phy.sfp_type) {
261 /* SFP+ devices, further checking needed */ 220 /* SFP+ devices, further checking needed */
221 switch (adapter->hw.phy.sfp_type) {
262 case ixgbe_sfp_type_da_cu: 222 case ixgbe_sfp_type_da_cu:
263 case ixgbe_sfp_type_da_cu_core0: 223 case ixgbe_sfp_type_da_cu_core0:
264 case ixgbe_sfp_type_da_cu_core1: 224 case ixgbe_sfp_type_da_cu_core1:
225 ecmd->supported |= SUPPORTED_FIBRE;
226 ecmd->advertising |= ADVERTISED_FIBRE;
265 ecmd->port = PORT_DA; 227 ecmd->port = PORT_DA;
266 break; 228 break;
267 case ixgbe_sfp_type_sr: 229 case ixgbe_sfp_type_sr:
268 case ixgbe_sfp_type_lr: 230 case ixgbe_sfp_type_lr:
269 case ixgbe_sfp_type_srlr_core0: 231 case ixgbe_sfp_type_srlr_core0:
270 case ixgbe_sfp_type_srlr_core1: 232 case ixgbe_sfp_type_srlr_core1:
233 ecmd->supported |= SUPPORTED_FIBRE;
234 ecmd->advertising |= ADVERTISED_FIBRE;
271 ecmd->port = PORT_FIBRE; 235 ecmd->port = PORT_FIBRE;
272 break; 236 break;
273 case ixgbe_sfp_type_not_present: 237 case ixgbe_sfp_type_not_present:
238 ecmd->supported |= SUPPORTED_FIBRE;
239 ecmd->advertising |= ADVERTISED_FIBRE;
274 ecmd->port = PORT_NONE; 240 ecmd->port = PORT_NONE;
275 break; 241 break;
276 case ixgbe_sfp_type_1g_cu_core0: 242 case ixgbe_sfp_type_1g_cu_core0:
277 case ixgbe_sfp_type_1g_cu_core1: 243 case ixgbe_sfp_type_1g_cu_core1:
244 ecmd->supported |= SUPPORTED_TP;
245 ecmd->advertising |= ADVERTISED_TP;
278 ecmd->port = PORT_TP; 246 ecmd->port = PORT_TP;
279 ecmd->supported = SUPPORTED_TP; 247 break;
280 ecmd->advertising = (ADVERTISED_1000baseT_Full | 248 case ixgbe_sfp_type_1g_sx_core0:
281 ADVERTISED_TP); 249 case ixgbe_sfp_type_1g_sx_core1:
250 ecmd->supported |= SUPPORTED_FIBRE;
251 ecmd->advertising |= ADVERTISED_FIBRE;
252 ecmd->port = PORT_FIBRE;
282 break; 253 break;
283 case ixgbe_sfp_type_unknown: 254 case ixgbe_sfp_type_unknown:
284 default: 255 default:
256 ecmd->supported |= SUPPORTED_FIBRE;
257 ecmd->advertising |= ADVERTISED_FIBRE;
285 ecmd->port = PORT_OTHER; 258 ecmd->port = PORT_OTHER;
286 break; 259 break;
287 } 260 }
288 break; 261 break;
289 case ixgbe_phy_xaui: 262 case ixgbe_phy_xaui:
263 ecmd->supported |= SUPPORTED_FIBRE;
264 ecmd->advertising |= ADVERTISED_FIBRE;
290 ecmd->port = PORT_NONE; 265 ecmd->port = PORT_NONE;
291 break; 266 break;
292 case ixgbe_phy_unknown: 267 case ixgbe_phy_unknown:
293 case ixgbe_phy_generic: 268 case ixgbe_phy_generic:
294 case ixgbe_phy_sfp_unsupported: 269 case ixgbe_phy_sfp_unsupported:
295 default: 270 default:
271 ecmd->supported |= SUPPORTED_FIBRE;
272 ecmd->advertising |= ADVERTISED_FIBRE;
296 ecmd->port = PORT_OTHER; 273 ecmd->port = PORT_OTHER;
297 break; 274 break;
298 } 275 }
@@ -2113,7 +2090,6 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
2113 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2090 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2114 struct ixgbe_q_vector *q_vector; 2091 struct ixgbe_q_vector *q_vector;
2115 int i; 2092 int i;
2116 int num_vectors;
2117 u16 tx_itr_param, rx_itr_param; 2093 u16 tx_itr_param, rx_itr_param;
2118 bool need_reset = false; 2094 bool need_reset = false;
2119 2095
@@ -2149,12 +2125,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
2149 /* check the old value and enable RSC if necessary */ 2125 /* check the old value and enable RSC if necessary */
2150 need_reset = ixgbe_update_rsc(adapter); 2126 need_reset = ixgbe_update_rsc(adapter);
2151 2127
2152 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) 2128 for (i = 0; i < adapter->num_q_vectors; i++) {
2153 num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2154 else
2155 num_vectors = 1;
2156
2157 for (i = 0; i < num_vectors; i++) {
2158 q_vector = adapter->q_vector[i]; 2129 q_vector = adapter->q_vector[i];
2159 if (q_vector->tx.count && !q_vector->rx.count) 2130 if (q_vector->tx.count && !q_vector->rx.count)
2160 /* tx only */ 2131 /* tx only */
@@ -2274,10 +2245,6 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
2274{ 2245{
2275 cmd->data = 0; 2246 cmd->data = 0;
2276 2247
2277 /* if RSS is disabled then report no hashing */
2278 if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
2279 return 0;
2280
2281 /* Report default options for RSS on ixgbe */ 2248 /* Report default options for RSS on ixgbe */
2282 switch (cmd->flow_type) { 2249 switch (cmd->flow_type) {
2283 case TCP_V4_FLOW: 2250 case TCP_V4_FLOW:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index bc07933d67da..cc28c44a048c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -38,7 +38,7 @@
38 38
39/** 39/**
40 * ixgbe_fcoe_clear_ddp - clear the given ddp context 40 * ixgbe_fcoe_clear_ddp - clear the given ddp context
41 * @ddp - ptr to the ixgbe_fcoe_ddp 41 * @ddp: ptr to the ixgbe_fcoe_ddp
42 * 42 *
43 * Returns : none 43 * Returns : none
44 * 44 *
@@ -674,7 +674,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
674 if (adapter->ring_feature[RING_F_FCOE].indices) { 674 if (adapter->ring_feature[RING_F_FCOE].indices) {
675 /* Use multiple rx queues for FCoE by redirection table */ 675 /* Use multiple rx queues for FCoE by redirection table */
676 for (i = 0; i < IXGBE_FCRETA_SIZE; i++) { 676 for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
677 fcoe_i = f->mask + i % f->indices; 677 fcoe_i = f->offset + i % f->indices;
678 fcoe_i &= IXGBE_FCRETA_ENTRY_MASK; 678 fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
679 fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; 679 fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
680 IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q); 680 IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
@@ -683,7 +683,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
683 IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0); 683 IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
684 } else { 684 } else {
685 /* Use single rx queue for FCoE */ 685 /* Use single rx queue for FCoE */
686 fcoe_i = f->mask; 686 fcoe_i = f->offset;
687 fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; 687 fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
688 IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0); 688 IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
689 IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 689 IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
@@ -691,7 +691,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
691 (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); 691 (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
692 } 692 }
693 /* send FIP frames to the first FCoE queue */ 693 /* send FIP frames to the first FCoE queue */
694 fcoe_i = f->mask; 694 fcoe_i = f->offset;
695 fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; 695 fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
696 IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP), 696 IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
697 IXGBE_ETQS_QUEUE_EN | 697 IXGBE_ETQS_QUEUE_EN |
@@ -770,7 +770,7 @@ int ixgbe_fcoe_enable(struct net_device *netdev)
770 ixgbe_clear_interrupt_scheme(adapter); 770 ixgbe_clear_interrupt_scheme(adapter);
771 771
772 adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; 772 adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
773 adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE; 773 adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE;
774 netdev->features |= NETIF_F_FCOE_CRC; 774 netdev->features |= NETIF_F_FCOE_CRC;
775 netdev->features |= NETIF_F_FSO; 775 netdev->features |= NETIF_F_FSO;
776 netdev->features |= NETIF_F_FCOE_MTU; 776 netdev->features |= NETIF_F_FCOE_MTU;
@@ -960,3 +960,18 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
960 960
961 return 0; 961 return 0;
962} 962}
963
964/**
965 * ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to
966 * @adapter - pointer to the device adapter structure
967 *
968 * Return : TC that FCoE is mapped to
969 */
970u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter)
971{
972#ifdef CONFIG_IXGBE_DCB
973 return netdev_get_prio_tc_map(adapter->netdev, adapter->fcoe.up);
974#else
975 return 0;
976#endif
977}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index c377706e81a8..38d1b65777ad 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -28,28 +28,83 @@
28#include "ixgbe.h" 28#include "ixgbe.h"
29#include "ixgbe_sriov.h" 29#include "ixgbe_sriov.h"
30 30
31#ifdef CONFIG_IXGBE_DCB
31/** 32/**
32 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS 33 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
33 * @adapter: board private structure to initialize 34 * @adapter: board private structure to initialize
34 * 35 *
35 * Cache the descriptor ring offsets for RSS to the assigned rings. 36 * Cache the descriptor ring offsets for SR-IOV to the assigned rings. It
37 * will also try to cache the proper offsets if RSS/FCoE are enabled along
38 * with VMDq.
36 * 39 *
37 **/ 40 **/
38static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) 41static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
39{ 42{
43#ifdef IXGBE_FCOE
44 struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
45#endif /* IXGBE_FCOE */
46 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
40 int i; 47 int i;
48 u16 reg_idx;
49 u8 tcs = netdev_get_num_tc(adapter->netdev);
41 50
42 if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) 51 /* verify we have DCB queueing enabled before proceeding */
52 if (tcs <= 1)
43 return false; 53 return false;
44 54
45 for (i = 0; i < adapter->num_rx_queues; i++) 55 /* verify we have VMDq enabled before proceeding */
46 adapter->rx_ring[i]->reg_idx = i; 56 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
47 for (i = 0; i < adapter->num_tx_queues; i++) 57 return false;
48 adapter->tx_ring[i]->reg_idx = i; 58
59 /* start at VMDq register offset for SR-IOV enabled setups */
60 reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
61 for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
62 /* If we are greater than indices move to next pool */
63 if ((reg_idx & ~vmdq->mask) >= tcs)
64 reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
65 adapter->rx_ring[i]->reg_idx = reg_idx;
66 }
67
68 reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
69 for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
70 /* If we are greater than indices move to next pool */
71 if ((reg_idx & ~vmdq->mask) >= tcs)
72 reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
73 adapter->tx_ring[i]->reg_idx = reg_idx;
74 }
75
76#ifdef IXGBE_FCOE
77 /* nothing to do if FCoE is disabled */
78 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
79 return true;
80
81 /* The work is already done if the FCoE ring is shared */
82 if (fcoe->offset < tcs)
83 return true;
84
85 /* The FCoE rings exist separately, we need to move their reg_idx */
86 if (fcoe->indices) {
87 u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
88 u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);
89
90 reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
91 for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
92 reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
93 adapter->rx_ring[i]->reg_idx = reg_idx;
94 reg_idx++;
95 }
96
97 reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
98 for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
99 reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
100 adapter->tx_ring[i]->reg_idx = reg_idx;
101 reg_idx++;
102 }
103 }
49 104
105#endif /* IXGBE_FCOE */
50 return true; 106 return true;
51} 107}
52#ifdef CONFIG_IXGBE_DCB
53 108
54/* ixgbe_get_first_reg_idx - Return first register index associated with ring */ 109/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
55static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, 110static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
@@ -64,42 +119,37 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
64 119
65 switch (hw->mac.type) { 120 switch (hw->mac.type) {
66 case ixgbe_mac_82598EB: 121 case ixgbe_mac_82598EB:
67 *tx = tc << 2; 122 /* TxQs/TC: 4 RxQs/TC: 8 */
68 *rx = tc << 3; 123 *tx = tc << 2; /* 0, 4, 8, 12, 16, 20, 24, 28 */
124 *rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
69 break; 125 break;
70 case ixgbe_mac_82599EB: 126 case ixgbe_mac_82599EB:
71 case ixgbe_mac_X540: 127 case ixgbe_mac_X540:
72 if (num_tcs > 4) { 128 if (num_tcs > 4) {
73 if (tc < 3) { 129 /*
74 *tx = tc << 5; 130 * TCs : TC0/1 TC2/3 TC4-7
75 *rx = tc << 4; 131 * TxQs/TC: 32 16 8
76 } else if (tc < 5) { 132 * RxQs/TC: 16 16 16
77 *tx = ((tc + 2) << 4); 133 */
78 *rx = tc << 4; 134 *rx = tc << 4;
79 } else if (tc < num_tcs) { 135 if (tc < 3)
80 *tx = ((tc + 8) << 3); 136 *tx = tc << 5; /* 0, 32, 64 */
81 *rx = tc << 4; 137 else if (tc < 5)
82 } 138 *tx = (tc + 2) << 4; /* 80, 96 */
139 else
140 *tx = (tc + 8) << 3; /* 104, 112, 120 */
83 } else { 141 } else {
84 *rx = tc << 5; 142 /*
85 switch (tc) { 143 * TCs : TC0 TC1 TC2/3
86 case 0: 144 * TxQs/TC: 64 32 16
87 *tx = 0; 145 * RxQs/TC: 32 32 32
88 break; 146 */
89 case 1: 147 *rx = tc << 5;
90 *tx = 64; 148 if (tc < 2)
91 break; 149 *tx = tc << 6; /* 0, 64 */
92 case 2: 150 else
93 *tx = 96; 151 *tx = (tc + 4) << 4; /* 96, 112 */
94 break;
95 case 3:
96 *tx = 112;
97 break;
98 default:
99 break;
100 }
101 } 152 }
102 break;
103 default: 153 default:
104 break; 154 break;
105 } 155 }
@@ -112,106 +162,115 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
112 * Cache the descriptor ring offsets for DCB to the assigned rings. 162 * Cache the descriptor ring offsets for DCB to the assigned rings.
113 * 163 *
114 **/ 164 **/
115static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) 165static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
116{ 166{
117 struct net_device *dev = adapter->netdev; 167 struct net_device *dev = adapter->netdev;
118 int i, j, k; 168 unsigned int tx_idx, rx_idx;
169 int tc, offset, rss_i, i;
119 u8 num_tcs = netdev_get_num_tc(dev); 170 u8 num_tcs = netdev_get_num_tc(dev);
120 171
121 if (!num_tcs) 172 /* verify we have DCB queueing enabled before proceeding */
173 if (num_tcs <= 1)
122 return false; 174 return false;
123 175
124 for (i = 0, k = 0; i < num_tcs; i++) { 176 rss_i = adapter->ring_feature[RING_F_RSS].indices;
125 unsigned int tx_s, rx_s;
126 u16 count = dev->tc_to_txq[i].count;
127 177
128 ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s); 178 for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
129 for (j = 0; j < count; j++, k++) { 179 ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
130 adapter->tx_ring[k]->reg_idx = tx_s + j; 180 for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
131 adapter->rx_ring[k]->reg_idx = rx_s + j; 181 adapter->tx_ring[offset + i]->reg_idx = tx_idx;
132 adapter->tx_ring[k]->dcb_tc = i; 182 adapter->rx_ring[offset + i]->reg_idx = rx_idx;
133 adapter->rx_ring[k]->dcb_tc = i; 183 adapter->tx_ring[offset + i]->dcb_tc = tc;
184 adapter->rx_ring[offset + i]->dcb_tc = tc;
134 } 185 }
135 } 186 }
136 187
137 return true; 188 return true;
138} 189}
139#endif
140 190
191#endif
141/** 192/**
142 * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director 193 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
143 * @adapter: board private structure to initialize 194 * @adapter: board private structure to initialize
144 * 195 *
145 * Cache the descriptor ring offsets for Flow Director to the assigned rings. 196 * SR-IOV doesn't use any descriptor rings but changes the default if
197 * no other mapping is used.
146 * 198 *
147 **/ 199 */
148static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter) 200static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
149{ 201{
202#ifdef IXGBE_FCOE
203 struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
204#endif /* IXGBE_FCOE */
205 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
206 struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
150 int i; 207 int i;
151 bool ret = false; 208 u16 reg_idx;
152 209
153 if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && 210 /* only proceed if VMDq is enabled */
154 (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) { 211 if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
155 for (i = 0; i < adapter->num_rx_queues; i++) 212 return false;
156 adapter->rx_ring[i]->reg_idx = i; 213
157 for (i = 0; i < adapter->num_tx_queues; i++) 214 /* start at VMDq register offset for SR-IOV enabled setups */
158 adapter->tx_ring[i]->reg_idx = i; 215 reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
159 ret = true; 216 for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
217#ifdef IXGBE_FCOE
218 /* Allow first FCoE queue to be mapped as RSS */
219 if (fcoe->offset && (i > fcoe->offset))
220 break;
221#endif
222 /* If we are greater than indices move to next pool */
223 if ((reg_idx & ~vmdq->mask) >= rss->indices)
224 reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
225 adapter->rx_ring[i]->reg_idx = reg_idx;
160 } 226 }
161 227
162 return ret; 228#ifdef IXGBE_FCOE
163} 229 /* FCoE uses a linear block of queues so just assigning 1:1 */
230 for (; i < adapter->num_rx_queues; i++, reg_idx++)
231 adapter->rx_ring[i]->reg_idx = reg_idx;
164 232
233#endif
234 reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
235 for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
165#ifdef IXGBE_FCOE 236#ifdef IXGBE_FCOE
166/** 237 /* Allow first FCoE queue to be mapped as RSS */
167 * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE 238 if (fcoe->offset && (i > fcoe->offset))
168 * @adapter: board private structure to initialize 239 break;
169 * 240#endif
170 * Cache the descriptor ring offsets for FCoE mode to the assigned rings. 241 /* If we are greater than indices move to next pool */
171 * 242 if ((reg_idx & rss->mask) >= rss->indices)
172 */ 243 reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
173static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) 244 adapter->tx_ring[i]->reg_idx = reg_idx;
174{ 245 }
175 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
176 int i;
177 u8 fcoe_rx_i = 0, fcoe_tx_i = 0;
178 246
179 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) 247#ifdef IXGBE_FCOE
180 return false; 248 /* FCoE uses a linear block of queues so just assigning 1:1 */
249 for (; i < adapter->num_tx_queues; i++, reg_idx++)
250 adapter->tx_ring[i]->reg_idx = reg_idx;
181 251
182 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 252#endif
183 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
184 ixgbe_cache_ring_fdir(adapter);
185 else
186 ixgbe_cache_ring_rss(adapter);
187 253
188 fcoe_rx_i = f->mask;
189 fcoe_tx_i = f->mask;
190 }
191 for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
192 adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
193 adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
194 }
195 return true; 254 return true;
196} 255}
197 256
198#endif /* IXGBE_FCOE */
199/** 257/**
200 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov 258 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
201 * @adapter: board private structure to initialize 259 * @adapter: board private structure to initialize
202 * 260 *
203 * SR-IOV doesn't use any descriptor rings but changes the default if 261 * Cache the descriptor ring offsets for RSS to the assigned rings.
204 * no other mapping is used.
205 * 262 *
206 */ 263 **/
207static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) 264static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
208{ 265{
209 adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2; 266 int i;
210 adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2; 267
211 if (adapter->num_vfs) 268 for (i = 0; i < adapter->num_rx_queues; i++)
212 return true; 269 adapter->rx_ring[i]->reg_idx = i;
213 else 270 for (i = 0; i < adapter->num_tx_queues; i++)
214 return false; 271 adapter->tx_ring[i]->reg_idx = i;
272
273 return true;
215} 274}
216 275
217/** 276/**
@@ -231,186 +290,372 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
231 adapter->rx_ring[0]->reg_idx = 0; 290 adapter->rx_ring[0]->reg_idx = 0;
232 adapter->tx_ring[0]->reg_idx = 0; 291 adapter->tx_ring[0]->reg_idx = 0;
233 292
234 if (ixgbe_cache_ring_sriov(adapter))
235 return;
236
237#ifdef CONFIG_IXGBE_DCB 293#ifdef CONFIG_IXGBE_DCB
238 if (ixgbe_cache_ring_dcb(adapter)) 294 if (ixgbe_cache_ring_dcb_sriov(adapter))
239 return; 295 return;
240#endif
241 296
242#ifdef IXGBE_FCOE 297 if (ixgbe_cache_ring_dcb(adapter))
243 if (ixgbe_cache_ring_fcoe(adapter))
244 return; 298 return;
245#endif /* IXGBE_FCOE */
246 299
247 if (ixgbe_cache_ring_fdir(adapter)) 300#endif
301 if (ixgbe_cache_ring_sriov(adapter))
248 return; 302 return;
249 303
250 if (ixgbe_cache_ring_rss(adapter)) 304 ixgbe_cache_ring_rss(adapter);
251 return;
252} 305}
253 306
254/** 307#define IXGBE_RSS_16Q_MASK 0xF
255 * ixgbe_set_sriov_queues: Allocate queues for IOV use 308#define IXGBE_RSS_8Q_MASK 0x7
256 * @adapter: board private structure to initialize 309#define IXGBE_RSS_4Q_MASK 0x3
257 * 310#define IXGBE_RSS_2Q_MASK 0x1
258 * IOV doesn't actually use anything, so just NAK the 311#define IXGBE_RSS_DISABLED_MASK 0x0
259 * request for now and let the other queue routines
260 * figure out what to do.
261 */
262static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
263{
264 return false;
265}
266 312
313#ifdef CONFIG_IXGBE_DCB
267/** 314/**
268 * ixgbe_set_rss_queues: Allocate queues for RSS 315 * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
269 * @adapter: board private structure to initialize 316 * @adapter: board private structure to initialize
270 * 317 *
271 * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try 318 * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues
272 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. 319 * and VM pools where appropriate. Also assign queues based on DCB
320 * priorities and map accordingly..
273 * 321 *
274 **/ 322 **/
275static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) 323static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
276{ 324{
277 bool ret = false; 325 int i;
278 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS]; 326 u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
279 327 u16 vmdq_m = 0;
280 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 328#ifdef IXGBE_FCOE
281 f->mask = 0xF; 329 u16 fcoe_i = 0;
282 adapter->num_rx_queues = f->indices; 330#endif
283 adapter->num_tx_queues = f->indices; 331 u8 tcs = netdev_get_num_tc(adapter->netdev);
284 ret = true; 332
333 /* verify we have DCB queueing enabled before proceeding */
334 if (tcs <= 1)
335 return false;
336
337 /* verify we have VMDq enabled before proceeding */
338 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
339 return false;
340
341 /* Add starting offset to total pool count */
342 vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
343
344 /* 16 pools w/ 8 TC per pool */
345 if (tcs > 4) {
346 vmdq_i = min_t(u16, vmdq_i, 16);
347 vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
348 /* 32 pools w/ 4 TC per pool */
349 } else {
350 vmdq_i = min_t(u16, vmdq_i, 32);
351 vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
285 } 352 }
286 353
287 return ret; 354#ifdef IXGBE_FCOE
288} 355 /* queues in the remaining pools are available for FCoE */
356 fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;
289 357
290/** 358#endif
291 * ixgbe_set_fdir_queues: Allocate queues for Flow Director 359 /* remove the starting offset from the pool count */
292 * @adapter: board private structure to initialize 360 vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;
293 *
294 * Flow Director is an advanced Rx filter, attempting to get Rx flows back
295 * to the original CPU that initiated the Tx session. This runs in addition
296 * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
297 * Rx load across CPUs using RSS.
298 *
299 **/
300static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
301{
302 bool ret = false;
303 struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
304 361
305 f_fdir->indices = min_t(int, num_online_cpus(), f_fdir->indices); 362 /* save features for later use */
306 f_fdir->mask = 0; 363 adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
364 adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
307 365
308 /* 366 /*
309 * Use RSS in addition to Flow Director to ensure the best 367 * We do not support DCB, VMDq, and RSS all simultaneously
310 * distribution of flows across cores, even when an FDIR flow 368 * so we will disable RSS since it is the lowest priority
311 * isn't matched.
312 */ 369 */
313 if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && 370 adapter->ring_feature[RING_F_RSS].indices = 1;
314 (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) { 371 adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;
315 adapter->num_tx_queues = f_fdir->indices; 372
316 adapter->num_rx_queues = f_fdir->indices; 373 adapter->num_rx_pools = vmdq_i;
317 ret = true; 374 adapter->num_rx_queues_per_pool = tcs;
318 } else { 375
319 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; 376 adapter->num_tx_queues = vmdq_i * tcs;
377 adapter->num_rx_queues = vmdq_i * tcs;
378
379#ifdef IXGBE_FCOE
380 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
381 struct ixgbe_ring_feature *fcoe;
382
383 fcoe = &adapter->ring_feature[RING_F_FCOE];
384
385 /* limit ourselves based on feature limits */
386 fcoe_i = min_t(u16, fcoe_i, num_online_cpus());
387 fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
388
389 if (fcoe_i) {
390 /* alloc queues for FCoE separately */
391 fcoe->indices = fcoe_i;
392 fcoe->offset = vmdq_i * tcs;
393
394 /* add queues to adapter */
395 adapter->num_tx_queues += fcoe_i;
396 adapter->num_rx_queues += fcoe_i;
397 } else if (tcs > 1) {
398 /* use queue belonging to FcoE TC */
399 fcoe->indices = 1;
400 fcoe->offset = ixgbe_fcoe_get_tc(adapter);
401 } else {
402 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
403
404 fcoe->indices = 0;
405 fcoe->offset = 0;
406 }
320 } 407 }
321 return ret; 408
409#endif /* IXGBE_FCOE */
410 /* configure TC to queue mapping */
411 for (i = 0; i < tcs; i++)
412 netdev_set_tc_queue(adapter->netdev, i, 1, i);
413
414 return true;
322} 415}
323 416
417static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
418{
419 struct net_device *dev = adapter->netdev;
420 struct ixgbe_ring_feature *f;
421 int rss_i, rss_m, i;
422 int tcs;
423
424 /* Map queue offset and counts onto allocated tx queues */
425 tcs = netdev_get_num_tc(dev);
426
427 /* verify we have DCB queueing enabled before proceeding */
428 if (tcs <= 1)
429 return false;
430
431 /* determine the upper limit for our current DCB mode */
432 rss_i = dev->num_tx_queues / tcs;
433 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
434 /* 8 TC w/ 4 queues per TC */
435 rss_i = min_t(u16, rss_i, 4);
436 rss_m = IXGBE_RSS_4Q_MASK;
437 } else if (tcs > 4) {
438 /* 8 TC w/ 8 queues per TC */
439 rss_i = min_t(u16, rss_i, 8);
440 rss_m = IXGBE_RSS_8Q_MASK;
441 } else {
442 /* 4 TC w/ 16 queues per TC */
443 rss_i = min_t(u16, rss_i, 16);
444 rss_m = IXGBE_RSS_16Q_MASK;
445 }
446
447 /* set RSS mask and indices */
448 f = &adapter->ring_feature[RING_F_RSS];
449 rss_i = min_t(int, rss_i, f->limit);
450 f->indices = rss_i;
451 f->mask = rss_m;
452
324#ifdef IXGBE_FCOE 453#ifdef IXGBE_FCOE
454 /* FCoE enabled queues require special configuration indexed
455 * by feature specific indices and offset. Here we map FCoE
456 * indices onto the DCB queue pairs allowing FCoE to own
457 * configuration later.
458 */
459 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
460 u8 tc = ixgbe_fcoe_get_tc(adapter);
461
462 f = &adapter->ring_feature[RING_F_FCOE];
463 f->indices = min_t(u16, rss_i, f->limit);
464 f->offset = rss_i * tc;
465 }
466
467#endif /* IXGBE_FCOE */
468 for (i = 0; i < tcs; i++)
469 netdev_set_tc_queue(dev, i, rss_i, rss_i * i);
470
471 adapter->num_tx_queues = rss_i * tcs;
472 adapter->num_rx_queues = rss_i * tcs;
473
474 return true;
475}
476
477#endif
325/** 478/**
326 * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE) 479 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
327 * @adapter: board private structure to initialize 480 * @adapter: board private structure to initialize
328 * 481 *
329 * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges. 482 * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues
330 * The ring feature mask is not used as a mask for FCoE, as it can take any 8 483 * and VM pools where appropriate. If RSS is available, then also try and
331 * rx queues out of the max number of rx queues, instead, it is used as the 484 * enable RSS and map accordingly.
332 * index of the first rx queue used by FCoE.
333 * 485 *
334 **/ 486 **/
335static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) 487static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
336{ 488{
337 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; 489 u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
490 u16 vmdq_m = 0;
491 u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
492 u16 rss_m = IXGBE_RSS_DISABLED_MASK;
493#ifdef IXGBE_FCOE
494 u16 fcoe_i = 0;
495#endif
338 496
339 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) 497 /* only proceed if SR-IOV is enabled */
498 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
340 return false; 499 return false;
341 500
342 f->indices = min_t(int, num_online_cpus(), f->indices); 501 /* Add starting offset to total pool count */
502 vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
343 503
344 adapter->num_rx_queues = 1; 504 /* double check we are limited to maximum pools */
345 adapter->num_tx_queues = 1; 505 vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);
346 506
347 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 507 /* 64 pool mode with 2 queues per pool */
348 e_info(probe, "FCoE enabled with RSS\n"); 508 if ((vmdq_i > 32) || (rss_i < 4)) {
349 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) 509 vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
350 ixgbe_set_fdir_queues(adapter); 510 rss_m = IXGBE_RSS_2Q_MASK;
351 else 511 rss_i = min_t(u16, rss_i, 2);
352 ixgbe_set_rss_queues(adapter); 512 /* 32 pool mode with 4 queues per pool */
513 } else {
514 vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
515 rss_m = IXGBE_RSS_4Q_MASK;
516 rss_i = 4;
353 } 517 }
354 518
355 /* adding FCoE rx rings to the end */ 519#ifdef IXGBE_FCOE
356 f->mask = adapter->num_rx_queues; 520 /* queues in the remaining pools are available for FCoE */
357 adapter->num_rx_queues += f->indices; 521 fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));
358 adapter->num_tx_queues += f->indices; 522
523#endif
524 /* remove the starting offset from the pool count */
525 vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;
526
527 /* save features for later use */
528 adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
529 adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
530
531 /* limit RSS based on user input and save for later use */
532 adapter->ring_feature[RING_F_RSS].indices = rss_i;
533 adapter->ring_feature[RING_F_RSS].mask = rss_m;
534
535 adapter->num_rx_pools = vmdq_i;
536 adapter->num_rx_queues_per_pool = rss_i;
537
538 adapter->num_rx_queues = vmdq_i * rss_i;
539 adapter->num_tx_queues = vmdq_i * rss_i;
540
541 /* disable ATR as it is not supported when VMDq is enabled */
542 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
543
544#ifdef IXGBE_FCOE
545 /*
546 * FCoE can use rings from adjacent buffers to allow RSS
547 * like behavior. To account for this we need to add the
548 * FCoE indices to the total ring count.
549 */
550 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
551 struct ixgbe_ring_feature *fcoe;
552
553 fcoe = &adapter->ring_feature[RING_F_FCOE];
554
555 /* limit ourselves based on feature limits */
556 fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
557
558 if (vmdq_i > 1 && fcoe_i) {
559 /* reserve no more than number of CPUs */
560 fcoe_i = min_t(u16, fcoe_i, num_online_cpus());
561
562 /* alloc queues for FCoE separately */
563 fcoe->indices = fcoe_i;
564 fcoe->offset = vmdq_i * rss_i;
565 } else {
566 /* merge FCoE queues with RSS queues */
567 fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());
568
569 /* limit indices to rss_i if MSI-X is disabled */
570 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
571 fcoe_i = rss_i;
572
573 /* attempt to reserve some queues for just FCoE */
574 fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
575 fcoe->offset = fcoe_i - fcoe->indices;
576
577 fcoe_i -= rss_i;
578 }
579
580 /* add queues to adapter */
581 adapter->num_tx_queues += fcoe_i;
582 adapter->num_rx_queues += fcoe_i;
583 }
359 584
585#endif
360 return true; 586 return true;
361} 587}
362#endif /* IXGBE_FCOE */
363
364/* Artificial max queue cap per traffic class in DCB mode */
365#define DCB_QUEUE_CAP 8
366 588
367#ifdef CONFIG_IXGBE_DCB 589/**
368static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) 590 * ixgbe_set_rss_queues - Allocate queues for RSS
591 * @adapter: board private structure to initialize
592 *
593 * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
594 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
595 *
596 **/
597static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
369{ 598{
370 int per_tc_q, q, i, offset = 0; 599 struct ixgbe_ring_feature *f;
371 struct net_device *dev = adapter->netdev; 600 u16 rss_i;
372 int tcs = netdev_get_num_tc(dev);
373 601
374 if (!tcs) 602 /* set mask for 16 queue limit of RSS */
375 return false; 603 f = &adapter->ring_feature[RING_F_RSS];
604 rss_i = f->limit;
376 605
377 /* Map queue offset and counts onto allocated tx queues */ 606 f->indices = rss_i;
378 per_tc_q = min_t(unsigned int, dev->num_tx_queues / tcs, DCB_QUEUE_CAP); 607 f->mask = IXGBE_RSS_16Q_MASK;
379 q = min_t(int, num_online_cpus(), per_tc_q);
380 608
381 for (i = 0; i < tcs; i++) { 609 /*
382 netdev_set_tc_queue(dev, i, q, offset); 610 * Use Flow Director in addition to RSS to ensure the best
383 offset += q; 611 * distribution of flows across cores, even when an FDIR flow
384 } 612 * isn't matched.
613 */
614 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
615 f = &adapter->ring_feature[RING_F_FDIR];
385 616
386 adapter->num_tx_queues = q * tcs; 617 f->indices = min_t(u16, num_online_cpus(), f->limit);
387 adapter->num_rx_queues = q * tcs; 618 rss_i = max_t(u16, rss_i, f->indices);
619 }
388 620
389#ifdef IXGBE_FCOE 621#ifdef IXGBE_FCOE
390 /* FCoE enabled queues require special configuration indexed 622 /*
391 * by feature specific indices and mask. Here we map FCoE 623 * FCoE can exist on the same rings as standard network traffic
392 * indices onto the DCB queue pairs allowing FCoE to own 624 * however it is preferred to avoid that if possible. In order
393 * configuration later. 625 * to get the best performance we allocate as many FCoE queues
626 * as we can and we place them at the end of the ring array to
627 * avoid sharing queues with standard RSS on systems with 24 or
628 * more CPUs.
394 */ 629 */
395 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { 630 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
396 u8 prio_tc[MAX_USER_PRIORITY] = {0}; 631 struct net_device *dev = adapter->netdev;
397 int tc; 632 u16 fcoe_i;
398 struct ixgbe_ring_feature *f = 633
399 &adapter->ring_feature[RING_F_FCOE]; 634 f = &adapter->ring_feature[RING_F_FCOE];
400 635
401 ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc); 636 /* merge FCoE queues with RSS queues */
402 tc = prio_tc[adapter->fcoe.up]; 637 fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
403 f->indices = dev->tc_to_txq[tc].count; 638 fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);
404 f->mask = dev->tc_to_txq[tc].offset; 639
640 /* limit indices to rss_i if MSI-X is disabled */
641 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
642 fcoe_i = rss_i;
643
644 /* attempt to reserve some queues for just FCoE */
645 f->indices = min_t(u16, fcoe_i, f->limit);
646 f->offset = fcoe_i - f->indices;
647 rss_i = max_t(u16, fcoe_i, rss_i);
405 } 648 }
406#endif 649
650#endif /* IXGBE_FCOE */
651 adapter->num_rx_queues = rss_i;
652 adapter->num_tx_queues = rss_i;
407 653
408 return true; 654 return true;
409} 655}
410#endif
411 656
412/** 657/**
413 * ixgbe_set_num_queues: Allocate queues for device, feature dependent 658 * ixgbe_set_num_queues - Allocate queues for device, feature dependent
414 * @adapter: board private structure to initialize 659 * @adapter: board private structure to initialize
415 * 660 *
416 * This is the top level queue allocation routine. The order here is very 661 * This is the top level queue allocation routine. The order here is very
@@ -420,7 +665,7 @@ static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
420 * fallthrough conditions. 665 * fallthrough conditions.
421 * 666 *
422 **/ 667 **/
423static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter) 668static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
424{ 669{
425 /* Start with base case */ 670 /* Start with base case */
426 adapter->num_rx_queues = 1; 671 adapter->num_rx_queues = 1;
@@ -428,38 +673,18 @@ static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
428 adapter->num_rx_pools = adapter->num_rx_queues; 673 adapter->num_rx_pools = adapter->num_rx_queues;
429 adapter->num_rx_queues_per_pool = 1; 674 adapter->num_rx_queues_per_pool = 1;
430 675
431 if (ixgbe_set_sriov_queues(adapter))
432 goto done;
433
434#ifdef CONFIG_IXGBE_DCB 676#ifdef CONFIG_IXGBE_DCB
677 if (ixgbe_set_dcb_sriov_queues(adapter))
678 return;
679
435 if (ixgbe_set_dcb_queues(adapter)) 680 if (ixgbe_set_dcb_queues(adapter))
436 goto done; 681 return;
437 682
438#endif 683#endif
439#ifdef IXGBE_FCOE 684 if (ixgbe_set_sriov_queues(adapter))
440 if (ixgbe_set_fcoe_queues(adapter)) 685 return;
441 goto done;
442
443#endif /* IXGBE_FCOE */
444 if (ixgbe_set_fdir_queues(adapter))
445 goto done;
446
447 if (ixgbe_set_rss_queues(adapter))
448 goto done;
449
450 /* fallback to base case */
451 adapter->num_rx_queues = 1;
452 adapter->num_tx_queues = 1;
453
454done:
455 if ((adapter->netdev->reg_state == NETREG_UNREGISTERED) ||
456 (adapter->netdev->reg_state == NETREG_UNREGISTERING))
457 return 0;
458 686
459 /* Notify the stack of the (possibly) reduced queue counts. */ 687 ixgbe_set_rss_queues(adapter);
460 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
461 return netif_set_real_num_rx_queues(adapter->netdev,
462 adapter->num_rx_queues);
463} 688}
464 689
465static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, 690static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
@@ -507,8 +732,8 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
507 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of 732 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
508 * vectors we were allocated. 733 * vectors we were allocated.
509 */ 734 */
510 adapter->num_msix_vectors = min(vectors, 735 vectors -= NON_Q_VECTORS;
511 adapter->max_msix_q_vectors + NON_Q_VECTORS); 736 adapter->num_q_vectors = min(vectors, adapter->max_q_vectors);
512 } 737 }
513} 738}
514 739
@@ -632,8 +857,8 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
632 if (adapter->netdev->features & NETIF_F_FCOE_MTU) { 857 if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
633 struct ixgbe_ring_feature *f; 858 struct ixgbe_ring_feature *f;
634 f = &adapter->ring_feature[RING_F_FCOE]; 859 f = &adapter->ring_feature[RING_F_FCOE];
635 if ((rxr_idx >= f->mask) && 860 if ((rxr_idx >= f->offset) &&
636 (rxr_idx < f->mask + f->indices)) 861 (rxr_idx < f->offset + f->indices))
637 set_bit(__IXGBE_RX_FCOE, &ring->state); 862 set_bit(__IXGBE_RX_FCOE, &ring->state);
638 } 863 }
639 864
@@ -695,7 +920,7 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
695 **/ 920 **/
696static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) 921static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
697{ 922{
698 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 923 int q_vectors = adapter->num_q_vectors;
699 int rxr_remaining = adapter->num_rx_queues; 924 int rxr_remaining = adapter->num_rx_queues;
700 int txr_remaining = adapter->num_tx_queues; 925 int txr_remaining = adapter->num_tx_queues;
701 int rxr_idx = 0, txr_idx = 0, v_idx = 0; 926 int rxr_idx = 0, txr_idx = 0, v_idx = 0;
@@ -739,10 +964,12 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
739 return 0; 964 return 0;
740 965
741err_out: 966err_out:
742 while (v_idx) { 967 adapter->num_tx_queues = 0;
743 v_idx--; 968 adapter->num_rx_queues = 0;
969 adapter->num_q_vectors = 0;
970
971 while (v_idx--)
744 ixgbe_free_q_vector(adapter, v_idx); 972 ixgbe_free_q_vector(adapter, v_idx);
745 }
746 973
747 return -ENOMEM; 974 return -ENOMEM;
748} 975}
@@ -757,14 +984,13 @@ err_out:
757 **/ 984 **/
758static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) 985static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
759{ 986{
760 int v_idx, q_vectors; 987 int v_idx = adapter->num_q_vectors;
761 988
762 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) 989 adapter->num_tx_queues = 0;
763 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 990 adapter->num_rx_queues = 0;
764 else 991 adapter->num_q_vectors = 0;
765 q_vectors = 1;
766 992
767 for (v_idx = 0; v_idx < q_vectors; v_idx++) 993 while (v_idx--)
768 ixgbe_free_q_vector(adapter, v_idx); 994 ixgbe_free_q_vector(adapter, v_idx);
769} 995}
770 996
@@ -788,11 +1014,10 @@ static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
788 * Attempt to configure the interrupts using the best available 1014 * Attempt to configure the interrupts using the best available
789 * capabilities of the hardware and the kernel. 1015 * capabilities of the hardware and the kernel.
790 **/ 1016 **/
791static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) 1017static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
792{ 1018{
793 struct ixgbe_hw *hw = &adapter->hw; 1019 struct ixgbe_hw *hw = &adapter->hw;
794 int err = 0; 1020 int vector, v_budget, err;
795 int vector, v_budget;
796 1021
797 /* 1022 /*
798 * It's easy to be greedy for MSI-X vectors, but it really 1023 * It's easy to be greedy for MSI-X vectors, but it really
@@ -825,11 +1050,10 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
825 ixgbe_acquire_msix_vectors(adapter, v_budget); 1050 ixgbe_acquire_msix_vectors(adapter, v_budget);
826 1051
827 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) 1052 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
828 goto out; 1053 return;
829 } 1054 }
830 1055
831 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; 1056 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
832 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
833 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { 1057 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
834 e_err(probe, 1058 e_err(probe,
835 "ATR is not supported while multiple " 1059 "ATR is not supported while multiple "
@@ -840,23 +1064,18 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
840 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) 1064 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
841 ixgbe_disable_sriov(adapter); 1065 ixgbe_disable_sriov(adapter);
842 1066
843 err = ixgbe_set_num_queues(adapter); 1067 adapter->ring_feature[RING_F_RSS].limit = 1;
844 if (err) 1068 ixgbe_set_num_queues(adapter);
845 return err; 1069 adapter->num_q_vectors = 1;
846 1070
847 err = pci_enable_msi(adapter->pdev); 1071 err = pci_enable_msi(adapter->pdev);
848 if (!err) { 1072 if (err) {
849 adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
850 } else {
851 netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, 1073 netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
852 "Unable to allocate MSI interrupt, " 1074 "Unable to allocate MSI interrupt, "
853 "falling back to legacy. Error: %d\n", err); 1075 "falling back to legacy. Error: %d\n", err);
854 /* reset err */ 1076 return;
855 err = 0;
856 } 1077 }
857 1078 adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
858out:
859 return err;
860} 1079}
861 1080
862/** 1081/**
@@ -874,15 +1093,10 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
874 int err; 1093 int err;
875 1094
876 /* Number of supported queues */ 1095 /* Number of supported queues */
877 err = ixgbe_set_num_queues(adapter); 1096 ixgbe_set_num_queues(adapter);
878 if (err)
879 return err;
880 1097
881 err = ixgbe_set_interrupt_capability(adapter); 1098 /* Set interrupt mode */
882 if (err) { 1099 ixgbe_set_interrupt_capability(adapter);
883 e_dev_err("Unable to setup interrupt capabilities\n");
884 goto err_set_interrupt;
885 }
886 1100
887 err = ixgbe_alloc_q_vectors(adapter); 1101 err = ixgbe_alloc_q_vectors(adapter);
888 if (err) { 1102 if (err) {
@@ -902,7 +1116,6 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
902 1116
903err_alloc_q_vectors: 1117err_alloc_q_vectors:
904 ixgbe_reset_interrupt_capability(adapter); 1118 ixgbe_reset_interrupt_capability(adapter);
905err_set_interrupt:
906 return err; 1119 return err;
907} 1120}
908 1121
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index e242104ab471..a3dc9657f572 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -516,7 +516,7 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
516 ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); 516 ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
517} 517}
518 518
519/* 519/**
520 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors 520 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
521 * @adapter: pointer to adapter struct 521 * @adapter: pointer to adapter struct
522 * @direction: 0 for Rx, 1 for Tx, -1 for other causes 522 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
@@ -790,12 +790,10 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
790 total_packets += tx_buffer->gso_segs; 790 total_packets += tx_buffer->gso_segs;
791 791
792#ifdef CONFIG_IXGBE_PTP 792#ifdef CONFIG_IXGBE_PTP
793 if (unlikely(tx_buffer->tx_flags & 793 if (unlikely(tx_buffer->tx_flags & IXGBE_TX_FLAGS_TSTAMP))
794 IXGBE_TX_FLAGS_TSTAMP)) 794 ixgbe_ptp_tx_hwtstamp(q_vector, tx_buffer->skb);
795 ixgbe_ptp_tx_hwtstamp(q_vector,
796 tx_buffer->skb);
797
798#endif 795#endif
796
799 /* free the skb */ 797 /* free the skb */
800 dev_kfree_skb_any(tx_buffer->skb); 798 dev_kfree_skb_any(tx_buffer->skb);
801 799
@@ -995,7 +993,6 @@ out_no_update:
995 993
996static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) 994static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
997{ 995{
998 int num_q_vectors;
999 int i; 996 int i;
1000 997
1001 if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED)) 998 if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
@@ -1004,12 +1001,7 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
1004 /* always use CB2 mode, difference is masked in the CB driver */ 1001 /* always use CB2 mode, difference is masked in the CB driver */
1005 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); 1002 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
1006 1003
1007 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) 1004 for (i = 0; i < adapter->num_q_vectors; i++) {
1008 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1009 else
1010 num_q_vectors = 1;
1011
1012 for (i = 0; i < num_q_vectors; i++) {
1013 adapter->q_vector[i]->cpu = -1; 1005 adapter->q_vector[i]->cpu = -1;
1014 ixgbe_update_dca(adapter->q_vector[i]); 1006 ixgbe_update_dca(adapter->q_vector[i]);
1015 } 1007 }
@@ -1399,8 +1391,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
1399 ixgbe_rx_checksum(rx_ring, rx_desc, skb); 1391 ixgbe_rx_checksum(rx_ring, rx_desc, skb);
1400 1392
1401#ifdef CONFIG_IXGBE_PTP 1393#ifdef CONFIG_IXGBE_PTP
1402 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)) 1394 ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
1403 ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb);
1404#endif 1395#endif
1405 1396
1406 if ((dev->features & NETIF_F_HW_VLAN_RX) && 1397 if ((dev->features & NETIF_F_HW_VLAN_RX) &&
@@ -1834,11 +1825,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1834static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) 1825static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
1835{ 1826{
1836 struct ixgbe_q_vector *q_vector; 1827 struct ixgbe_q_vector *q_vector;
1837 int q_vectors, v_idx; 1828 int v_idx;
1838 u32 mask; 1829 u32 mask;
1839 1830
1840 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1841
1842 /* Populate MSIX to EITR Select */ 1831 /* Populate MSIX to EITR Select */
1843 if (adapter->num_vfs > 32) { 1832 if (adapter->num_vfs > 32) {
1844 u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1; 1833 u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
@@ -1849,7 +1838,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
1849 * Populate the IVAR table and set the ITR values to the 1838 * Populate the IVAR table and set the ITR values to the
1850 * corresponding register. 1839 * corresponding register.
1851 */ 1840 */
1852 for (v_idx = 0; v_idx < q_vectors; v_idx++) { 1841 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
1853 struct ixgbe_ring *ring; 1842 struct ixgbe_ring *ring;
1854 q_vector = adapter->q_vector[v_idx]; 1843 q_vector = adapter->q_vector[v_idx];
1855 1844
@@ -2413,11 +2402,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
2413static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) 2402static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2414{ 2403{
2415 struct net_device *netdev = adapter->netdev; 2404 struct net_device *netdev = adapter->netdev;
2416 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2417 int vector, err; 2405 int vector, err;
2418 int ri = 0, ti = 0; 2406 int ri = 0, ti = 0;
2419 2407
2420 for (vector = 0; vector < q_vectors; vector++) { 2408 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
2421 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; 2409 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
2422 struct msix_entry *entry = &adapter->msix_entries[vector]; 2410 struct msix_entry *entry = &adapter->msix_entries[vector];
2423 2411
@@ -2572,30 +2560,28 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
2572 2560
2573static void ixgbe_free_irq(struct ixgbe_adapter *adapter) 2561static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
2574{ 2562{
2575 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 2563 int vector;
2576 int i, q_vectors;
2577 2564
2578 q_vectors = adapter->num_msix_vectors; 2565 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
2579 i = q_vectors - 1; 2566 free_irq(adapter->pdev->irq, adapter);
2580 free_irq(adapter->msix_entries[i].vector, adapter); 2567 return;
2581 i--; 2568 }
2582 2569
2583 for (; i >= 0; i--) { 2570 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
2584 /* free only the irqs that were actually requested */ 2571 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
2585 if (!adapter->q_vector[i]->rx.ring && 2572 struct msix_entry *entry = &adapter->msix_entries[vector];
2586 !adapter->q_vector[i]->tx.ring)
2587 continue;
2588 2573
2589 /* clear the affinity_mask in the IRQ descriptor */ 2574 /* free only the irqs that were actually requested */
2590 irq_set_affinity_hint(adapter->msix_entries[i].vector, 2575 if (!q_vector->rx.ring && !q_vector->tx.ring)
2591 NULL); 2576 continue;
2592 2577
2593 free_irq(adapter->msix_entries[i].vector, 2578 /* clear the affinity_mask in the IRQ descriptor */
2594 adapter->q_vector[i]); 2579 irq_set_affinity_hint(entry->vector, NULL);
2595 } 2580
2596 } else { 2581 free_irq(entry->vector, q_vector);
2597 free_irq(adapter->pdev->irq, adapter);
2598 } 2582 }
2583
2584 free_irq(adapter->msix_entries[vector++].vector, adapter);
2599} 2585}
2600 2586
2601/** 2587/**
@@ -2619,9 +2605,12 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
2619 } 2605 }
2620 IXGBE_WRITE_FLUSH(&adapter->hw); 2606 IXGBE_WRITE_FLUSH(&adapter->hw);
2621 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 2607 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2622 int i; 2608 int vector;
2623 for (i = 0; i < adapter->num_msix_vectors; i++) 2609
2624 synchronize_irq(adapter->msix_entries[i].vector); 2610 for (vector = 0; vector < adapter->num_q_vectors; vector++)
2611 synchronize_irq(adapter->msix_entries[vector].vector);
2612
2613 synchronize_irq(adapter->msix_entries[vector++].vector);
2625 } else { 2614 } else {
2626 synchronize_irq(adapter->pdev->irq); 2615 synchronize_irq(adapter->pdev->irq);
2627 } 2616 }
@@ -2730,8 +2719,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
2730static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) 2719static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
2731{ 2720{
2732 struct ixgbe_hw *hw = &adapter->hw; 2721 struct ixgbe_hw *hw = &adapter->hw;
2733 u32 rttdcs; 2722 u32 rttdcs, mtqc;
2734 u32 reg;
2735 u8 tcs = netdev_get_num_tc(adapter->netdev); 2723 u8 tcs = netdev_get_num_tc(adapter->netdev);
2736 2724
2737 if (hw->mac.type == ixgbe_mac_82598EB) 2725 if (hw->mac.type == ixgbe_mac_82598EB)
@@ -2743,28 +2731,32 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
2743 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 2731 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2744 2732
2745 /* set transmit pool layout */ 2733 /* set transmit pool layout */
2746 switch (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { 2734 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
2747 case (IXGBE_FLAG_SRIOV_ENABLED): 2735 mtqc = IXGBE_MTQC_VT_ENA;
2748 IXGBE_WRITE_REG(hw, IXGBE_MTQC, 2736 if (tcs > 4)
2749 (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF)); 2737 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
2750 break; 2738 else if (tcs > 1)
2751 default: 2739 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
2752 if (!tcs) 2740 else if (adapter->ring_feature[RING_F_RSS].indices == 4)
2753 reg = IXGBE_MTQC_64Q_1PB; 2741 mtqc |= IXGBE_MTQC_32VF;
2754 else if (tcs <= 4)
2755 reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
2756 else 2742 else
2757 reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; 2743 mtqc |= IXGBE_MTQC_64VF;
2744 } else {
2745 if (tcs > 4)
2746 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
2747 else if (tcs > 1)
2748 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
2749 else
2750 mtqc = IXGBE_MTQC_64Q_1PB;
2751 }
2758 2752
2759 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg); 2753 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
2760 2754
2761 /* Enable Security TX Buffer IFG for multiple pb */ 2755 /* Enable Security TX Buffer IFG for multiple pb */
2762 if (tcs) { 2756 if (tcs) {
2763 reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); 2757 u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
2764 reg |= IXGBE_SECTX_DCB; 2758 sectx |= IXGBE_SECTX_DCB;
2765 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg); 2759 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
2766 }
2767 break;
2768 } 2760 }
2769 2761
2770 /* re-enable the arbiter */ 2762 /* re-enable the arbiter */
@@ -2858,40 +2850,34 @@ static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
2858static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, 2850static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
2859 struct ixgbe_ring *rx_ring) 2851 struct ixgbe_ring *rx_ring)
2860{ 2852{
2853 struct ixgbe_hw *hw = &adapter->hw;
2861 u32 srrctl; 2854 u32 srrctl;
2862 u8 reg_idx = rx_ring->reg_idx; 2855 u8 reg_idx = rx_ring->reg_idx;
2863 2856
2864 switch (adapter->hw.mac.type) { 2857 if (hw->mac.type == ixgbe_mac_82598EB) {
2865 case ixgbe_mac_82598EB: { 2858 u16 mask = adapter->ring_feature[RING_F_RSS].mask;
2866 struct ixgbe_ring_feature *feature = adapter->ring_feature;
2867 const int mask = feature[RING_F_RSS].mask;
2868 reg_idx = reg_idx & mask;
2869 }
2870 break;
2871 case ixgbe_mac_82599EB:
2872 case ixgbe_mac_X540:
2873 default:
2874 break;
2875 }
2876
2877 srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx));
2878 2859
2879 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; 2860 /*
2880 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; 2861 * if VMDq is not active we must program one srrctl register
2881 if (adapter->num_vfs) 2862 * per RSS queue since we have enabled RDRXCTL.MVMEN
2882 srrctl |= IXGBE_SRRCTL_DROP_EN; 2863 */
2864 reg_idx &= mask;
2865 }
2883 2866
2884 srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & 2867 /* configure header buffer length, needed for RSC */
2885 IXGBE_SRRCTL_BSIZEHDR_MASK; 2868 srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
2886 2869
2870 /* configure the packet buffer length */
2887#if PAGE_SIZE > IXGBE_MAX_RXBUFFER 2871#if PAGE_SIZE > IXGBE_MAX_RXBUFFER
2888 srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 2872 srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2889#else 2873#else
2890 srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 2874 srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2891#endif 2875#endif
2876
2877 /* configure descriptor type */
2892 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 2878 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2893 2879
2894 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl); 2880 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
2895} 2881}
2896 2882
2897static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) 2883static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
@@ -2903,11 +2889,15 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
2903 u32 mrqc = 0, reta = 0; 2889 u32 mrqc = 0, reta = 0;
2904 u32 rxcsum; 2890 u32 rxcsum;
2905 int i, j; 2891 int i, j;
2906 u8 tcs = netdev_get_num_tc(adapter->netdev); 2892 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
2907 int maxq = adapter->ring_feature[RING_F_RSS].indices;
2908 2893
2909 if (tcs) 2894 /*
2910 maxq = min(maxq, adapter->num_tx_queues / tcs); 2895 * Program table for at least 2 queues w/ SR-IOV so that VFs can
2896 * make full use of any rings they may have. We will use the
2897 * PSRTYPE register to control how many rings we use within the PF.
2898 */
2899 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2))
2900 rss_i = 2;
2911 2901
2912 /* Fill out hash function seeds */ 2902 /* Fill out hash function seeds */
2913 for (i = 0; i < 10; i++) 2903 for (i = 0; i < 10; i++)
@@ -2915,7 +2905,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
2915 2905
2916 /* Fill out redirection table */ 2906 /* Fill out redirection table */
2917 for (i = 0, j = 0; i < 128; i++, j++) { 2907 for (i = 0, j = 0; i < 128; i++, j++) {
2918 if (j == maxq) 2908 if (j == rss_i)
2919 j = 0; 2909 j = 0;
2920 /* reta = 4-byte sliding window of 2910 /* reta = 4-byte sliding window of
2921 * 0x00..(indices-1)(indices-1)00..etc. */ 2911 * 0x00..(indices-1)(indices-1)00..etc. */
@@ -2929,35 +2919,36 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
2929 rxcsum |= IXGBE_RXCSUM_PCSD; 2919 rxcsum |= IXGBE_RXCSUM_PCSD;
2930 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 2920 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2931 2921
2932 if (adapter->hw.mac.type == ixgbe_mac_82598EB && 2922 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
2933 (adapter->flags & IXGBE_FLAG_RSS_ENABLED)) { 2923 if (adapter->ring_feature[RING_F_RSS].mask)
2934 mrqc = IXGBE_MRQC_RSSEN; 2924 mrqc = IXGBE_MRQC_RSSEN;
2935 } else { 2925 } else {
2936 int mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED 2926 u8 tcs = netdev_get_num_tc(adapter->netdev);
2937 | IXGBE_FLAG_SRIOV_ENABLED); 2927
2938 2928 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
2939 switch (mask) { 2929 if (tcs > 4)
2940 case (IXGBE_FLAG_RSS_ENABLED): 2930 mrqc = IXGBE_MRQC_VMDQRT8TCEN; /* 8 TCs */
2941 if (!tcs) 2931 else if (tcs > 1)
2942 mrqc = IXGBE_MRQC_RSSEN; 2932 mrqc = IXGBE_MRQC_VMDQRT4TCEN; /* 4 TCs */
2943 else if (tcs <= 4) 2933 else if (adapter->ring_feature[RING_F_RSS].indices == 4)
2944 mrqc = IXGBE_MRQC_RTRSS4TCEN; 2934 mrqc = IXGBE_MRQC_VMDQRSS32EN;
2945 else 2935 else
2936 mrqc = IXGBE_MRQC_VMDQRSS64EN;
2937 } else {
2938 if (tcs > 4)
2946 mrqc = IXGBE_MRQC_RTRSS8TCEN; 2939 mrqc = IXGBE_MRQC_RTRSS8TCEN;
2947 break; 2940 else if (tcs > 1)
2948 case (IXGBE_FLAG_SRIOV_ENABLED): 2941 mrqc = IXGBE_MRQC_RTRSS4TCEN;
2949 mrqc = IXGBE_MRQC_VMDQEN; 2942 else
2950 break; 2943 mrqc = IXGBE_MRQC_RSSEN;
2951 default:
2952 break;
2953 } 2944 }
2954 } 2945 }
2955 2946
2956 /* Perform hash on these packet types */ 2947 /* Perform hash on these packet types */
2957 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 2948 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 |
2958 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP 2949 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2959 | IXGBE_MRQC_RSS_FIELD_IPV6 2950 IXGBE_MRQC_RSS_FIELD_IPV6 |
2960 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; 2951 IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2961 2952
2962 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) 2953 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
2963 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; 2954 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
@@ -3108,6 +3099,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
3108static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) 3099static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
3109{ 3100{
3110 struct ixgbe_hw *hw = &adapter->hw; 3101 struct ixgbe_hw *hw = &adapter->hw;
3102 int rss_i = adapter->ring_feature[RING_F_RSS].indices;
3111 int p; 3103 int p;
3112 3104
3113 /* PSRTYPE must be initialized in non 82598 adapters */ 3105 /* PSRTYPE must be initialized in non 82598 adapters */
@@ -3120,8 +3112,10 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
3120 if (hw->mac.type == ixgbe_mac_82598EB) 3112 if (hw->mac.type == ixgbe_mac_82598EB)
3121 return; 3113 return;
3122 3114
3123 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) 3115 if (rss_i > 3)
3124 psrtype |= (adapter->num_rx_queues_per_pool << 29); 3116 psrtype |= 2 << 29;
3117 else if (rss_i > 1)
3118 psrtype |= 1 << 29;
3125 3119
3126 for (p = 0; p < adapter->num_rx_pools; p++) 3120 for (p = 0; p < adapter->num_rx_pools; p++)
3127 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p), 3121 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p),
@@ -3131,28 +3125,28 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
3131static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) 3125static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
3132{ 3126{
3133 struct ixgbe_hw *hw = &adapter->hw; 3127 struct ixgbe_hw *hw = &adapter->hw;
3134 u32 gcr_ext;
3135 u32 vt_reg_bits;
3136 u32 reg_offset, vf_shift; 3128 u32 reg_offset, vf_shift;
3137 u32 vmdctl; 3129 u32 gcr_ext, vmdctl;
3138 int i; 3130 int i;
3139 3131
3140 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) 3132 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
3141 return; 3133 return;
3142 3134
3143 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 3135 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
3144 vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | IXGBE_VT_CTL_REPLEN; 3136 vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
3145 vt_reg_bits |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT); 3137 vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
3146 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits); 3138 vmdctl |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT);
3139 vmdctl |= IXGBE_VT_CTL_REPLEN;
3140 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
3147 3141
3148 vf_shift = adapter->num_vfs % 32; 3142 vf_shift = adapter->num_vfs % 32;
3149 reg_offset = (adapter->num_vfs >= 32) ? 1 : 0; 3143 reg_offset = (adapter->num_vfs >= 32) ? 1 : 0;
3150 3144
3151 /* Enable only the PF's pool for Tx/Rx */ 3145 /* Enable only the PF's pool for Tx/Rx */
3152 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift)); 3146 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift);
3153 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), 0); 3147 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
3154 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift)); 3148 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
3155 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), 0); 3149 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
3156 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); 3150 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
3157 3151
3158 /* Map PF MAC address in RAR Entry 0 to first pool following VFs */ 3152 /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
@@ -3162,16 +3156,25 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
3162 * Set up VF register offsets for selected VT Mode, 3156 * Set up VF register offsets for selected VT Mode,
3163 * i.e. 32 or 64 VFs for SR-IOV 3157 * i.e. 32 or 64 VFs for SR-IOV
3164 */ 3158 */
3165 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); 3159 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
3166 gcr_ext |= IXGBE_GCR_EXT_MSIX_EN; 3160 case IXGBE_82599_VMDQ_8Q_MASK:
3167 gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64; 3161 gcr_ext = IXGBE_GCR_EXT_VT_MODE_16;
3162 break;
3163 case IXGBE_82599_VMDQ_4Q_MASK:
3164 gcr_ext = IXGBE_GCR_EXT_VT_MODE_32;
3165 break;
3166 default:
3167 gcr_ext = IXGBE_GCR_EXT_VT_MODE_64;
3168 break;
3169 }
3170
3168 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); 3171 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
3169 3172
3170 /* enable Tx loopback for VF/PF communication */ 3173 /* enable Tx loopback for VF/PF communication */
3171 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); 3174 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
3175
3172 /* Enable MAC Anti-Spoofing */ 3176 /* Enable MAC Anti-Spoofing */
3173 hw->mac.ops.set_mac_anti_spoofing(hw, 3177 hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
3174 (adapter->num_vfs != 0),
3175 adapter->num_vfs); 3178 adapter->num_vfs);
3176 /* For VFs that have spoof checking turned off */ 3179 /* For VFs that have spoof checking turned off */
3177 for (i = 0; i < adapter->num_vfs; i++) { 3180 for (i = 0; i < adapter->num_vfs; i++) {
@@ -3564,37 +3567,21 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
3564static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) 3567static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
3565{ 3568{
3566 int q_idx; 3569 int q_idx;
3567 struct ixgbe_q_vector *q_vector;
3568 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3569
3570 /* legacy and MSI only use one vector */
3571 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
3572 q_vectors = 1;
3573 3570
3574 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 3571 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
3575 q_vector = adapter->q_vector[q_idx]; 3572 napi_enable(&adapter->q_vector[q_idx]->napi);
3576 napi_enable(&q_vector->napi);
3577 }
3578} 3573}
3579 3574
3580static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) 3575static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
3581{ 3576{
3582 int q_idx; 3577 int q_idx;
3583 struct ixgbe_q_vector *q_vector;
3584 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3585
3586 /* legacy and MSI only use one vector */
3587 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
3588 q_vectors = 1;
3589 3578
3590 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 3579 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
3591 q_vector = adapter->q_vector[q_idx]; 3580 napi_disable(&adapter->q_vector[q_idx]->napi);
3592 napi_disable(&q_vector->napi);
3593 }
3594} 3581}
3595 3582
3596#ifdef CONFIG_IXGBE_DCB 3583#ifdef CONFIG_IXGBE_DCB
3597/* 3584/**
3598 * ixgbe_configure_dcb - Configure DCB hardware 3585 * ixgbe_configure_dcb - Configure DCB hardware
3599 * @adapter: ixgbe adapter struct 3586 * @adapter: ixgbe adapter struct
3600 * 3587 *
@@ -3641,19 +3628,16 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3641 3628
3642 /* Enable RSS Hash per TC */ 3629 /* Enable RSS Hash per TC */
3643 if (hw->mac.type != ixgbe_mac_82598EB) { 3630 if (hw->mac.type != ixgbe_mac_82598EB) {
3644 int i; 3631 u32 msb = 0;
3645 u32 reg = 0; 3632 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;
3646
3647 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
3648 u8 msb = 0;
3649 u8 cnt = adapter->netdev->tc_to_txq[i].count;
3650
3651 while (cnt >>= 1)
3652 msb++;
3653 3633
3654 reg |= msb << IXGBE_RQTC_SHIFT_TC(i); 3634 while (rss_i) {
3635 msb++;
3636 rss_i >>= 1;
3655 } 3637 }
3656 IXGBE_WRITE_REG(hw, IXGBE_RQTC, reg); 3638
3639 /* write msb to all 8 TCs in one write */
3640 IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
3657 } 3641 }
3658} 3642}
3659#endif 3643#endif
@@ -3661,11 +3645,11 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3661/* Additional bittime to account for IXGBE framing */ 3645/* Additional bittime to account for IXGBE framing */
3662#define IXGBE_ETH_FRAMING 20 3646#define IXGBE_ETH_FRAMING 20
3663 3647
3664/* 3648/**
3665 * ixgbe_hpbthresh - calculate high water mark for flow control 3649 * ixgbe_hpbthresh - calculate high water mark for flow control
3666 * 3650 *
3667 * @adapter: board private structure to calculate for 3651 * @adapter: board private structure to calculate for
3668 * @pb - packet buffer to calculate 3652 * @pb: packet buffer to calculate
3669 */ 3653 */
3670static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb) 3654static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
3671{ 3655{
@@ -3679,18 +3663,12 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
3679 3663
3680#ifdef IXGBE_FCOE 3664#ifdef IXGBE_FCOE
3681 /* FCoE traffic class uses FCOE jumbo frames */ 3665 /* FCoE traffic class uses FCOE jumbo frames */
3682 if (dev->features & NETIF_F_FCOE_MTU) { 3666 if ((dev->features & NETIF_F_FCOE_MTU) &&
3683 int fcoe_pb = 0; 3667 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
3684 3668 (pb == ixgbe_fcoe_get_tc(adapter)))
3685#ifdef CONFIG_IXGBE_DCB 3669 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
3686 fcoe_pb = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
3687 3670
3688#endif 3671#endif
3689 if (fcoe_pb == pb && tc < IXGBE_FCOE_JUMBO_FRAME_SIZE)
3690 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
3691 }
3692#endif
3693
3694 /* Calculate delay value for device */ 3672 /* Calculate delay value for device */
3695 switch (hw->mac.type) { 3673 switch (hw->mac.type) {
3696 case ixgbe_mac_X540: 3674 case ixgbe_mac_X540:
@@ -3725,11 +3703,11 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
3725 return marker; 3703 return marker;
3726} 3704}
3727 3705
3728/* 3706/**
3729 * ixgbe_lpbthresh - calculate low water mark for for flow control 3707 * ixgbe_lpbthresh - calculate low water mark for for flow control
3730 * 3708 *
3731 * @adapter: board private structure to calculate for 3709 * @adapter: board private structure to calculate for
3732 * @pb - packet buffer to calculate 3710 * @pb: packet buffer to calculate
3733 */ 3711 */
3734static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter) 3712static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
3735{ 3713{
@@ -3973,7 +3951,18 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
3973 3951
3974 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { 3952 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3975 gpie &= ~IXGBE_GPIE_VTMODE_MASK; 3953 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
3976 gpie |= IXGBE_GPIE_VTMODE_64; 3954
3955 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
3956 case IXGBE_82599_VMDQ_8Q_MASK:
3957 gpie |= IXGBE_GPIE_VTMODE_16;
3958 break;
3959 case IXGBE_82599_VMDQ_4Q_MASK:
3960 gpie |= IXGBE_GPIE_VTMODE_32;
3961 break;
3962 default:
3963 gpie |= IXGBE_GPIE_VTMODE_64;
3964 break;
3965 }
3977 } 3966 }
3978 3967
3979 /* Enable Thermal over heat sensor interrupt */ 3968 /* Enable Thermal over heat sensor interrupt */
@@ -4413,18 +4402,17 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4413 4402
4414 /* Set capability flags */ 4403 /* Set capability flags */
4415 rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus()); 4404 rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
4416 adapter->ring_feature[RING_F_RSS].indices = rss; 4405 adapter->ring_feature[RING_F_RSS].limit = rss;
4417 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
4418 switch (hw->mac.type) { 4406 switch (hw->mac.type) {
4419 case ixgbe_mac_82598EB: 4407 case ixgbe_mac_82598EB:
4420 if (hw->device_id == IXGBE_DEV_ID_82598AT) 4408 if (hw->device_id == IXGBE_DEV_ID_82598AT)
4421 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; 4409 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
4422 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; 4410 adapter->max_q_vectors = MAX_Q_VECTORS_82598;
4423 break; 4411 break;
4424 case ixgbe_mac_X540: 4412 case ixgbe_mac_X540:
4425 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; 4413 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
4426 case ixgbe_mac_82599EB: 4414 case ixgbe_mac_82599EB:
4427 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; 4415 adapter->max_q_vectors = MAX_Q_VECTORS_82599;
4428 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; 4416 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
4429 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; 4417 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
4430 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) 4418 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
@@ -4432,13 +4420,12 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4432 /* Flow Director hash filters enabled */ 4420 /* Flow Director hash filters enabled */
4433 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; 4421 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
4434 adapter->atr_sample_rate = 20; 4422 adapter->atr_sample_rate = 20;
4435 adapter->ring_feature[RING_F_FDIR].indices = 4423 adapter->ring_feature[RING_F_FDIR].limit =
4436 IXGBE_MAX_FDIR_INDICES; 4424 IXGBE_MAX_FDIR_INDICES;
4437 adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; 4425 adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
4438#ifdef IXGBE_FCOE 4426#ifdef IXGBE_FCOE
4439 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; 4427 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
4440 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; 4428 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
4441 adapter->ring_feature[RING_F_FCOE].indices = 0;
4442#ifdef CONFIG_IXGBE_DCB 4429#ifdef CONFIG_IXGBE_DCB
4443 /* Default traffic class to use for FCoE */ 4430 /* Default traffic class to use for FCoE */
4444 adapter->fcoe.up = IXGBE_FCOE_DEFTC; 4431 adapter->fcoe.up = IXGBE_FCOE_DEFTC;
@@ -4588,10 +4575,16 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
4588 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]); 4575 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
4589 if (!err) 4576 if (!err)
4590 continue; 4577 continue;
4578
4591 e_err(probe, "Allocation for Tx Queue %u failed\n", i); 4579 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
4592 break; 4580 goto err_setup_tx;
4593 } 4581 }
4594 4582
4583 return 0;
4584err_setup_tx:
4585 /* rewind the index freeing the rings as we go */
4586 while (i--)
4587 ixgbe_free_tx_resources(adapter->tx_ring[i]);
4595 return err; 4588 return err;
4596} 4589}
4597 4590
@@ -4666,10 +4659,16 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
4666 err = ixgbe_setup_rx_resources(adapter->rx_ring[i]); 4659 err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
4667 if (!err) 4660 if (!err)
4668 continue; 4661 continue;
4662
4669 e_err(probe, "Allocation for Rx Queue %u failed\n", i); 4663 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
4670 break; 4664 goto err_setup_rx;
4671 } 4665 }
4672 4666
4667 return 0;
4668err_setup_rx:
4669 /* rewind the index freeing the rings as we go */
4670 while (i--)
4671 ixgbe_free_rx_resources(adapter->rx_ring[i]);
4673 return err; 4672 return err;
4674} 4673}
4675 4674
@@ -4825,15 +4824,31 @@ static int ixgbe_open(struct net_device *netdev)
4825 if (err) 4824 if (err)
4826 goto err_req_irq; 4825 goto err_req_irq;
4827 4826
4827 /* Notify the stack of the actual queue counts. */
4828 err = netif_set_real_num_tx_queues(netdev,
4829 adapter->num_rx_pools > 1 ? 1 :
4830 adapter->num_tx_queues);
4831 if (err)
4832 goto err_set_queues;
4833
4834
4835 err = netif_set_real_num_rx_queues(netdev,
4836 adapter->num_rx_pools > 1 ? 1 :
4837 adapter->num_rx_queues);
4838 if (err)
4839 goto err_set_queues;
4840
4828 ixgbe_up_complete(adapter); 4841 ixgbe_up_complete(adapter);
4829 4842
4830 return 0; 4843 return 0;
4831 4844
4845err_set_queues:
4846 ixgbe_free_irq(adapter);
4832err_req_irq: 4847err_req_irq:
4833err_setup_rx:
4834 ixgbe_free_all_rx_resources(adapter); 4848 ixgbe_free_all_rx_resources(adapter);
4835err_setup_tx: 4849err_setup_rx:
4836 ixgbe_free_all_tx_resources(adapter); 4850 ixgbe_free_all_tx_resources(adapter);
4851err_setup_tx:
4837 ixgbe_reset(adapter); 4852 ixgbe_reset(adapter);
4838 4853
4839 return err; 4854 return err;
@@ -4891,23 +4906,19 @@ static int ixgbe_resume(struct pci_dev *pdev)
4891 4906
4892 pci_wake_from_d3(pdev, false); 4907 pci_wake_from_d3(pdev, false);
4893 4908
4894 rtnl_lock();
4895 err = ixgbe_init_interrupt_scheme(adapter);
4896 rtnl_unlock();
4897 if (err) {
4898 e_dev_err("Cannot initialize interrupts for device\n");
4899 return err;
4900 }
4901
4902 ixgbe_reset(adapter); 4909 ixgbe_reset(adapter);
4903 4910
4904 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); 4911 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
4905 4912
4906 if (netif_running(netdev)) { 4913 rtnl_lock();
4914 err = ixgbe_init_interrupt_scheme(adapter);
4915 if (!err && netif_running(netdev))
4907 err = ixgbe_open(netdev); 4916 err = ixgbe_open(netdev);
4908 if (err) 4917
4909 return err; 4918 rtnl_unlock();
4910 } 4919
4920 if (err)
4921 return err;
4911 4922
4912 netif_device_attach(netdev); 4923 netif_device_attach(netdev);
4913 4924
@@ -5246,7 +5257,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5246 5257
5247/** 5258/**
5248 * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table 5259 * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
5249 * @adapter - pointer to the device adapter structure 5260 * @adapter: pointer to the device adapter structure
5250 **/ 5261 **/
5251static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter) 5262static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
5252{ 5263{
@@ -5282,7 +5293,7 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
5282 5293
5283/** 5294/**
5284 * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts 5295 * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
5285 * @adapter - pointer to the device adapter structure 5296 * @adapter: pointer to the device adapter structure
5286 * 5297 *
5287 * This function serves two purposes. First it strobes the interrupt lines 5298 * This function serves two purposes. First it strobes the interrupt lines
5288 * in order to make certain interrupts are occurring. Secondly it sets the 5299 * in order to make certain interrupts are occurring. Secondly it sets the
@@ -5316,7 +5327,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
5316 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER)); 5327 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
5317 } else { 5328 } else {
5318 /* get one bit for every active tx/rx interrupt vector */ 5329 /* get one bit for every active tx/rx interrupt vector */
5319 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { 5330 for (i = 0; i < adapter->num_q_vectors; i++) {
5320 struct ixgbe_q_vector *qv = adapter->q_vector[i]; 5331 struct ixgbe_q_vector *qv = adapter->q_vector[i];
5321 if (qv->rx.ring || qv->tx.ring) 5332 if (qv->rx.ring || qv->tx.ring)
5322 eics |= ((u64)1 << i); 5333 eics |= ((u64)1 << i);
@@ -5330,8 +5341,8 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
5330 5341
5331/** 5342/**
5332 * ixgbe_watchdog_update_link - update the link status 5343 * ixgbe_watchdog_update_link - update the link status
5333 * @adapter - pointer to the device adapter structure 5344 * @adapter: pointer to the device adapter structure
5334 * @link_speed - pointer to a u32 to store the link_speed 5345 * @link_speed: pointer to a u32 to store the link_speed
5335 **/ 5346 **/
5336static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter) 5347static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
5337{ 5348{
@@ -5374,7 +5385,7 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
5374/** 5385/**
5375 * ixgbe_watchdog_link_is_up - update netif_carrier status and 5386 * ixgbe_watchdog_link_is_up - update netif_carrier status and
5376 * print link up message 5387 * print link up message
5377 * @adapter - pointer to the device adapter structure 5388 * @adapter: pointer to the device adapter structure
5378 **/ 5389 **/
5379static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) 5390static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
5380{ 5391{
@@ -5429,12 +5440,15 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
5429 5440
5430 netif_carrier_on(netdev); 5441 netif_carrier_on(netdev);
5431 ixgbe_check_vf_rate_limit(adapter); 5442 ixgbe_check_vf_rate_limit(adapter);
5443
5444 /* ping all the active vfs to let them know link has changed */
5445 ixgbe_ping_all_vfs(adapter);
5432} 5446}
5433 5447
5434/** 5448/**
5435 * ixgbe_watchdog_link_is_down - update netif_carrier status and 5449 * ixgbe_watchdog_link_is_down - update netif_carrier status and
5436 * print link down message 5450 * print link down message
5437 * @adapter - pointer to the adapter structure 5451 * @adapter: pointer to the adapter structure
5438 **/ 5452 **/
5439static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter) 5453static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
5440{ 5454{
@@ -5458,11 +5472,14 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
5458 5472
5459 e_info(drv, "NIC Link is Down\n"); 5473 e_info(drv, "NIC Link is Down\n");
5460 netif_carrier_off(netdev); 5474 netif_carrier_off(netdev);
5475
5476 /* ping all the active vfs to let them know link has changed */
5477 ixgbe_ping_all_vfs(adapter);
5461} 5478}
5462 5479
5463/** 5480/**
5464 * ixgbe_watchdog_flush_tx - flush queues on link down 5481 * ixgbe_watchdog_flush_tx - flush queues on link down
5465 * @adapter - pointer to the device adapter structure 5482 * @adapter: pointer to the device adapter structure
5466 **/ 5483 **/
5467static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) 5484static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
5468{ 5485{
@@ -5511,7 +5528,7 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
5511 5528
5512/** 5529/**
5513 * ixgbe_watchdog_subtask - check and bring link up 5530 * ixgbe_watchdog_subtask - check and bring link up
5514 * @adapter - pointer to the device adapter structure 5531 * @adapter: pointer to the device adapter structure
5515 **/ 5532 **/
5516static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter) 5533static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
5517{ 5534{
@@ -5535,7 +5552,7 @@ static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
5535 5552
5536/** 5553/**
5537 * ixgbe_sfp_detection_subtask - poll for SFP+ cable 5554 * ixgbe_sfp_detection_subtask - poll for SFP+ cable
5538 * @adapter - the ixgbe adapter structure 5555 * @adapter: the ixgbe adapter structure
5539 **/ 5556 **/
5540static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) 5557static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
5541{ 5558{
@@ -5602,7 +5619,7 @@ sfp_out:
5602 5619
5603/** 5620/**
5604 * ixgbe_sfp_link_config_subtask - set up link SFP after module install 5621 * ixgbe_sfp_link_config_subtask - set up link SFP after module install
5605 * @adapter - the ixgbe adapter structure 5622 * @adapter: the ixgbe adapter structure
5606 **/ 5623 **/
5607static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) 5624static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
5608{ 5625{
@@ -6233,8 +6250,14 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
6233 if (((protocol == htons(ETH_P_FCOE)) || 6250 if (((protocol == htons(ETH_P_FCOE)) ||
6234 (protocol == htons(ETH_P_FIP))) && 6251 (protocol == htons(ETH_P_FIP))) &&
6235 (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) { 6252 (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
6236 txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); 6253 struct ixgbe_ring_feature *f;
6237 txq += adapter->ring_feature[RING_F_FCOE].mask; 6254
6255 f = &adapter->ring_feature[RING_F_FCOE];
6256
6257 while (txq >= f->indices)
6258 txq -= f->indices;
6259 txq += adapter->ring_feature[RING_F_FCOE].offset;
6260
6238 return txq; 6261 return txq;
6239 } 6262 }
6240#endif 6263#endif
@@ -6389,17 +6412,12 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
6389 struct ixgbe_adapter *adapter = netdev_priv(netdev); 6412 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6390 struct ixgbe_ring *tx_ring; 6413 struct ixgbe_ring *tx_ring;
6391 6414
6392 if (skb->len <= 0) {
6393 dev_kfree_skb_any(skb);
6394 return NETDEV_TX_OK;
6395 }
6396
6397 /* 6415 /*
6398 * The minimum packet size for olinfo paylen is 17 so pad the skb 6416 * The minimum packet size for olinfo paylen is 17 so pad the skb
6399 * in order to meet this minimum size requirement. 6417 * in order to meet this minimum size requirement.
6400 */ 6418 */
6401 if (skb->len < 17) { 6419 if (unlikely(skb->len < 17)) {
6402 if (skb_padto(skb, 17)) 6420 if (skb_pad(skb, 17 - skb->len))
6403 return NETDEV_TX_OK; 6421 return NETDEV_TX_OK;
6404 skb->len = 17; 6422 skb->len = 17;
6405 } 6423 }
@@ -6533,11 +6551,8 @@ static void ixgbe_netpoll(struct net_device *netdev)
6533 6551
6534 adapter->flags |= IXGBE_FLAG_IN_NETPOLL; 6552 adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
6535 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 6553 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
6536 int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 6554 for (i = 0; i < adapter->num_q_vectors; i++)
6537 for (i = 0; i < num_q_vectors; i++) { 6555 ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
6538 struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
6539 ixgbe_msix_clean_rings(0, q_vector);
6540 }
6541 } else { 6556 } else {
6542 ixgbe_intr(adapter->pdev->irq, netdev); 6557 ixgbe_intr(adapter->pdev->irq, netdev);
6543 } 6558 }
@@ -6594,8 +6609,9 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
6594} 6609}
6595 6610
6596#ifdef CONFIG_IXGBE_DCB 6611#ifdef CONFIG_IXGBE_DCB
6597/* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid. 6612/**
6598 * #adapter: pointer to ixgbe_adapter 6613 * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
6614 * @adapter: pointer to ixgbe_adapter
6599 * @tc: number of traffic classes currently enabled 6615 * @tc: number of traffic classes currently enabled
6600 * 6616 *
6601 * Configure a valid 802.1Qp to Rx packet buffer mapping ie confirm 6617 * Configure a valid 802.1Qp to Rx packet buffer mapping ie confirm
@@ -6630,8 +6646,33 @@ static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
6630 return; 6646 return;
6631} 6647}
6632 6648
6633/* ixgbe_setup_tc - routine to configure net_device for multiple traffic 6649/**
6634 * classes. 6650 * ixgbe_set_prio_tc_map - Configure netdev prio tc map
6651 * @adapter: Pointer to adapter struct
6652 *
6653 * Populate the netdev user priority to tc map
6654 */
6655static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
6656{
6657 struct net_device *dev = adapter->netdev;
6658 struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
6659 struct ieee_ets *ets = adapter->ixgbe_ieee_ets;
6660 u8 prio;
6661
6662 for (prio = 0; prio < MAX_USER_PRIORITY; prio++) {
6663 u8 tc = 0;
6664
6665 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)
6666 tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio);
6667 else if (ets)
6668 tc = ets->prio_tc[prio];
6669
6670 netdev_set_prio_tc_map(dev, prio, tc);
6671 }
6672}
6673
6674/**
6675 * ixgbe_setup_tc - configure net_device for multiple traffic classes
6635 * 6676 *
6636 * @netdev: net device to configure 6677 * @netdev: net device to configure
6637 * @tc: number of traffic classes to enable 6678 * @tc: number of traffic classes to enable
@@ -6647,11 +6688,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
6647 return -EINVAL; 6688 return -EINVAL;
6648 } 6689 }
6649 6690
6650 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
6651 e_err(drv, "Enable failed, SR-IOV enabled\n");
6652 return -EINVAL;
6653 }
6654
6655 /* Hardware supports up to 8 traffic classes */ 6691 /* Hardware supports up to 8 traffic classes */
6656 if (tc > adapter->dcb_cfg.num_tcs.pg_tcs || 6692 if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
6657 (hw->mac.type == ixgbe_mac_82598EB && 6693 (hw->mac.type == ixgbe_mac_82598EB &&
@@ -6668,6 +6704,8 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
6668 6704
6669 if (tc) { 6705 if (tc) {
6670 netdev_set_num_tc(dev, tc); 6706 netdev_set_num_tc(dev, tc);
6707 ixgbe_set_prio_tc_map(adapter);
6708
6671 adapter->flags |= IXGBE_FLAG_DCB_ENABLED; 6709 adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
6672 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; 6710 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
6673 6711
@@ -6677,6 +6715,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
6677 } 6715 }
6678 } else { 6716 } else {
6679 netdev_reset_tc(dev); 6717 netdev_reset_tc(dev);
6718
6680 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 6719 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
6681 adapter->hw.fc.requested_mode = adapter->last_lfc_mode; 6720 adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
6682 6721
@@ -6711,10 +6750,6 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
6711{ 6750{
6712 struct ixgbe_adapter *adapter = netdev_priv(netdev); 6751 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6713 6752
6714 /* return error if RXHASH is being enabled when RSS is not supported */
6715 if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
6716 features &= ~NETIF_F_RXHASH;
6717
6718 /* If Rx checksum is disabled, then RSC/LRO should also be disabled */ 6753 /* If Rx checksum is disabled, then RSC/LRO should also be disabled */
6719 if (!(features & NETIF_F_RXCSUM)) 6754 if (!(features & NETIF_F_RXCSUM))
6720 features &= ~NETIF_F_LRO; 6755 features &= ~NETIF_F_LRO;
@@ -6757,7 +6792,7 @@ static int ixgbe_set_features(struct net_device *netdev,
6757 if (!(features & NETIF_F_NTUPLE)) { 6792 if (!(features & NETIF_F_NTUPLE)) {
6758 if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { 6793 if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
6759 /* turn off Flow Director, set ATR and reset */ 6794 /* turn off Flow Director, set ATR and reset */
6760 if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && 6795 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
6761 !(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) 6796 !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
6762 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; 6797 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
6763 need_reset = true; 6798 need_reset = true;
@@ -7040,7 +7075,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
7040#endif 7075#endif
7041 7076
7042 if (ii->mac == ixgbe_mac_82598EB) 7077 if (ii->mac == ixgbe_mac_82598EB)
7078#ifdef CONFIG_IXGBE_DCB
7079 indices = min_t(unsigned int, indices, MAX_TRAFFIC_CLASS * 4);
7080#else
7043 indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES); 7081 indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
7082#endif
7044 else 7083 else
7045 indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES); 7084 indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
7046 7085
@@ -7191,10 +7230,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
7191 netdev->priv_flags |= IFF_UNICAST_FLT; 7230 netdev->priv_flags |= IFF_UNICAST_FLT;
7192 netdev->priv_flags |= IFF_SUPP_NOFCS; 7231 netdev->priv_flags |= IFF_SUPP_NOFCS;
7193 7232
7194 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
7195 adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
7196 IXGBE_FLAG_DCB_ENABLED);
7197
7198#ifdef CONFIG_IXGBE_DCB 7233#ifdef CONFIG_IXGBE_DCB
7199 netdev->dcbnl_ops = &dcbnl_ops; 7234 netdev->dcbnl_ops = &dcbnl_ops;
7200#endif 7235#endif
@@ -7249,11 +7284,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
7249 if (err) 7284 if (err)
7250 goto err_sw_init; 7285 goto err_sw_init;
7251 7286
7252 if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
7253 netdev->hw_features &= ~NETIF_F_RXHASH;
7254 netdev->features &= ~NETIF_F_RXHASH;
7255 }
7256
7257 /* WOL not supported for all devices */ 7287 /* WOL not supported for all devices */
7258 adapter->wol = 0; 7288 adapter->wol = 0;
7259 hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap); 7289 hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 24117709d6a2..71659edf81aa 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -907,6 +907,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
907 * 8 SFP_act_lmt_DA_CORE1 - 82599-specific 907 * 8 SFP_act_lmt_DA_CORE1 - 82599-specific
908 * 9 SFP_1g_cu_CORE0 - 82599-specific 908 * 9 SFP_1g_cu_CORE0 - 82599-specific
909 * 10 SFP_1g_cu_CORE1 - 82599-specific 909 * 10 SFP_1g_cu_CORE1 - 82599-specific
910 * 11 SFP_1g_sx_CORE0 - 82599-specific
911 * 12 SFP_1g_sx_CORE1 - 82599-specific
910 */ 912 */
911 if (hw->mac.type == ixgbe_mac_82598EB) { 913 if (hw->mac.type == ixgbe_mac_82598EB) {
912 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) 914 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
@@ -957,6 +959,13 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
957 else 959 else
958 hw->phy.sfp_type = 960 hw->phy.sfp_type =
959 ixgbe_sfp_type_1g_cu_core1; 961 ixgbe_sfp_type_1g_cu_core1;
962 } else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) {
963 if (hw->bus.lan_id == 0)
964 hw->phy.sfp_type =
965 ixgbe_sfp_type_1g_sx_core0;
966 else
967 hw->phy.sfp_type =
968 ixgbe_sfp_type_1g_sx_core1;
960 } else { 969 } else {
961 hw->phy.sfp_type = ixgbe_sfp_type_unknown; 970 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
962 } 971 }
@@ -1049,7 +1058,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
1049 /* Verify supported 1G SFP modules */ 1058 /* Verify supported 1G SFP modules */
1050 if (comp_codes_10g == 0 && 1059 if (comp_codes_10g == 0 &&
1051 !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || 1060 !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
1052 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0)) { 1061 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
1062 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
1063 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
1053 hw->phy.type = ixgbe_phy_sfp_unsupported; 1064 hw->phy.type = ixgbe_phy_sfp_unsupported;
1054 status = IXGBE_ERR_SFP_NOT_SUPPORTED; 1065 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
1055 goto out; 1066 goto out;
@@ -1064,7 +1075,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
1064 hw->mac.ops.get_device_caps(hw, &enforce_sfp); 1075 hw->mac.ops.get_device_caps(hw, &enforce_sfp);
1065 if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) && 1076 if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
1066 !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) || 1077 !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) ||
1067 (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1))) { 1078 (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) ||
1079 (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0) ||
1080 (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1))) {
1068 /* Make sure we're a supported PHY type */ 1081 /* Make sure we're a supported PHY type */
1069 if (hw->phy.type == ixgbe_phy_sfp_intel) { 1082 if (hw->phy.type == ixgbe_phy_sfp_intel) {
1070 status = 0; 1083 status = 0;
@@ -1128,10 +1141,12 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
1128 * SR modules 1141 * SR modules
1129 */ 1142 */
1130 if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 || 1143 if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
1131 sfp_type == ixgbe_sfp_type_1g_cu_core0) 1144 sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
1145 sfp_type == ixgbe_sfp_type_1g_sx_core0)
1132 sfp_type = ixgbe_sfp_type_srlr_core0; 1146 sfp_type = ixgbe_sfp_type_srlr_core0;
1133 else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 || 1147 else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
1134 sfp_type == ixgbe_sfp_type_1g_cu_core1) 1148 sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
1149 sfp_type == ixgbe_sfp_type_1g_sx_core1)
1135 sfp_type = ixgbe_sfp_type_srlr_core1; 1150 sfp_type = ixgbe_sfp_type_srlr_core1;
1136 1151
1137 /* Read offset to PHY init contents */ 1152 /* Read offset to PHY init contents */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index dcebd128becf..3456d5617143 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -26,6 +26,7 @@
26*******************************************************************************/ 26*******************************************************************************/
27#include "ixgbe.h" 27#include "ixgbe.h"
28#include <linux/export.h> 28#include <linux/export.h>
29#include <linux/ptp_classify.h>
29 30
30/* 31/*
31 * The 82599 and the X540 do not have true 64bit nanosecond scale 32 * The 82599 and the X540 do not have true 64bit nanosecond scale
@@ -100,9 +101,13 @@
100#define NSECS_PER_SEC 1000000000ULL 101#define NSECS_PER_SEC 1000000000ULL
101#endif 102#endif
102 103
104static struct sock_filter ptp_filter[] = {
105 PTP_FILTER
106};
107
103/** 108/**
104 * ixgbe_ptp_read - read raw cycle counter (to be used by time counter) 109 * ixgbe_ptp_read - read raw cycle counter (to be used by time counter)
105 * @cc - the cyclecounter structure 110 * @cc: the cyclecounter structure
106 * 111 *
107 * this function reads the cyclecounter registers and is called by the 112 * this function reads the cyclecounter registers and is called by the
108 * cyclecounter structure used to construct a ns counter from the 113 * cyclecounter structure used to construct a ns counter from the
@@ -123,8 +128,8 @@ static cycle_t ixgbe_ptp_read(const struct cyclecounter *cc)
123 128
124/** 129/**
125 * ixgbe_ptp_adjfreq 130 * ixgbe_ptp_adjfreq
126 * @ptp - the ptp clock structure 131 * @ptp: the ptp clock structure
127 * @ppb - parts per billion adjustment from base 132 * @ppb: parts per billion adjustment from base
128 * 133 *
129 * adjust the frequency of the ptp cycle counter by the 134 * adjust the frequency of the ptp cycle counter by the
130 * indicated ppb from the base frequency. 135 * indicated ppb from the base frequency.
@@ -170,8 +175,8 @@ static int ixgbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
170 175
171/** 176/**
172 * ixgbe_ptp_adjtime 177 * ixgbe_ptp_adjtime
173 * @ptp - the ptp clock structure 178 * @ptp: the ptp clock structure
174 * @delta - offset to adjust the cycle counter by 179 * @delta: offset to adjust the cycle counter by
175 * 180 *
176 * adjust the timer by resetting the timecounter structure. 181 * adjust the timer by resetting the timecounter structure.
177 */ 182 */
@@ -198,8 +203,8 @@ static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
198 203
199/** 204/**
200 * ixgbe_ptp_gettime 205 * ixgbe_ptp_gettime
201 * @ptp - the ptp clock structure 206 * @ptp: the ptp clock structure
202 * @ts - timespec structure to hold the current time value 207 * @ts: timespec structure to hold the current time value
203 * 208 *
204 * read the timecounter and return the correct value on ns, 209 * read the timecounter and return the correct value on ns,
205 * after converting it into a struct timespec. 210 * after converting it into a struct timespec.
@@ -224,8 +229,8 @@ static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
224 229
225/** 230/**
226 * ixgbe_ptp_settime 231 * ixgbe_ptp_settime
227 * @ptp - the ptp clock structure 232 * @ptp: the ptp clock structure
228 * @ts - the timespec containing the new time for the cycle counter 233 * @ts: the timespec containing the new time for the cycle counter
229 * 234 *
230 * reset the timecounter to use a new base value instead of the kernel 235 * reset the timecounter to use a new base value instead of the kernel
231 * wall timer value. 236 * wall timer value.
@@ -251,9 +256,9 @@ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp,
251 256
252/** 257/**
253 * ixgbe_ptp_enable 258 * ixgbe_ptp_enable
254 * @ptp - the ptp clock structure 259 * @ptp: the ptp clock structure
255 * @rq - the requested feature to change 260 * @rq: the requested feature to change
256 * @on - whether to enable or disable the feature 261 * @on: whether to enable or disable the feature
257 * 262 *
258 * enable (or disable) ancillary features of the phc subsystem. 263 * enable (or disable) ancillary features of the phc subsystem.
259 * our driver only supports the PPS feature on the X540 264 * our driver only supports the PPS feature on the X540
@@ -289,8 +294,8 @@ static int ixgbe_ptp_enable(struct ptp_clock_info *ptp,
289 294
290/** 295/**
291 * ixgbe_ptp_check_pps_event 296 * ixgbe_ptp_check_pps_event
292 * @adapter - the private adapter structure 297 * @adapter: the private adapter structure
293 * @eicr - the interrupt cause register value 298 * @eicr: the interrupt cause register value
294 * 299 *
295 * This function is called by the interrupt routine when checking for 300 * This function is called by the interrupt routine when checking for
296 * interrupts. It will check and handle a pps event. 301 * interrupts. It will check and handle a pps event.
@@ -307,20 +312,21 @@ void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr)
307 !(adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED)) 312 !(adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED))
308 return; 313 return;
309 314
310 switch (hw->mac.type) { 315 if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) {
311 case ixgbe_mac_X540: 316 switch (hw->mac.type) {
312 if (eicr & IXGBE_EICR_TIMESYNC) 317 case ixgbe_mac_X540:
313 ptp_clock_event(adapter->ptp_clock, &event); 318 ptp_clock_event(adapter->ptp_clock, &event);
314 break; 319 break;
315 default: 320 default:
316 break; 321 break;
322 }
317 } 323 }
318} 324}
319 325
320/** 326/**
321 * ixgbe_ptp_enable_sdp 327 * ixgbe_ptp_enable_sdp
322 * @hw - the hardware private structure 328 * @hw: the hardware private structure
323 * @shift - the clock shift for calculating nanoseconds 329 * @shift: the clock shift for calculating nanoseconds
324 * 330 *
325 * this function enables the clock out feature on the sdp0 for the 331 * this function enables the clock out feature on the sdp0 for the
326 * X540 device. It will create a 1second periodic output that can be 332 * X540 device. It will create a 1second periodic output that can be
@@ -393,7 +399,7 @@ static void ixgbe_ptp_enable_sdp(struct ixgbe_hw *hw, int shift)
393 399
394/** 400/**
395 * ixgbe_ptp_disable_sdp 401 * ixgbe_ptp_disable_sdp
396 * @hw - the private hardware structure 402 * @hw: the private hardware structure
397 * 403 *
398 * this function disables the auxiliary SDP clock out feature 404 * this function disables the auxiliary SDP clock out feature
399 */ 405 */
@@ -425,6 +431,68 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
425} 431}
426 432
427/** 433/**
434 * ixgbe_ptp_match - determine if this skb matches a ptp packet
435 * @skb: pointer to the skb
436 * @hwtstamp: pointer to the hwtstamp_config to check
437 *
438 * Determine whether the skb should have been timestamped, assuming the
439 * hwtstamp was set via the hwtstamp ioctl. Returns non-zero when the packet
440 * should have a timestamp waiting in the registers, and 0 otherwise.
441 *
442 * V1 packets have to check the version type to determine whether they are
443 * correct. However, we can't directly access the data because it might be
444 * fragmented in the SKB, in paged memory. In order to work around this, we
445 * use skb_copy_bits which will properly copy the data whether it is in the
446 * paged memory fragments or not. We have to copy the IP header as well as the
447 * message type.
448 */
449static int ixgbe_ptp_match(struct sk_buff *skb, int rx_filter)
450{
451 struct iphdr iph;
452 u8 msgtype;
453 unsigned int type, offset;
454
455 if (rx_filter == HWTSTAMP_FILTER_NONE)
456 return 0;
457
458 type = sk_run_filter(skb, ptp_filter);
459
460 if (likely(rx_filter == HWTSTAMP_FILTER_PTP_V2_EVENT))
461 return type & PTP_CLASS_V2;
462
463 /* For the remaining cases actually check message type */
464 switch (type) {
465 case PTP_CLASS_V1_IPV4:
466 skb_copy_bits(skb, OFF_IHL, &iph, sizeof(iph));
467 offset = ETH_HLEN + (iph.ihl << 2) + UDP_HLEN + OFF_PTP_CONTROL;
468 break;
469 case PTP_CLASS_V1_IPV6:
470 offset = OFF_PTP6 + OFF_PTP_CONTROL;
471 break;
472 default:
473 /* other cases invalid or handled above */
474 return 0;
475 }
476
477 /* Make sure our buffer is long enough */
478 if (skb->len < offset)
479 return 0;
480
481 skb_copy_bits(skb, offset, &msgtype, sizeof(msgtype));
482
483 switch (rx_filter) {
484 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
485 return (msgtype == IXGBE_RXMTRL_V1_SYNC_MSG);
486 break;
487 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
488 return (msgtype == IXGBE_RXMTRL_V1_DELAY_REQ_MSG);
489 break;
490 default:
491 return 0;
492 }
493}
494
495/**
428 * ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp 496 * ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp
429 * @q_vector: structure containing interrupt and ring information 497 * @q_vector: structure containing interrupt and ring information
430 * @skb: particular skb to send timestamp with 498 * @skb: particular skb to send timestamp with
@@ -473,6 +541,7 @@ void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
473/** 541/**
474 * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp 542 * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
475 * @q_vector: structure containing interrupt and ring information 543 * @q_vector: structure containing interrupt and ring information
544 * @rx_desc: the rx descriptor
476 * @skb: particular skb to send timestamp with 545 * @skb: particular skb to send timestamp with
477 * 546 *
478 * if the timestamp is valid, we convert it into the timecounter ns 547 * if the timestamp is valid, we convert it into the timecounter ns
@@ -480,6 +549,7 @@ void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
480 * is passed up the network stack 549 * is passed up the network stack
481 */ 550 */
482void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, 551void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
552 union ixgbe_adv_rx_desc *rx_desc,
483 struct sk_buff *skb) 553 struct sk_buff *skb)
484{ 554{
485 struct ixgbe_adapter *adapter; 555 struct ixgbe_adapter *adapter;
@@ -497,21 +567,33 @@ void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
497 hw = &adapter->hw; 567 hw = &adapter->hw;
498 568
499 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 569 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
570
571 /* Check if we have a valid timestamp and make sure the skb should
572 * have been timestamped */
573 if (likely(!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID) ||
574 !ixgbe_ptp_match(skb, adapter->rx_hwtstamp_filter)))
575 return;
576
577 /*
578 * Always read the registers, in order to clear a possible fault
579 * because of stagnant RX timestamp values for a packet that never
580 * reached the queue.
581 */
500 regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); 582 regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
501 regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32; 583 regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32;
502 584
503 /* 585 /*
504 * If this bit is set, then the RX registers contain the time stamp. No 586 * If the timestamp bit is set in the packet's descriptor, we know the
505 * other packet will be time stamped until we read these registers, so 587 * timestamp belongs to this packet. No other packet can be
506 * read the registers to make them available again. Because only one 588 * timestamped until the registers for timestamping have been read.
507 * packet can be time stamped at a time, we know that the register 589 * Therefor only one packet with this bit can be in the queue at a
508 * values must belong to this one here and therefore we don't need to 590 * time, and the rx timestamp values that were in the registers belong
509 * compare any of the additional attributes stored for it. 591 * to this packet.
510 * 592 *
511 * If nothing went wrong, then it should have a skb_shared_tx that we 593 * If nothing went wrong, then it should have a skb_shared_tx that we
512 * can turn into a skb_shared_hwtstamps. 594 * can turn into a skb_shared_hwtstamps.
513 */ 595 */
514 if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID)) 596 if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
515 return; 597 return;
516 598
517 spin_lock_irqsave(&adapter->tmreg_lock, flags); 599 spin_lock_irqsave(&adapter->tmreg_lock, flags);
@@ -539,6 +621,11 @@ void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
539 * type has to be specified. Matching the kind of event packet is 621 * type has to be specified. Matching the kind of event packet is
540 * not supported, with the exception of "all V2 events regardless of 622 * not supported, with the exception of "all V2 events regardless of
541 * level 2 or 4". 623 * level 2 or 4".
624 *
625 * Since hardware always timestamps Path delay packets when timestamping V2
626 * packets, regardless of the type specified in the register, only use V2
627 * Event mode. This more accurately tells the user what the hardware is going
628 * to do anyways.
542 */ 629 */
543int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, 630int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
544 struct ifreq *ifr, int cmd) 631 struct ifreq *ifr, int cmd)
@@ -582,41 +669,30 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
582 tsync_rx_mtrl = IXGBE_RXMTRL_V1_DELAY_REQ_MSG; 669 tsync_rx_mtrl = IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
583 is_l4 = true; 670 is_l4 = true;
584 break; 671 break;
672 case HWTSTAMP_FILTER_PTP_V2_EVENT:
673 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
674 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
585 case HWTSTAMP_FILTER_PTP_V2_SYNC: 675 case HWTSTAMP_FILTER_PTP_V2_SYNC:
586 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 676 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
587 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 677 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
588 tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2;
589 tsync_rx_mtrl = IXGBE_RXMTRL_V2_SYNC_MSG;
590 is_l2 = true;
591 is_l4 = true;
592 config.rx_filter = HWTSTAMP_FILTER_SOME;
593 break;
594 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 678 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
595 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 679 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
596 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 680 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
597 tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2;
598 tsync_rx_mtrl = IXGBE_RXMTRL_V2_DELAY_REQ_MSG;
599 is_l2 = true;
600 is_l4 = true;
601 config.rx_filter = HWTSTAMP_FILTER_SOME;
602 break;
603 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
604 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
605 case HWTSTAMP_FILTER_PTP_V2_EVENT:
606 tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2; 681 tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
607 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
608 is_l2 = true; 682 is_l2 = true;
609 is_l4 = true; 683 is_l4 = true;
684 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
610 break; 685 break;
611 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 686 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
612 case HWTSTAMP_FILTER_ALL: 687 case HWTSTAMP_FILTER_ALL:
613 default: 688 default:
614 /* 689 /*
615 * register RXMTRL must be set, therefore it is not 690 * register RXMTRL must be set in order to do V1 packets,
616 * possible to time stamp both V1 Sync and Delay_Req messages 691 * therefore it is not possible to time stamp both V1 Sync and
617 * and hardware does not support timestamping all packets 692 * Delay_Req messages and hardware does not support
618 * => return error 693 * timestamping all packets => return error
619 */ 694 */
695 config.rx_filter = HWTSTAMP_FILTER_NONE;
620 return -ERANGE; 696 return -ERANGE;
621 } 697 }
622 698
@@ -626,6 +702,9 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
626 return 0; 702 return 0;
627 } 703 }
628 704
705 /* Store filter value for later use */
706 adapter->rx_hwtstamp_filter = config.rx_filter;
707
629 /* define ethertype filter for timestamped packets */ 708 /* define ethertype filter for timestamped packets */
630 if (is_l2) 709 if (is_l2)
631 IXGBE_WRITE_REG(hw, IXGBE_ETQF(3), 710 IXGBE_WRITE_REG(hw, IXGBE_ETQF(3),
@@ -690,7 +769,7 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
690 769
691/** 770/**
692 * ixgbe_ptp_start_cyclecounter - create the cycle counter from hw 771 * ixgbe_ptp_start_cyclecounter - create the cycle counter from hw
693 * @adapter - pointer to the adapter structure 772 * @adapter: pointer to the adapter structure
694 * 773 *
695 * this function initializes the timecounter and cyclecounter 774 * this function initializes the timecounter and cyclecounter
696 * structures for use in generated a ns counter from the arbitrary 775 * structures for use in generated a ns counter from the arbitrary
@@ -826,7 +905,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
826 905
827/** 906/**
828 * ixgbe_ptp_init 907 * ixgbe_ptp_init
829 * @adapter - the ixgbe private adapter structure 908 * @adapter: the ixgbe private adapter structure
830 * 909 *
831 * This function performs the required steps for enabling ptp 910 * This function performs the required steps for enabling ptp
832 * support. If ptp support has already been loaded it simply calls the 911 * support. If ptp support has already been loaded it simply calls the
@@ -870,6 +949,10 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
870 return; 949 return;
871 } 950 }
872 951
952 /* initialize the ptp filter */
953 if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter)))
954 e_dev_warn("ptp_filter_init failed\n");
955
873 spin_lock_init(&adapter->tmreg_lock); 956 spin_lock_init(&adapter->tmreg_lock);
874 957
875 ixgbe_ptp_start_cyclecounter(adapter); 958 ixgbe_ptp_start_cyclecounter(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 2d971d18696e..d2854434ad12 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -107,15 +107,21 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
107 "VF drivers to avoid spoofed packet errors\n"); 107 "VF drivers to avoid spoofed packet errors\n");
108 } else { 108 } else {
109 err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); 109 err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
110 if (err) {
111 e_err(probe, "Failed to enable PCI sriov: %d\n", err);
112 goto err_novfs;
113 }
110 } 114 }
111 if (err) {
112 e_err(probe, "Failed to enable PCI sriov: %d\n", err);
113 goto err_novfs;
114 }
115 adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
116 115
116 adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
117 e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs); 117 e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs);
118 118
119 /* Enable VMDq flag so device will be set in VM mode */
120 adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED;
121 if (!adapter->ring_feature[RING_F_VMDQ].limit)
122 adapter->ring_feature[RING_F_VMDQ].limit = 1;
123 adapter->ring_feature[RING_F_VMDQ].offset = adapter->num_vfs;
124
119 num_vf_macvlans = hw->mac.num_rar_entries - 125 num_vf_macvlans = hw->mac.num_rar_entries -
120 (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs); 126 (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs);
121 127
@@ -146,12 +152,39 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
146 * and memory allocated set up the mailbox parameters 152 * and memory allocated set up the mailbox parameters
147 */ 153 */
148 ixgbe_init_mbx_params_pf(hw); 154 ixgbe_init_mbx_params_pf(hw);
149 memcpy(&hw->mbx.ops, ii->mbx_ops, 155 memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
150 sizeof(hw->mbx.ops)); 156
157 /* limit trafffic classes based on VFs enabled */
158 if ((adapter->hw.mac.type == ixgbe_mac_82599EB) &&
159 (adapter->num_vfs < 16)) {
160 adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
161 adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
162 } else if (adapter->num_vfs < 32) {
163 adapter->dcb_cfg.num_tcs.pg_tcs = 4;
164 adapter->dcb_cfg.num_tcs.pfc_tcs = 4;
165 } else {
166 adapter->dcb_cfg.num_tcs.pg_tcs = 1;
167 adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
168 }
169
170 /* We do not support RSS w/ SR-IOV */
171 adapter->ring_feature[RING_F_RSS].limit = 1;
151 172
152 /* Disable RSC when in SR-IOV mode */ 173 /* Disable RSC when in SR-IOV mode */
153 adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE | 174 adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
154 IXGBE_FLAG2_RSC_ENABLED); 175 IXGBE_FLAG2_RSC_ENABLED);
176
177#ifdef IXGBE_FCOE
178 /*
179 * When SR-IOV is enabled 82599 cannot support jumbo frames
180 * so we must disable FCoE because we cannot support FCoE MTU.
181 */
182 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
183 adapter->flags &= ~(IXGBE_FLAG_FCOE_ENABLED |
184 IXGBE_FLAG_FCOE_CAPABLE);
185#endif
186
187 /* enable spoof checking for all VFs */
155 for (i = 0; i < adapter->num_vfs; i++) 188 for (i = 0; i < adapter->num_vfs; i++)
156 adapter->vfinfo[i].spoofchk_enabled = true; 189 adapter->vfinfo[i].spoofchk_enabled = true;
157 return; 190 return;
@@ -171,7 +204,6 @@ err_novfs:
171void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) 204void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
172{ 205{
173 struct ixgbe_hw *hw = &adapter->hw; 206 struct ixgbe_hw *hw = &adapter->hw;
174 u32 gcr;
175 u32 gpie; 207 u32 gpie;
176 u32 vmdctl; 208 u32 vmdctl;
177 int i; 209 int i;
@@ -182,9 +214,7 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
182#endif 214#endif
183 215
184 /* turn off device IOV mode */ 216 /* turn off device IOV mode */
185 gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); 217 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 0);
186 gcr &= ~(IXGBE_GCR_EXT_SRIOV);
187 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
188 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 218 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
189 gpie &= ~IXGBE_GPIE_VTMODE_MASK; 219 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
190 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 220 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
@@ -467,7 +497,7 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
467 bool enable = ((event_mask & 0x10000000U) != 0); 497 bool enable = ((event_mask & 0x10000000U) != 0);
468 498
469 if (enable) { 499 if (enable) {
470 random_ether_addr(vf_mac_addr); 500 eth_random_addr(vf_mac_addr);
471 e_info(probe, "IOV: VF %d is enabled MAC %pM\n", 501 e_info(probe, "IOV: VF %d is enabled MAC %pM\n",
472 vfn, vf_mac_addr); 502 vfn, vf_mac_addr);
473 /* 503 /*
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
index 1d80b1cefa6a..16ddf14e8ba4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
@@ -37,7 +37,6 @@
37#include <linux/netdevice.h> 37#include <linux/netdevice.h>
38#include <linux/hwmon.h> 38#include <linux/hwmon.h>
39 39
40#ifdef CONFIG_IXGBE_HWMON
41/* hwmon callback functions */ 40/* hwmon callback functions */
42static ssize_t ixgbe_hwmon_show_location(struct device *dev, 41static ssize_t ixgbe_hwmon_show_location(struct device *dev,
43 struct device_attribute *attr, 42 struct device_attribute *attr,
@@ -96,11 +95,11 @@ static ssize_t ixgbe_hwmon_show_maxopthresh(struct device *dev,
96 return sprintf(buf, "%u\n", value); 95 return sprintf(buf, "%u\n", value);
97} 96}
98 97
99/* 98/**
100 * ixgbe_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file. 99 * ixgbe_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file.
101 * @ adapter: pointer to the adapter structure 100 * @adapter: pointer to the adapter structure
102 * @ offset: offset in the eeprom sensor data table 101 * @offset: offset in the eeprom sensor data table
103 * @ type: type of sensor data to display 102 * @type: type of sensor data to display
104 * 103 *
105 * For each file we want in hwmon's sysfs interface we need a device_attribute 104 * For each file we want in hwmon's sysfs interface we need a device_attribute
106 * This is included in our hwmon_attr struct that contains the references to 105 * This is included in our hwmon_attr struct that contains the references to
@@ -241,5 +240,4 @@ err:
241exit: 240exit:
242 return rc; 241 return rc;
243} 242}
244#endif /* CONFIG_IXGBE_HWMON */
245 243
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 204848d2448c..7416d22ec227 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -2419,7 +2419,7 @@ typedef u32 ixgbe_physical_layer;
2419 */ 2419 */
2420 2420
2421/* BitTimes (BT) conversion */ 2421/* BitTimes (BT) conversion */
2422#define IXGBE_BT2KB(BT) ((BT + 1023) / (8 * 1024)) 2422#define IXGBE_BT2KB(BT) ((BT + (8 * 1024 - 1)) / (8 * 1024))
2423#define IXGBE_B2BT(BT) (BT * 8) 2423#define IXGBE_B2BT(BT) (BT * 8)
2424 2424
2425/* Calculate Delay to respond to PFC */ 2425/* Calculate Delay to respond to PFC */
@@ -2450,24 +2450,31 @@ typedef u32 ixgbe_physical_layer;
2450#define IXGBE_PCI_DELAY 10000 2450#define IXGBE_PCI_DELAY 10000
2451 2451
2452/* Calculate X540 delay value in bit times */ 2452/* Calculate X540 delay value in bit times */
2453#define IXGBE_FILL_RATE (36 / 25) 2453#define IXGBE_DV_X540(_max_frame_link, _max_frame_tc) \
2454 2454 ((36 * \
2455#define IXGBE_DV_X540(LINK, TC) (IXGBE_FILL_RATE * \ 2455 (IXGBE_B2BT(_max_frame_link) + \
2456 (IXGBE_B2BT(LINK) + IXGBE_PFC_D + \ 2456 IXGBE_PFC_D + \
2457 (2 * IXGBE_CABLE_DC) + \ 2457 (2 * IXGBE_CABLE_DC) + \
2458 (2 * IXGBE_ID_X540) + \ 2458 (2 * IXGBE_ID_X540) + \
2459 IXGBE_HD + IXGBE_B2BT(TC))) 2459 IXGBE_HD) / 25 + 1) + \
2460 2 * IXGBE_B2BT(_max_frame_tc))
2460 2461
2461/* Calculate 82599, 82598 delay value in bit times */ 2462/* Calculate 82599, 82598 delay value in bit times */
2462#define IXGBE_DV(LINK, TC) (IXGBE_FILL_RATE * \ 2463#define IXGBE_DV(_max_frame_link, _max_frame_tc) \
2463 (IXGBE_B2BT(LINK) + IXGBE_PFC_D + \ 2464 ((36 * \
2464 (2 * IXGBE_CABLE_DC) + (2 * IXGBE_ID) + \ 2465 (IXGBE_B2BT(_max_frame_link) + \
2465 IXGBE_HD + IXGBE_B2BT(TC))) 2466 IXGBE_PFC_D + \
2467 (2 * IXGBE_CABLE_DC) + \
2468 (2 * IXGBE_ID) + \
2469 IXGBE_HD) / 25 + 1) + \
2470 2 * IXGBE_B2BT(_max_frame_tc))
2466 2471
2467/* Calculate low threshold delay values */ 2472/* Calculate low threshold delay values */
2468#define IXGBE_LOW_DV_X540(TC) (2 * IXGBE_B2BT(TC) + \ 2473#define IXGBE_LOW_DV_X540(_max_frame_tc) \
2469 (IXGBE_FILL_RATE * IXGBE_PCI_DELAY)) 2474 (2 * IXGBE_B2BT(_max_frame_tc) + \
2470#define IXGBE_LOW_DV(TC) (2 * IXGBE_LOW_DV_X540(TC)) 2475 (36 * IXGBE_PCI_DELAY / 25) + 1)
2476#define IXGBE_LOW_DV(_max_frame_tc) \
2477 (2 * IXGBE_LOW_DV_X540(_max_frame_tc))
2471 2478
2472/* Software ATR hash keys */ 2479/* Software ATR hash keys */
2473#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2 2480#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2
@@ -2597,6 +2604,8 @@ enum ixgbe_sfp_type {
2597 ixgbe_sfp_type_da_act_lmt_core1 = 8, 2604 ixgbe_sfp_type_da_act_lmt_core1 = 8,
2598 ixgbe_sfp_type_1g_cu_core0 = 9, 2605 ixgbe_sfp_type_1g_cu_core0 = 9,
2599 ixgbe_sfp_type_1g_cu_core1 = 10, 2606 ixgbe_sfp_type_1g_cu_core1 = 10,
2607 ixgbe_sfp_type_1g_sx_core0 = 11,
2608 ixgbe_sfp_type_1g_sx_core1 = 12,
2600 ixgbe_sfp_type_not_present = 0xFFFE, 2609 ixgbe_sfp_type_not_present = 0xFFFE,
2601 ixgbe_sfp_type_unknown = 0xFFFF 2610 ixgbe_sfp_type_unknown = 0xFFFF
2602}; 2611};
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index e09a6cc633bb..418af827b230 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -251,6 +251,7 @@ struct ixgbe_adv_tx_context_desc {
251#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ 251#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
252#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ 252#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
253#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ 253#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
254#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */
254#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ 255#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
255#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ 256#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
256 IXGBE_ADVTXD_POPTS_SHIFT) 257 IXGBE_ADVTXD_POPTS_SHIFT)
@@ -264,32 +265,9 @@ struct ixgbe_adv_tx_context_desc {
264 265
265/* Interrupt register bitmasks */ 266/* Interrupt register bitmasks */
266 267
267/* Extended Interrupt Cause Read */
268#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */
269#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
270#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
271
272/* Extended Interrupt Cause Set */
273#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
274#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
275#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
276
277/* Extended Interrupt Mask Set */
278#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
279#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
280#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
281
282/* Extended Interrupt Mask Clear */
283#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
284#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
285#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
286
287#define IXGBE_EIMS_ENABLE_MASK ( \
288 IXGBE_EIMS_RTX_QUEUE | \
289 IXGBE_EIMS_MAILBOX | \
290 IXGBE_EIMS_OTHER)
291
292#define IXGBE_EITR_CNT_WDIS 0x80000000 268#define IXGBE_EITR_CNT_WDIS 0x80000000
269#define IXGBE_MAX_EITR 0x00000FF8
270#define IXGBE_MIN_EITR 8
293 271
294/* Error Codes */ 272/* Error Codes */
295#define IXGBE_ERR_INVALID_MAC_ADDR -1 273#define IXGBE_ERR_INVALID_MAC_ADDR -1
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index e8dddf572d38..8f2070439b59 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -43,7 +43,6 @@
43 43
44#define IXGBE_ALL_RAR_ENTRIES 16 44#define IXGBE_ALL_RAR_ENTRIES 16
45 45
46#ifdef ETHTOOL_GSTATS
47struct ixgbe_stats { 46struct ixgbe_stats {
48 char stat_string[ETH_GSTRING_LEN]; 47 char stat_string[ETH_GSTRING_LEN];
49 int sizeof_stat; 48 int sizeof_stat;
@@ -75,21 +74,17 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
75 zero_base)}, 74 zero_base)},
76 {"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base, 75 {"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base,
77 zero_base)}, 76 zero_base)},
78 {"rx_header_split", IXGBEVF_STAT(rx_hdr_split, zero_base, zero_base)},
79}; 77};
80 78
81#define IXGBE_QUEUE_STATS_LEN 0 79#define IXGBE_QUEUE_STATS_LEN 0
82#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) 80#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
83 81
84#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN) 82#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
85#endif /* ETHTOOL_GSTATS */
86#ifdef ETHTOOL_TEST
87static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { 83static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
88 "Register test (offline)", 84 "Register test (offline)",
89 "Link test (on/offline)" 85 "Link test (on/offline)"
90}; 86};
91#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN) 87#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
92#endif /* ETHTOOL_TEST */
93 88
94static int ixgbevf_get_settings(struct net_device *netdev, 89static int ixgbevf_get_settings(struct net_device *netdev,
95 struct ethtool_cmd *ecmd) 90 struct ethtool_cmd *ecmd)
@@ -289,13 +284,11 @@ static void ixgbevf_get_ringparam(struct net_device *netdev,
289 struct ethtool_ringparam *ring) 284 struct ethtool_ringparam *ring)
290{ 285{
291 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 286 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
292 struct ixgbevf_ring *tx_ring = adapter->tx_ring;
293 struct ixgbevf_ring *rx_ring = adapter->rx_ring;
294 287
295 ring->rx_max_pending = IXGBEVF_MAX_RXD; 288 ring->rx_max_pending = IXGBEVF_MAX_RXD;
296 ring->tx_max_pending = IXGBEVF_MAX_TXD; 289 ring->tx_max_pending = IXGBEVF_MAX_TXD;
297 ring->rx_pending = rx_ring->count; 290 ring->rx_pending = adapter->rx_ring_count;
298 ring->tx_pending = tx_ring->count; 291 ring->tx_pending = adapter->tx_ring_count;
299} 292}
300 293
301static int ixgbevf_set_ringparam(struct net_device *netdev, 294static int ixgbevf_set_ringparam(struct net_device *netdev,
@@ -303,33 +296,28 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
303{ 296{
304 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 297 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
305 struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL; 298 struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
306 int i, err = 0;
307 u32 new_rx_count, new_tx_count; 299 u32 new_rx_count, new_tx_count;
300 int i, err = 0;
308 301
309 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 302 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
310 return -EINVAL; 303 return -EINVAL;
311 304
312 new_rx_count = max(ring->rx_pending, (u32)IXGBEVF_MIN_RXD); 305 new_tx_count = max_t(u32, ring->tx_pending, IXGBEVF_MIN_TXD);
313 new_rx_count = min(new_rx_count, (u32)IXGBEVF_MAX_RXD); 306 new_tx_count = min_t(u32, new_tx_count, IXGBEVF_MAX_TXD);
314 new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
315
316 new_tx_count = max(ring->tx_pending, (u32)IXGBEVF_MIN_TXD);
317 new_tx_count = min(new_tx_count, (u32)IXGBEVF_MAX_TXD);
318 new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); 307 new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
319 308
320 if ((new_tx_count == adapter->tx_ring->count) && 309 new_rx_count = max_t(u32, ring->rx_pending, IXGBEVF_MIN_RXD);
321 (new_rx_count == adapter->rx_ring->count)) { 310 new_rx_count = min_t(u32, new_rx_count, IXGBEVF_MAX_RXD);
322 /* nothing to do */ 311 new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
312
313 /* if nothing to do return success */
314 if ((new_tx_count == adapter->tx_ring_count) &&
315 (new_rx_count == adapter->rx_ring_count))
323 return 0; 316 return 0;
324 }
325 317
326 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) 318 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
327 msleep(1); 319 usleep_range(1000, 2000);
328 320
329 /*
330 * If the adapter isn't up and running then just set the
331 * new parameters and scurry for the exits.
332 */
333 if (!netif_running(adapter->netdev)) { 321 if (!netif_running(adapter->netdev)) {
334 for (i = 0; i < adapter->num_tx_queues; i++) 322 for (i = 0; i < adapter->num_tx_queues; i++)
335 adapter->tx_ring[i].count = new_tx_count; 323 adapter->tx_ring[i].count = new_tx_count;
@@ -340,82 +328,98 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
340 goto clear_reset; 328 goto clear_reset;
341 } 329 }
342 330
343 tx_ring = kcalloc(adapter->num_tx_queues, 331 if (new_tx_count != adapter->tx_ring_count) {
344 sizeof(struct ixgbevf_ring), GFP_KERNEL); 332 tx_ring = vmalloc(adapter->num_tx_queues * sizeof(*tx_ring));
345 if (!tx_ring) { 333 if (!tx_ring) {
346 err = -ENOMEM; 334 err = -ENOMEM;
347 goto clear_reset; 335 goto clear_reset;
348 } 336 }
349
350 rx_ring = kcalloc(adapter->num_rx_queues,
351 sizeof(struct ixgbevf_ring), GFP_KERNEL);
352 if (!rx_ring) {
353 err = -ENOMEM;
354 goto err_rx_setup;
355 }
356
357 ixgbevf_down(adapter);
358 337
359 memcpy(tx_ring, adapter->tx_ring, 338 for (i = 0; i < adapter->num_tx_queues; i++) {
360 adapter->num_tx_queues * sizeof(struct ixgbevf_ring)); 339 /* clone ring and setup updated count */
361 for (i = 0; i < adapter->num_tx_queues; i++) { 340 tx_ring[i] = adapter->tx_ring[i];
362 tx_ring[i].count = new_tx_count; 341 tx_ring[i].count = new_tx_count;
363 err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]); 342 err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]);
364 if (err) { 343 if (!err)
344 continue;
365 while (i) { 345 while (i) {
366 i--; 346 i--;
367 ixgbevf_free_tx_resources(adapter, 347 ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
368 &tx_ring[i]);
369 } 348 }
370 goto err_tx_ring_setup; 349
350 vfree(tx_ring);
351 tx_ring = NULL;
352
353 goto clear_reset;
371 } 354 }
372 tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
373 } 355 }
374 356
375 memcpy(rx_ring, adapter->rx_ring, 357 if (new_rx_count != adapter->rx_ring_count) {
376 adapter->num_rx_queues * sizeof(struct ixgbevf_ring)); 358 rx_ring = vmalloc(adapter->num_rx_queues * sizeof(*rx_ring));
377 for (i = 0; i < adapter->num_rx_queues; i++) { 359 if (!rx_ring) {
378 rx_ring[i].count = new_rx_count; 360 err = -ENOMEM;
379 err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]); 361 goto clear_reset;
380 if (err) { 362 }
363
364 for (i = 0; i < adapter->num_rx_queues; i++) {
365 /* clone ring and setup updated count */
366 rx_ring[i] = adapter->rx_ring[i];
367 rx_ring[i].count = new_rx_count;
368 err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
369 if (!err)
370 continue;
381 while (i) { 371 while (i) {
382 i--; 372 i--;
383 ixgbevf_free_rx_resources(adapter, 373 ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
384 &rx_ring[i]);
385 } 374 }
386 goto err_rx_ring_setup; 375
376 vfree(rx_ring);
377 rx_ring = NULL;
378
379 goto clear_reset;
387 } 380 }
388 rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
389 } 381 }
390 382
391 /* 383 /* bring interface down to prepare for update */
392 * Only switch to new rings if all the prior allocations 384 ixgbevf_down(adapter);
393 * and ring setups have succeeded.
394 */
395 kfree(adapter->tx_ring);
396 adapter->tx_ring = tx_ring;
397 adapter->tx_ring_count = new_tx_count;
398
399 kfree(adapter->rx_ring);
400 adapter->rx_ring = rx_ring;
401 adapter->rx_ring_count = new_rx_count;
402 385
403 /* success! */ 386 /* Tx */
404 ixgbevf_up(adapter); 387 if (tx_ring) {
388 for (i = 0; i < adapter->num_tx_queues; i++) {
389 ixgbevf_free_tx_resources(adapter,
390 &adapter->tx_ring[i]);
391 adapter->tx_ring[i] = tx_ring[i];
392 }
393 adapter->tx_ring_count = new_tx_count;
405 394
406 goto clear_reset; 395 vfree(tx_ring);
396 tx_ring = NULL;
397 }
407 398
408err_rx_ring_setup: 399 /* Rx */
409 for(i = 0; i < adapter->num_tx_queues; i++) 400 if (rx_ring) {
410 ixgbevf_free_tx_resources(adapter, &tx_ring[i]); 401 for (i = 0; i < adapter->num_rx_queues; i++) {
402 ixgbevf_free_rx_resources(adapter,
403 &adapter->rx_ring[i]);
404 adapter->rx_ring[i] = rx_ring[i];
405 }
406 adapter->rx_ring_count = new_rx_count;
411 407
412err_tx_ring_setup: 408 vfree(rx_ring);
413 kfree(rx_ring); 409 rx_ring = NULL;
410 }
414 411
415err_rx_setup: 412 /* restore interface using new values */
416 kfree(tx_ring); 413 ixgbevf_up(adapter);
417 414
418clear_reset: 415clear_reset:
416 /* free Tx resources if Rx error is encountered */
417 if (tx_ring) {
418 for (i = 0; i < adapter->num_tx_queues; i++)
419 ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
420 vfree(tx_ring);
421 }
422
419 clear_bit(__IXGBEVF_RESETTING, &adapter->state); 423 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
420 return err; 424 return err;
421} 425}
@@ -674,10 +678,8 @@ static int ixgbevf_nway_reset(struct net_device *netdev)
674{ 678{
675 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 679 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
676 680
677 if (netif_running(netdev)) { 681 if (netif_running(netdev))
678 if (!adapter->dev_closed) 682 ixgbevf_reinit_locked(adapter);
679 ixgbevf_reinit_locked(adapter);
680 }
681 683
682 return 0; 684 return 0;
683} 685}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 0a1b99240d43..e167d1bb6dea 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -52,12 +52,12 @@ struct ixgbevf_tx_buffer {
52struct ixgbevf_rx_buffer { 52struct ixgbevf_rx_buffer {
53 struct sk_buff *skb; 53 struct sk_buff *skb;
54 dma_addr_t dma; 54 dma_addr_t dma;
55 struct page *page;
56 dma_addr_t page_dma;
57 unsigned int page_offset;
58}; 55};
59 56
60struct ixgbevf_ring { 57struct ixgbevf_ring {
58 struct ixgbevf_ring *next;
59 struct net_device *netdev;
60 struct device *dev;
61 struct ixgbevf_adapter *adapter; /* backlink */ 61 struct ixgbevf_adapter *adapter; /* backlink */
62 void *desc; /* descriptor ring memory */ 62 void *desc; /* descriptor ring memory */
63 dma_addr_t dma; /* phys. address of descriptor ring */ 63 dma_addr_t dma; /* phys. address of descriptor ring */
@@ -83,29 +83,9 @@ struct ixgbevf_ring {
83 * offset associated with this ring, which is different 83 * offset associated with this ring, which is different
84 * for DCB and RSS modes */ 84 * for DCB and RSS modes */
85 85
86#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
87 /* cpu for tx queue */
88 int cpu;
89#endif
90
91 u64 v_idx; /* maps directly to the index for this ring in the hardware
92 * vector array, can also be used for finding the bit in EICR
93 * and friends that represents the vector for this ring */
94
95 u16 work_limit; /* max work per interrupt */
96 u16 rx_buf_len; 86 u16 rx_buf_len;
97}; 87};
98 88
99enum ixgbevf_ring_f_enum {
100 RING_F_NONE = 0,
101 RING_F_ARRAY_SIZE /* must be last in enum set */
102};
103
104struct ixgbevf_ring_feature {
105 int indices;
106 int mask;
107};
108
109/* How many Rx Buffers do we bundle into one write to the hardware ? */ 89/* How many Rx Buffers do we bundle into one write to the hardware ? */
110#define IXGBEVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ 90#define IXGBEVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
111 91
@@ -120,8 +100,6 @@ struct ixgbevf_ring_feature {
120#define IXGBEVF_MIN_RXD 64 100#define IXGBEVF_MIN_RXD 64
121 101
122/* Supported Rx Buffer Sizes */ 102/* Supported Rx Buffer Sizes */
123#define IXGBEVF_RXBUFFER_64 64 /* Used for packet split */
124#define IXGBEVF_RXBUFFER_128 128 /* Used for packet split */
125#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */ 103#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */
126#define IXGBEVF_RXBUFFER_2048 2048 104#define IXGBEVF_RXBUFFER_2048 2048
127#define IXGBEVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */ 105#define IXGBEVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
@@ -140,22 +118,42 @@ struct ixgbevf_ring_feature {
140#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000 118#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
141#define IXGBE_TX_FLAGS_VLAN_SHIFT 16 119#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
142 120
121struct ixgbevf_ring_container {
122 struct ixgbevf_ring *ring; /* pointer to linked list of rings */
123 unsigned int total_bytes; /* total bytes processed this int */
124 unsigned int total_packets; /* total packets processed this int */
125 u8 count; /* total number of rings in vector */
126 u8 itr; /* current ITR setting for ring */
127};
128
129/* iterator for handling rings in ring container */
130#define ixgbevf_for_each_ring(pos, head) \
131 for (pos = (head).ring; pos != NULL; pos = pos->next)
132
143/* MAX_MSIX_Q_VECTORS of these are allocated, 133/* MAX_MSIX_Q_VECTORS of these are allocated,
144 * but we only use one per queue-specific vector. 134 * but we only use one per queue-specific vector.
145 */ 135 */
146struct ixgbevf_q_vector { 136struct ixgbevf_q_vector {
147 struct ixgbevf_adapter *adapter; 137 struct ixgbevf_adapter *adapter;
138 u16 v_idx; /* index of q_vector within array, also used for
139 * finding the bit in EICR and friends that
140 * represents the vector for this ring */
141 u16 itr; /* Interrupt throttle rate written to EITR */
148 struct napi_struct napi; 142 struct napi_struct napi;
149 DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */ 143 struct ixgbevf_ring_container rx, tx;
150 DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */ 144 char name[IFNAMSIZ + 9];
151 u8 rxr_count; /* Rx ring count assigned to this vector */
152 u8 txr_count; /* Tx ring count assigned to this vector */
153 u8 tx_itr;
154 u8 rx_itr;
155 u32 eitr;
156 int v_idx; /* vector index in list */
157}; 145};
158 146
147/*
148 * microsecond values for various ITR rates shifted by 2 to fit itr register
149 * with the first 3 bits reserved 0
150 */
151#define IXGBE_MIN_RSC_ITR 24
152#define IXGBE_100K_ITR 40
153#define IXGBE_20K_ITR 200
154#define IXGBE_10K_ITR 400
155#define IXGBE_8K_ITR 500
156
159/* Helper macros to switch between ints/sec and what the register uses. 157/* Helper macros to switch between ints/sec and what the register uses.
160 * And yes, it's the same math going both ways. The lowest value 158 * And yes, it's the same math going both ways. The lowest value
161 * supported by all of the ixgbe hardware is 8. 159 * supported by all of the ixgbe hardware is 8.
@@ -168,12 +166,12 @@ struct ixgbevf_q_vector {
168 ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ 166 ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
169 (R)->next_to_clean - (R)->next_to_use - 1) 167 (R)->next_to_clean - (R)->next_to_use - 1)
170 168
171#define IXGBE_RX_DESC_ADV(R, i) \ 169#define IXGBEVF_RX_DESC(R, i) \
172 (&(((union ixgbe_adv_rx_desc *)((R).desc))[i])) 170 (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
173#define IXGBE_TX_DESC_ADV(R, i) \ 171#define IXGBEVF_TX_DESC(R, i) \
174 (&(((union ixgbe_adv_tx_desc *)((R).desc))[i])) 172 (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
175#define IXGBE_TX_CTXTDESC_ADV(R, i) \ 173#define IXGBEVF_TX_CTXTDESC(R, i) \
176 (&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i])) 174 (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
177 175
178#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128 176#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
179 177
@@ -181,9 +179,8 @@ struct ixgbevf_q_vector {
181#define NON_Q_VECTORS (OTHER_VECTOR) 179#define NON_Q_VECTORS (OTHER_VECTOR)
182 180
183#define MAX_MSIX_Q_VECTORS 2 181#define MAX_MSIX_Q_VECTORS 2
184#define MAX_MSIX_COUNT 2
185 182
186#define MIN_MSIX_Q_VECTORS 2 183#define MIN_MSIX_Q_VECTORS 1
187#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) 184#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
188 185
189/* board specific private data structure */ 186/* board specific private data structure */
@@ -193,12 +190,14 @@ struct ixgbevf_adapter {
193 u16 bd_number; 190 u16 bd_number;
194 struct work_struct reset_task; 191 struct work_struct reset_task;
195 struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; 192 struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
196 char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
197 193
198 /* Interrupt Throttle Rate */ 194 /* Interrupt Throttle Rate */
199 u32 itr_setting; 195 u16 rx_itr_setting;
200 u16 eitr_low; 196 u16 tx_itr_setting;
201 u16 eitr_high; 197
198 /* interrupt masks */
199 u32 eims_enable_mask;
200 u32 eims_other;
202 201
203 /* TX */ 202 /* TX */
204 struct ixgbevf_ring *tx_ring; /* One per active queue */ 203 struct ixgbevf_ring *tx_ring; /* One per active queue */
@@ -213,18 +212,13 @@ struct ixgbevf_adapter {
213 /* RX */ 212 /* RX */
214 struct ixgbevf_ring *rx_ring; /* One per active queue */ 213 struct ixgbevf_ring *rx_ring; /* One per active queue */
215 int num_rx_queues; 214 int num_rx_queues;
216 int num_rx_pools; /* == num_rx_queues in 82598 */
217 int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
218 u64 hw_csum_rx_error; 215 u64 hw_csum_rx_error;
219 u64 hw_rx_no_dma_resources; 216 u64 hw_rx_no_dma_resources;
220 u64 hw_csum_rx_good; 217 u64 hw_csum_rx_good;
221 u64 non_eop_descs; 218 u64 non_eop_descs;
222 int num_msix_vectors; 219 int num_msix_vectors;
223 int max_msix_q_vectors; /* true count of q_vectors for device */
224 struct ixgbevf_ring_feature ring_feature[RING_F_ARRAY_SIZE];
225 struct msix_entry *msix_entries; 220 struct msix_entry *msix_entries;
226 221
227 u64 rx_hdr_split;
228 u32 alloc_rx_page_failed; 222 u32 alloc_rx_page_failed;
229 u32 alloc_rx_buff_failed; 223 u32 alloc_rx_buff_failed;
230 224
@@ -232,15 +226,8 @@ struct ixgbevf_adapter {
232 * thus the additional *_CAPABLE flags. 226 * thus the additional *_CAPABLE flags.
233 */ 227 */
234 u32 flags; 228 u32 flags;
235#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1) 229#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1)
236#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1) 230
237#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 2)
238#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 3)
239#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 4)
240#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 5)
241#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 6)
242#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 7)
243#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 8)
244 /* OS defined structs */ 231 /* OS defined structs */
245 struct net_device *netdev; 232 struct net_device *netdev;
246 struct pci_dev *pdev; 233 struct pci_dev *pdev;
@@ -254,18 +241,14 @@ struct ixgbevf_adapter {
254 u32 eitr_param; 241 u32 eitr_param;
255 242
256 unsigned long state; 243 unsigned long state;
257 u32 *config_space;
258 u64 tx_busy; 244 u64 tx_busy;
259 unsigned int tx_ring_count; 245 unsigned int tx_ring_count;
260 unsigned int rx_ring_count; 246 unsigned int rx_ring_count;
261 247
262 u32 link_speed; 248 u32 link_speed;
263 bool link_up; 249 bool link_up;
264 unsigned long link_check_timeout;
265 250
266 struct work_struct watchdog_task; 251 struct work_struct watchdog_task;
267 bool netdev_registered;
268 bool dev_closed;
269}; 252};
270 253
271enum ixbgevf_state_t { 254enum ixbgevf_state_t {
@@ -301,11 +284,9 @@ extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *,
301extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *, 284extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *,
302 struct ixgbevf_ring *); 285 struct ixgbevf_ring *);
303extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter); 286extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
304 287void ixgbevf_write_eitr(struct ixgbevf_q_vector *);
305#ifdef ETHTOOL_OPS_COMPAT
306extern int ethtool_ioctl(struct ifreq *ifr); 288extern int ethtool_ioctl(struct ifreq *ifr);
307 289
308#endif
309extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter); 290extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
310extern void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter); 291extern void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
311 292
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 41e32257a4e8..2dc78d7e297a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -42,6 +42,7 @@
42#include <linux/in.h> 42#include <linux/in.h>
43#include <linux/ip.h> 43#include <linux/ip.h>
44#include <linux/tcp.h> 44#include <linux/tcp.h>
45#include <linux/sctp.h>
45#include <linux/ipv6.h> 46#include <linux/ipv6.h>
46#include <linux/slab.h> 47#include <linux/slab.h>
47#include <net/checksum.h> 48#include <net/checksum.h>
@@ -97,9 +98,7 @@ module_param(debug, int, 0);
97MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 98MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
98 99
99/* forward decls */ 100/* forward decls */
100static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector); 101static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
101static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
102 u32 itr_reg);
103 102
104static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw, 103static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
105 struct ixgbevf_ring *rx_ring, 104 struct ixgbevf_ring *rx_ring,
@@ -115,7 +114,7 @@ static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
115 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val); 114 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
116} 115}
117 116
118/* 117/**
119 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors 118 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
120 * @adapter: pointer to adapter struct 119 * @adapter: pointer to adapter struct
121 * @direction: 0 for Rx, 1 for Tx, -1 for other causes 120 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
@@ -146,18 +145,18 @@ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
146 } 145 }
147} 146}
148 147
149static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter, 148static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
150 struct ixgbevf_tx_buffer 149 struct ixgbevf_tx_buffer
151 *tx_buffer_info) 150 *tx_buffer_info)
152{ 151{
153 if (tx_buffer_info->dma) { 152 if (tx_buffer_info->dma) {
154 if (tx_buffer_info->mapped_as_page) 153 if (tx_buffer_info->mapped_as_page)
155 dma_unmap_page(&adapter->pdev->dev, 154 dma_unmap_page(tx_ring->dev,
156 tx_buffer_info->dma, 155 tx_buffer_info->dma,
157 tx_buffer_info->length, 156 tx_buffer_info->length,
158 DMA_TO_DEVICE); 157 DMA_TO_DEVICE);
159 else 158 else
160 dma_unmap_single(&adapter->pdev->dev, 159 dma_unmap_single(tx_ring->dev,
161 tx_buffer_info->dma, 160 tx_buffer_info->dma,
162 tx_buffer_info->length, 161 tx_buffer_info->length,
163 DMA_TO_DEVICE); 162 DMA_TO_DEVICE);
@@ -175,27 +174,20 @@ static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
175#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) 174#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
176 175
177/* Tx Descriptors needed, worst case */ 176/* Tx Descriptors needed, worst case */
178#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \ 177#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
179 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0)) 178#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
180#ifdef MAX_SKB_FRAGS
181#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
182 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
183#else
184#define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD)
185#endif
186 179
187static void ixgbevf_tx_timeout(struct net_device *netdev); 180static void ixgbevf_tx_timeout(struct net_device *netdev);
188 181
189/** 182/**
190 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes 183 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
191 * @adapter: board private structure 184 * @q_vector: board private structure
192 * @tx_ring: tx ring to clean 185 * @tx_ring: tx ring to clean
193 **/ 186 **/
194static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter, 187static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
195 struct ixgbevf_ring *tx_ring) 188 struct ixgbevf_ring *tx_ring)
196{ 189{
197 struct net_device *netdev = adapter->netdev; 190 struct ixgbevf_adapter *adapter = q_vector->adapter;
198 struct ixgbe_hw *hw = &adapter->hw;
199 union ixgbe_adv_tx_desc *tx_desc, *eop_desc; 191 union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
200 struct ixgbevf_tx_buffer *tx_buffer_info; 192 struct ixgbevf_tx_buffer *tx_buffer_info;
201 unsigned int i, eop, count = 0; 193 unsigned int i, eop, count = 0;
@@ -206,10 +198,10 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
206 198
207 i = tx_ring->next_to_clean; 199 i = tx_ring->next_to_clean;
208 eop = tx_ring->tx_buffer_info[i].next_to_watch; 200 eop = tx_ring->tx_buffer_info[i].next_to_watch;
209 eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); 201 eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
210 202
211 while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && 203 while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
212 (count < tx_ring->work_limit)) { 204 (count < tx_ring->count)) {
213 bool cleaned = false; 205 bool cleaned = false;
214 rmb(); /* read buffer_info after eop_desc */ 206 rmb(); /* read buffer_info after eop_desc */
215 /* eop could change between read and DD-check */ 207 /* eop could change between read and DD-check */
@@ -217,7 +209,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
217 goto cont_loop; 209 goto cont_loop;
218 for ( ; !cleaned; count++) { 210 for ( ; !cleaned; count++) {
219 struct sk_buff *skb; 211 struct sk_buff *skb;
220 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); 212 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
221 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 213 tx_buffer_info = &tx_ring->tx_buffer_info[i];
222 cleaned = (i == eop); 214 cleaned = (i == eop);
223 skb = tx_buffer_info->skb; 215 skb = tx_buffer_info->skb;
@@ -234,7 +226,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
234 total_bytes += bytecount; 226 total_bytes += bytecount;
235 } 227 }
236 228
237 ixgbevf_unmap_and_free_tx_resource(adapter, 229 ixgbevf_unmap_and_free_tx_resource(tx_ring,
238 tx_buffer_info); 230 tx_buffer_info);
239 231
240 tx_desc->wb.status = 0; 232 tx_desc->wb.status = 0;
@@ -246,37 +238,25 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
246 238
247cont_loop: 239cont_loop:
248 eop = tx_ring->tx_buffer_info[i].next_to_watch; 240 eop = tx_ring->tx_buffer_info[i].next_to_watch;
249 eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); 241 eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
250 } 242 }
251 243
252 tx_ring->next_to_clean = i; 244 tx_ring->next_to_clean = i;
253 245
254#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 246#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
255 if (unlikely(count && netif_carrier_ok(netdev) && 247 if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
256 (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { 248 (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
257 /* Make sure that anybody stopping the queue after this 249 /* Make sure that anybody stopping the queue after this
258 * sees the new next_to_clean. 250 * sees the new next_to_clean.
259 */ 251 */
260 smp_mb(); 252 smp_mb();
261#ifdef HAVE_TX_MQ 253 if (__netif_subqueue_stopped(tx_ring->netdev,
262 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && 254 tx_ring->queue_index) &&
263 !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
264 netif_wake_subqueue(netdev, tx_ring->queue_index);
265 ++adapter->restart_queue;
266 }
267#else
268 if (netif_queue_stopped(netdev) &&
269 !test_bit(__IXGBEVF_DOWN, &adapter->state)) { 255 !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
270 netif_wake_queue(netdev); 256 netif_wake_subqueue(tx_ring->netdev,
257 tx_ring->queue_index);
271 ++adapter->restart_queue; 258 ++adapter->restart_queue;
272 } 259 }
273#endif
274 }
275
276 /* re-arm the interrupt */
277 if ((count >= tx_ring->work_limit) &&
278 (!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
279 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx);
280 } 260 }
281 261
282 u64_stats_update_begin(&tx_ring->syncp); 262 u64_stats_update_begin(&tx_ring->syncp);
@@ -284,7 +264,7 @@ cont_loop:
284 tx_ring->total_packets += total_packets; 264 tx_ring->total_packets += total_packets;
285 u64_stats_update_end(&tx_ring->syncp); 265 u64_stats_update_end(&tx_ring->syncp);
286 266
287 return count < tx_ring->work_limit; 267 return count < tx_ring->count;
288} 268}
289 269
290/** 270/**
@@ -304,13 +284,10 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
304 bool is_vlan = (status & IXGBE_RXD_STAT_VP); 284 bool is_vlan = (status & IXGBE_RXD_STAT_VP);
305 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); 285 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
306 286
307 if (is_vlan && test_bit(tag, adapter->active_vlans)) 287 if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
308 __vlan_hwaccel_put_tag(skb, tag); 288 __vlan_hwaccel_put_tag(skb, tag);
309 289
310 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) 290 napi_gro_receive(&q_vector->napi, skb);
311 napi_gro_receive(&q_vector->napi, skb);
312 else
313 netif_rx(skb);
314} 291}
315 292
316/** 293/**
@@ -320,12 +297,13 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
320 * @skb: skb currently being received and modified 297 * @skb: skb currently being received and modified
321 **/ 298 **/
322static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter, 299static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
300 struct ixgbevf_ring *ring,
323 u32 status_err, struct sk_buff *skb) 301 u32 status_err, struct sk_buff *skb)
324{ 302{
325 skb_checksum_none_assert(skb); 303 skb_checksum_none_assert(skb);
326 304
327 /* Rx csum disabled */ 305 /* Rx csum disabled */
328 if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED)) 306 if (!(ring->netdev->features & NETIF_F_RXCSUM))
329 return; 307 return;
330 308
331 /* if IP and error */ 309 /* if IP and error */
@@ -360,52 +338,21 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
360 union ixgbe_adv_rx_desc *rx_desc; 338 union ixgbe_adv_rx_desc *rx_desc;
361 struct ixgbevf_rx_buffer *bi; 339 struct ixgbevf_rx_buffer *bi;
362 struct sk_buff *skb; 340 struct sk_buff *skb;
363 unsigned int i; 341 unsigned int i = rx_ring->next_to_use;
364 unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
365 342
366 i = rx_ring->next_to_use;
367 bi = &rx_ring->rx_buffer_info[i]; 343 bi = &rx_ring->rx_buffer_info[i];
368 344
369 while (cleaned_count--) { 345 while (cleaned_count--) {
370 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); 346 rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
371
372 if (!bi->page_dma &&
373 (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
374 if (!bi->page) {
375 bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
376 if (!bi->page) {
377 adapter->alloc_rx_page_failed++;
378 goto no_buffers;
379 }
380 bi->page_offset = 0;
381 } else {
382 /* use a half page if we're re-using */
383 bi->page_offset ^= (PAGE_SIZE / 2);
384 }
385
386 bi->page_dma = dma_map_page(&pdev->dev, bi->page,
387 bi->page_offset,
388 (PAGE_SIZE / 2),
389 DMA_FROM_DEVICE);
390 }
391
392 skb = bi->skb; 347 skb = bi->skb;
393 if (!skb) { 348 if (!skb) {
394 skb = netdev_alloc_skb(adapter->netdev, 349 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
395 bufsz); 350 rx_ring->rx_buf_len);
396
397 if (!skb) { 351 if (!skb) {
398 adapter->alloc_rx_buff_failed++; 352 adapter->alloc_rx_buff_failed++;
399 goto no_buffers; 353 goto no_buffers;
400 } 354 }
401 355
402 /*
403 * Make buffer alignment 2 beyond a 16 byte boundary
404 * this will result in a 16 byte aligned IP header after
405 * the 14 byte MAC header is removed
406 */
407 skb_reserve(skb, NET_IP_ALIGN);
408
409 bi->skb = skb; 356 bi->skb = skb;
410 } 357 }
411 if (!bi->dma) { 358 if (!bi->dma) {
@@ -413,14 +360,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
413 rx_ring->rx_buf_len, 360 rx_ring->rx_buf_len,
414 DMA_FROM_DEVICE); 361 DMA_FROM_DEVICE);
415 } 362 }
416 /* Refresh the desc even if buffer_addrs didn't change because 363 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
417 * each write-back erases this info. */
418 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
419 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
420 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
421 } else {
422 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
423 }
424 364
425 i++; 365 i++;
426 if (i == rx_ring->count) 366 if (i == rx_ring->count)
@@ -431,36 +371,22 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
431no_buffers: 371no_buffers:
432 if (rx_ring->next_to_use != i) { 372 if (rx_ring->next_to_use != i) {
433 rx_ring->next_to_use = i; 373 rx_ring->next_to_use = i;
434 if (i-- == 0)
435 i = (rx_ring->count - 1);
436 374
437 ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i); 375 ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
438 } 376 }
439} 377}
440 378
441static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter, 379static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
442 u64 qmask) 380 u32 qmask)
443{ 381{
444 u32 mask;
445 struct ixgbe_hw *hw = &adapter->hw; 382 struct ixgbe_hw *hw = &adapter->hw;
446 383
447 mask = (qmask & 0xFFFFFFFF); 384 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
448 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
449}
450
451static inline u16 ixgbevf_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
452{
453 return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
454}
455
456static inline u16 ixgbevf_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
457{
458 return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
459} 385}
460 386
461static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, 387static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
462 struct ixgbevf_ring *rx_ring, 388 struct ixgbevf_ring *rx_ring,
463 int *work_done, int work_to_do) 389 int budget)
464{ 390{
465 struct ixgbevf_adapter *adapter = q_vector->adapter; 391 struct ixgbevf_adapter *adapter = q_vector->adapter;
466 struct pci_dev *pdev = adapter->pdev; 392 struct pci_dev *pdev = adapter->pdev;
@@ -469,36 +395,21 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
469 struct sk_buff *skb; 395 struct sk_buff *skb;
470 unsigned int i; 396 unsigned int i;
471 u32 len, staterr; 397 u32 len, staterr;
472 u16 hdr_info;
473 bool cleaned = false;
474 int cleaned_count = 0; 398 int cleaned_count = 0;
475 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 399 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
476 400
477 i = rx_ring->next_to_clean; 401 i = rx_ring->next_to_clean;
478 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); 402 rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
479 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 403 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
480 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 404 rx_buffer_info = &rx_ring->rx_buffer_info[i];
481 405
482 while (staterr & IXGBE_RXD_STAT_DD) { 406 while (staterr & IXGBE_RXD_STAT_DD) {
483 u32 upper_len = 0; 407 if (!budget)
484 if (*work_done >= work_to_do)
485 break; 408 break;
486 (*work_done)++; 409 budget--;
487 410
488 rmb(); /* read descriptor and rx_buffer_info after status DD */ 411 rmb(); /* read descriptor and rx_buffer_info after status DD */
489 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 412 len = le16_to_cpu(rx_desc->wb.upper.length);
490 hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc));
491 len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
492 IXGBE_RXDADV_HDRBUFLEN_SHIFT;
493 if (hdr_info & IXGBE_RXDADV_SPH)
494 adapter->rx_hdr_split++;
495 if (len > IXGBEVF_RX_HDR_SIZE)
496 len = IXGBEVF_RX_HDR_SIZE;
497 upper_len = le16_to_cpu(rx_desc->wb.upper.length);
498 } else {
499 len = le16_to_cpu(rx_desc->wb.upper.length);
500 }
501 cleaned = true;
502 skb = rx_buffer_info->skb; 413 skb = rx_buffer_info->skb;
503 prefetch(skb->data - NET_IP_ALIGN); 414 prefetch(skb->data - NET_IP_ALIGN);
504 rx_buffer_info->skb = NULL; 415 rx_buffer_info->skb = NULL;
@@ -511,46 +422,19 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
511 skb_put(skb, len); 422 skb_put(skb, len);
512 } 423 }
513 424
514 if (upper_len) {
515 dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
516 PAGE_SIZE / 2, DMA_FROM_DEVICE);
517 rx_buffer_info->page_dma = 0;
518 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
519 rx_buffer_info->page,
520 rx_buffer_info->page_offset,
521 upper_len);
522
523 if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
524 (page_count(rx_buffer_info->page) != 1))
525 rx_buffer_info->page = NULL;
526 else
527 get_page(rx_buffer_info->page);
528
529 skb->len += upper_len;
530 skb->data_len += upper_len;
531 skb->truesize += upper_len;
532 }
533
534 i++; 425 i++;
535 if (i == rx_ring->count) 426 if (i == rx_ring->count)
536 i = 0; 427 i = 0;
537 428
538 next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i); 429 next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
539 prefetch(next_rxd); 430 prefetch(next_rxd);
540 cleaned_count++; 431 cleaned_count++;
541 432
542 next_buffer = &rx_ring->rx_buffer_info[i]; 433 next_buffer = &rx_ring->rx_buffer_info[i];
543 434
544 if (!(staterr & IXGBE_RXD_STAT_EOP)) { 435 if (!(staterr & IXGBE_RXD_STAT_EOP)) {
545 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 436 skb->next = next_buffer->skb;
546 rx_buffer_info->skb = next_buffer->skb; 437 skb->next->prev = skb;
547 rx_buffer_info->dma = next_buffer->dma;
548 next_buffer->skb = skb;
549 next_buffer->dma = 0;
550 } else {
551 skb->next = next_buffer->skb;
552 skb->next->prev = skb;
553 }
554 adapter->non_eop_descs++; 438 adapter->non_eop_descs++;
555 goto next_desc; 439 goto next_desc;
556 } 440 }
@@ -561,7 +445,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
561 goto next_desc; 445 goto next_desc;
562 } 446 }
563 447
564 ixgbevf_rx_checksum(adapter, staterr, skb); 448 ixgbevf_rx_checksum(adapter, rx_ring, staterr, skb);
565 449
566 /* probably a little skewed due to removing CRC */ 450 /* probably a little skewed due to removing CRC */
567 total_rx_bytes += skb->len; 451 total_rx_bytes += skb->len;
@@ -576,7 +460,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
576 if (header_fixup_len < 14) 460 if (header_fixup_len < 14)
577 skb_push(skb, header_fixup_len); 461 skb_push(skb, header_fixup_len);
578 } 462 }
579 skb->protocol = eth_type_trans(skb, adapter->netdev); 463 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
580 464
581 ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc); 465 ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
582 466
@@ -608,92 +492,52 @@ next_desc:
608 rx_ring->total_bytes += total_rx_bytes; 492 rx_ring->total_bytes += total_rx_bytes;
609 u64_stats_update_end(&rx_ring->syncp); 493 u64_stats_update_end(&rx_ring->syncp);
610 494
611 return cleaned; 495 return !!budget;
612} 496}
613 497
614/** 498/**
615 * ixgbevf_clean_rxonly - msix (aka one shot) rx clean routine 499 * ixgbevf_poll - NAPI polling calback
616 * @napi: napi struct with our devices info in it 500 * @napi: napi struct with our devices info in it
617 * @budget: amount of work driver is allowed to do this pass, in packets 501 * @budget: amount of work driver is allowed to do this pass, in packets
618 * 502 *
619 * This function is optimized for cleaning one queue only on a single 503 * This function will clean more than one or more rings associated with a
620 * q_vector!!!
621 **/
622static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget)
623{
624 struct ixgbevf_q_vector *q_vector =
625 container_of(napi, struct ixgbevf_q_vector, napi);
626 struct ixgbevf_adapter *adapter = q_vector->adapter;
627 struct ixgbevf_ring *rx_ring = NULL;
628 int work_done = 0;
629 long r_idx;
630
631 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
632 rx_ring = &(adapter->rx_ring[r_idx]);
633
634 ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
635
636 /* If all Rx work done, exit the polling mode */
637 if (work_done < budget) {
638 napi_complete(napi);
639 if (adapter->itr_setting & 1)
640 ixgbevf_set_itr_msix(q_vector);
641 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
642 ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx);
643 }
644
645 return work_done;
646}
647
648/**
649 * ixgbevf_clean_rxonly_many - msix (aka one shot) rx clean routine
650 * @napi: napi struct with our devices info in it
651 * @budget: amount of work driver is allowed to do this pass, in packets
652 *
653 * This function will clean more than one rx queue associated with a
654 * q_vector. 504 * q_vector.
655 **/ 505 **/
656static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget) 506static int ixgbevf_poll(struct napi_struct *napi, int budget)
657{ 507{
658 struct ixgbevf_q_vector *q_vector = 508 struct ixgbevf_q_vector *q_vector =
659 container_of(napi, struct ixgbevf_q_vector, napi); 509 container_of(napi, struct ixgbevf_q_vector, napi);
660 struct ixgbevf_adapter *adapter = q_vector->adapter; 510 struct ixgbevf_adapter *adapter = q_vector->adapter;
661 struct ixgbevf_ring *rx_ring = NULL; 511 struct ixgbevf_ring *ring;
662 int work_done = 0, i; 512 int per_ring_budget;
663 long r_idx; 513 bool clean_complete = true;
664 u64 enable_mask = 0; 514
515 ixgbevf_for_each_ring(ring, q_vector->tx)
516 clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
665 517
666 /* attempt to distribute budget to each queue fairly, but don't allow 518 /* attempt to distribute budget to each queue fairly, but don't allow
667 * the budget to go below 1 because we'll exit polling */ 519 * the budget to go below 1 because we'll exit polling */
668 budget /= (q_vector->rxr_count ?: 1); 520 if (q_vector->rx.count > 1)
669 budget = max(budget, 1); 521 per_ring_budget = max(budget/q_vector->rx.count, 1);
670 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 522 else
671 for (i = 0; i < q_vector->rxr_count; i++) { 523 per_ring_budget = budget;
672 rx_ring = &(adapter->rx_ring[r_idx]); 524
673 ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget); 525 ixgbevf_for_each_ring(ring, q_vector->rx)
674 enable_mask |= rx_ring->v_idx; 526 clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
675 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, 527 per_ring_budget);
676 r_idx + 1); 528
677 } 529 /* If all work not completed, return budget and keep polling */
678 530 if (!clean_complete)
679#ifndef HAVE_NETDEV_NAPI_LIST 531 return budget;
680 if (!netif_running(adapter->netdev)) 532 /* all work done, exit the polling mode */
681 work_done = 0; 533 napi_complete(napi);
682 534 if (adapter->rx_itr_setting & 1)
683#endif 535 ixgbevf_set_itr(q_vector);
684 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 536 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
685 rx_ring = &(adapter->rx_ring[r_idx]); 537 ixgbevf_irq_enable_queues(adapter,
686 538 1 << q_vector->v_idx);
687 /* If all Rx work done, exit the polling mode */
688 if (work_done < budget) {
689 napi_complete(napi);
690 if (adapter->itr_setting & 1)
691 ixgbevf_set_itr_msix(q_vector);
692 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
693 ixgbevf_irq_enable_queues(adapter, enable_mask);
694 }
695 539
696 return work_done; 540 return 0;
697} 541}
698 542
699 543
@@ -707,56 +551,49 @@ static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
707static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) 551static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
708{ 552{
709 struct ixgbevf_q_vector *q_vector; 553 struct ixgbevf_q_vector *q_vector;
710 struct ixgbe_hw *hw = &adapter->hw; 554 int q_vectors, v_idx;
711 int i, j, q_vectors, v_idx, r_idx;
712 u32 mask;
713 555
714 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 556 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
557 adapter->eims_enable_mask = 0;
715 558
716 /* 559 /*
717 * Populate the IVAR table and set the ITR values to the 560 * Populate the IVAR table and set the ITR values to the
718 * corresponding register. 561 * corresponding register.
719 */ 562 */
720 for (v_idx = 0; v_idx < q_vectors; v_idx++) { 563 for (v_idx = 0; v_idx < q_vectors; v_idx++) {
564 struct ixgbevf_ring *ring;
721 q_vector = adapter->q_vector[v_idx]; 565 q_vector = adapter->q_vector[v_idx];
722 /* XXX for_each_set_bit(...) */ 566
723 r_idx = find_first_bit(q_vector->rxr_idx, 567 ixgbevf_for_each_ring(ring, q_vector->rx)
724 adapter->num_rx_queues); 568 ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);
725 569
726 for (i = 0; i < q_vector->rxr_count; i++) { 570 ixgbevf_for_each_ring(ring, q_vector->tx)
727 j = adapter->rx_ring[r_idx].reg_idx; 571 ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
728 ixgbevf_set_ivar(adapter, 0, j, v_idx); 572
729 r_idx = find_next_bit(q_vector->rxr_idx, 573 if (q_vector->tx.ring && !q_vector->rx.ring) {
730 adapter->num_rx_queues, 574 /* tx only vector */
731 r_idx + 1); 575 if (adapter->tx_itr_setting == 1)
732 } 576 q_vector->itr = IXGBE_10K_ITR;
733 r_idx = find_first_bit(q_vector->txr_idx, 577 else
734 adapter->num_tx_queues); 578 q_vector->itr = adapter->tx_itr_setting;
735 579 } else {
736 for (i = 0; i < q_vector->txr_count; i++) { 580 /* rx or rx/tx vector */
737 j = adapter->tx_ring[r_idx].reg_idx; 581 if (adapter->rx_itr_setting == 1)
738 ixgbevf_set_ivar(adapter, 1, j, v_idx); 582 q_vector->itr = IXGBE_20K_ITR;
739 r_idx = find_next_bit(q_vector->txr_idx, 583 else
740 adapter->num_tx_queues, 584 q_vector->itr = adapter->rx_itr_setting;
741 r_idx + 1);
742 } 585 }
743 586
744 /* if this is a tx only vector halve the interrupt rate */ 587 /* add q_vector eims value to global eims_enable_mask */
745 if (q_vector->txr_count && !q_vector->rxr_count) 588 adapter->eims_enable_mask |= 1 << v_idx;
746 q_vector->eitr = (adapter->eitr_param >> 1);
747 else if (q_vector->rxr_count)
748 /* rx only */
749 q_vector->eitr = adapter->eitr_param;
750 589
751 ixgbevf_write_eitr(adapter, v_idx, q_vector->eitr); 590 ixgbevf_write_eitr(q_vector);
752 } 591 }
753 592
754 ixgbevf_set_ivar(adapter, -1, 1, v_idx); 593 ixgbevf_set_ivar(adapter, -1, 1, v_idx);
755 594 /* setup eims_other and add value to global eims_enable_mask */
756 /* set up to autoclear timer, and the vectors */ 595 adapter->eims_other = 1 << v_idx;
757 mask = IXGBE_EIMS_ENABLE_MASK; 596 adapter->eims_enable_mask |= adapter->eims_other;
758 mask &= ~IXGBE_EIMS_OTHER;
759 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
760} 597}
761 598
762enum latency_range { 599enum latency_range {
@@ -768,11 +605,8 @@ enum latency_range {
768 605
769/** 606/**
770 * ixgbevf_update_itr - update the dynamic ITR value based on statistics 607 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
771 * @adapter: pointer to adapter 608 * @q_vector: structure containing interrupt and ring information
772 * @eitr: eitr setting (ints per sec) to give last timeslice 609 * @ring_container: structure containing ring performance data
773 * @itr_setting: current throttle rate in ints/second
774 * @packets: the number of packets during this measurement interval
775 * @bytes: the number of bytes during this measurement interval
776 * 610 *
777 * Stores a new ITR value based on packets and byte 611 * Stores a new ITR value based on packets and byte
778 * counts during the last interrupt. The advantage of per interrupt 612 * counts during the last interrupt. The advantage of per interrupt
@@ -782,17 +616,17 @@ enum latency_range {
782 * on testing data as well as attempting to minimize response time 616 * on testing data as well as attempting to minimize response time
783 * while increasing bulk throughput. 617 * while increasing bulk throughput.
784 **/ 618 **/
785static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter, 619static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
786 u32 eitr, u8 itr_setting, 620 struct ixgbevf_ring_container *ring_container)
787 int packets, int bytes)
788{ 621{
789 unsigned int retval = itr_setting; 622 int bytes = ring_container->total_bytes;
623 int packets = ring_container->total_packets;
790 u32 timepassed_us; 624 u32 timepassed_us;
791 u64 bytes_perint; 625 u64 bytes_perint;
626 u8 itr_setting = ring_container->itr;
792 627
793 if (packets == 0) 628 if (packets == 0)
794 goto update_itr_done; 629 return;
795
796 630
797 /* simple throttlerate management 631 /* simple throttlerate management
798 * 0-20MB/s lowest (100000 ints/s) 632 * 0-20MB/s lowest (100000 ints/s)
@@ -800,46 +634,48 @@ static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
800 * 100-1249MB/s bulk (8000 ints/s) 634 * 100-1249MB/s bulk (8000 ints/s)
801 */ 635 */
802 /* what was last interrupt timeslice? */ 636 /* what was last interrupt timeslice? */
803 timepassed_us = 1000000/eitr; 637 timepassed_us = q_vector->itr >> 2;
804 bytes_perint = bytes / timepassed_us; /* bytes/usec */ 638 bytes_perint = bytes / timepassed_us; /* bytes/usec */
805 639
806 switch (itr_setting) { 640 switch (itr_setting) {
807 case lowest_latency: 641 case lowest_latency:
808 if (bytes_perint > adapter->eitr_low) 642 if (bytes_perint > 10)
809 retval = low_latency; 643 itr_setting = low_latency;
810 break; 644 break;
811 case low_latency: 645 case low_latency:
812 if (bytes_perint > adapter->eitr_high) 646 if (bytes_perint > 20)
813 retval = bulk_latency; 647 itr_setting = bulk_latency;
814 else if (bytes_perint <= adapter->eitr_low) 648 else if (bytes_perint <= 10)
815 retval = lowest_latency; 649 itr_setting = lowest_latency;
816 break; 650 break;
817 case bulk_latency: 651 case bulk_latency:
818 if (bytes_perint <= adapter->eitr_high) 652 if (bytes_perint <= 20)
819 retval = low_latency; 653 itr_setting = low_latency;
820 break; 654 break;
821 } 655 }
822 656
823update_itr_done: 657 /* clear work counters since we have the values we need */
824 return retval; 658 ring_container->total_bytes = 0;
659 ring_container->total_packets = 0;
660
661 /* write updated itr to ring container */
662 ring_container->itr = itr_setting;
825} 663}
826 664
827/** 665/**
828 * ixgbevf_write_eitr - write VTEITR register in hardware specific way 666 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
829 * @adapter: pointer to adapter struct 667 * @q_vector: structure containing interrupt and ring information
830 * @v_idx: vector index into q_vector array
831 * @itr_reg: new value to be written in *register* format, not ints/s
832 * 668 *
833 * This function is made to be called by ethtool and by the driver 669 * This function is made to be called by ethtool and by the driver
834 * when it needs to update VTEITR registers at runtime. Hardware 670 * when it needs to update VTEITR registers at runtime. Hardware
835 * specific quirks/differences are taken care of here. 671 * specific quirks/differences are taken care of here.
836 */ 672 */
837static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx, 673void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
838 u32 itr_reg)
839{ 674{
675 struct ixgbevf_adapter *adapter = q_vector->adapter;
840 struct ixgbe_hw *hw = &adapter->hw; 676 struct ixgbe_hw *hw = &adapter->hw;
841 677 int v_idx = q_vector->v_idx;
842 itr_reg = EITR_INTS_PER_SEC_TO_REG(itr_reg); 678 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
843 679
844 /* 680 /*
845 * set the WDIS bit to not clear the timer bits and cause an 681 * set the WDIS bit to not clear the timer bits and cause an
@@ -850,84 +686,49 @@ static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
850 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg); 686 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
851} 687}
852 688
853static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector) 689static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
854{ 690{
855 struct ixgbevf_adapter *adapter = q_vector->adapter; 691 u32 new_itr = q_vector->itr;
856 u32 new_itr; 692 u8 current_itr;
857 u8 current_itr, ret_itr; 693
858 int i, r_idx, v_idx = q_vector->v_idx; 694 ixgbevf_update_itr(q_vector, &q_vector->tx);
859 struct ixgbevf_ring *rx_ring, *tx_ring; 695 ixgbevf_update_itr(q_vector, &q_vector->rx);
860 696
861 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 697 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
862 for (i = 0; i < q_vector->txr_count; i++) {
863 tx_ring = &(adapter->tx_ring[r_idx]);
864 ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
865 q_vector->tx_itr,
866 tx_ring->total_packets,
867 tx_ring->total_bytes);
868 /* if the result for this queue would decrease interrupt
869 * rate for this vector then use that result */
870 q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
871 q_vector->tx_itr - 1 : ret_itr);
872 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
873 r_idx + 1);
874 }
875
876 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
877 for (i = 0; i < q_vector->rxr_count; i++) {
878 rx_ring = &(adapter->rx_ring[r_idx]);
879 ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
880 q_vector->rx_itr,
881 rx_ring->total_packets,
882 rx_ring->total_bytes);
883 /* if the result for this queue would decrease interrupt
884 * rate for this vector then use that result */
885 q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
886 q_vector->rx_itr - 1 : ret_itr);
887 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
888 r_idx + 1);
889 }
890
891 current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
892 698
893 switch (current_itr) { 699 switch (current_itr) {
894 /* counts and packets in update_itr are dependent on these numbers */ 700 /* counts and packets in update_itr are dependent on these numbers */
895 case lowest_latency: 701 case lowest_latency:
896 new_itr = 100000; 702 new_itr = IXGBE_100K_ITR;
897 break; 703 break;
898 case low_latency: 704 case low_latency:
899 new_itr = 20000; /* aka hwitr = ~200 */ 705 new_itr = IXGBE_20K_ITR;
900 break; 706 break;
901 case bulk_latency: 707 case bulk_latency:
902 default: 708 default:
903 new_itr = 8000; 709 new_itr = IXGBE_8K_ITR;
904 break; 710 break;
905 } 711 }
906 712
907 if (new_itr != q_vector->eitr) { 713 if (new_itr != q_vector->itr) {
908 u32 itr_reg;
909
910 /* save the algorithm value here, not the smoothed one */
911 q_vector->eitr = new_itr;
912 /* do an exponential smoothing */ 714 /* do an exponential smoothing */
913 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); 715 new_itr = (10 * new_itr * q_vector->itr) /
914 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr); 716 ((9 * new_itr) + q_vector->itr);
915 ixgbevf_write_eitr(adapter, v_idx, itr_reg); 717
718 /* save the algorithm value here */
719 q_vector->itr = new_itr;
720
721 ixgbevf_write_eitr(q_vector);
916 } 722 }
917} 723}
918 724
919static irqreturn_t ixgbevf_msix_mbx(int irq, void *data) 725static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
920{ 726{
921 struct net_device *netdev = data; 727 struct ixgbevf_adapter *adapter = data;
922 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
923 struct ixgbe_hw *hw = &adapter->hw; 728 struct ixgbe_hw *hw = &adapter->hw;
924 u32 eicr;
925 u32 msg; 729 u32 msg;
926 bool got_ack = false; 730 bool got_ack = false;
927 731
928 eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
929 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);
930
931 if (!hw->mbx.ops.check_for_ack(hw)) 732 if (!hw->mbx.ops.check_for_ack(hw))
932 got_ack = true; 733 got_ack = true;
933 734
@@ -956,63 +757,24 @@ static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
956 if (got_ack) 757 if (got_ack)
957 hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK; 758 hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
958 759
959 return IRQ_HANDLED; 760 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
960}
961
962static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
963{
964 struct ixgbevf_q_vector *q_vector = data;
965 struct ixgbevf_adapter *adapter = q_vector->adapter;
966 struct ixgbevf_ring *tx_ring;
967 int i, r_idx;
968
969 if (!q_vector->txr_count)
970 return IRQ_HANDLED;
971
972 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
973 for (i = 0; i < q_vector->txr_count; i++) {
974 tx_ring = &(adapter->tx_ring[r_idx]);
975 ixgbevf_clean_tx_irq(adapter, tx_ring);
976 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
977 r_idx + 1);
978 }
979
980 if (adapter->itr_setting & 1)
981 ixgbevf_set_itr_msix(q_vector);
982 761
983 return IRQ_HANDLED; 762 return IRQ_HANDLED;
984} 763}
985 764
765
986/** 766/**
987 * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues) 767 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
988 * @irq: unused 768 * @irq: unused
989 * @data: pointer to our q_vector struct for this interrupt vector 769 * @data: pointer to our q_vector struct for this interrupt vector
990 **/ 770 **/
991static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data) 771static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
992{ 772{
993 struct ixgbevf_q_vector *q_vector = data; 773 struct ixgbevf_q_vector *q_vector = data;
994 struct ixgbevf_adapter *adapter = q_vector->adapter;
995 struct ixgbe_hw *hw = &adapter->hw;
996 struct ixgbevf_ring *rx_ring;
997 int r_idx;
998
999 if (!q_vector->rxr_count)
1000 return IRQ_HANDLED;
1001
1002 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1003 rx_ring = &(adapter->rx_ring[r_idx]);
1004 /* disable interrupts on this vector only */
1005 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx);
1006 napi_schedule(&q_vector->napi);
1007
1008
1009 return IRQ_HANDLED;
1010}
1011 774
1012static irqreturn_t ixgbevf_msix_clean_many(int irq, void *data) 775 /* EIAM disabled interrupts (on this vector) for us */
1013{ 776 if (q_vector->rx.ring || q_vector->tx.ring)
1014 ixgbevf_msix_clean_rx(irq, data); 777 napi_schedule(&q_vector->napi);
1015 ixgbevf_msix_clean_tx(irq, data);
1016 778
1017 return IRQ_HANDLED; 779 return IRQ_HANDLED;
1018} 780}
@@ -1022,9 +784,9 @@ static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
1022{ 784{
1023 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx]; 785 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
1024 786
1025 set_bit(r_idx, q_vector->rxr_idx); 787 a->rx_ring[r_idx].next = q_vector->rx.ring;
1026 q_vector->rxr_count++; 788 q_vector->rx.ring = &a->rx_ring[r_idx];
1027 a->rx_ring[r_idx].v_idx = 1 << v_idx; 789 q_vector->rx.count++;
1028} 790}
1029 791
1030static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx, 792static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
@@ -1032,9 +794,9 @@ static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
1032{ 794{
1033 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx]; 795 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
1034 796
1035 set_bit(t_idx, q_vector->txr_idx); 797 a->tx_ring[t_idx].next = q_vector->tx.ring;
1036 q_vector->txr_count++; 798 q_vector->tx.ring = &a->tx_ring[t_idx];
1037 a->tx_ring[t_idx].v_idx = 1 << v_idx; 799 q_vector->tx.count++;
1038} 800}
1039 801
1040/** 802/**
@@ -1110,37 +872,30 @@ out:
1110static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) 872static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
1111{ 873{
1112 struct net_device *netdev = adapter->netdev; 874 struct net_device *netdev = adapter->netdev;
1113 irqreturn_t (*handler)(int, void *); 875 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1114 int i, vector, q_vectors, err; 876 int vector, err;
1115 int ri = 0, ti = 0; 877 int ri = 0, ti = 0;
1116 878
1117 /* Decrement for Other and TCP Timer vectors */
1118 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1119
1120#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \
1121 ? &ixgbevf_msix_clean_many : \
1122 (_v)->rxr_count ? &ixgbevf_msix_clean_rx : \
1123 (_v)->txr_count ? &ixgbevf_msix_clean_tx : \
1124 NULL)
1125 for (vector = 0; vector < q_vectors; vector++) { 879 for (vector = 0; vector < q_vectors; vector++) {
1126 handler = SET_HANDLER(adapter->q_vector[vector]); 880 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
1127 881 struct msix_entry *entry = &adapter->msix_entries[vector];
1128 if (handler == &ixgbevf_msix_clean_rx) { 882
1129 sprintf(adapter->name[vector], "%s-%s-%d", 883 if (q_vector->tx.ring && q_vector->rx.ring) {
1130 netdev->name, "rx", ri++); 884 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1131 } else if (handler == &ixgbevf_msix_clean_tx) { 885 "%s-%s-%d", netdev->name, "TxRx", ri++);
1132 sprintf(adapter->name[vector], "%s-%s-%d", 886 ti++;
1133 netdev->name, "tx", ti++); 887 } else if (q_vector->rx.ring) {
1134 } else if (handler == &ixgbevf_msix_clean_many) { 888 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1135 sprintf(adapter->name[vector], "%s-%s-%d", 889 "%s-%s-%d", netdev->name, "rx", ri++);
1136 netdev->name, "TxRx", vector); 890 } else if (q_vector->tx.ring) {
891 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
892 "%s-%s-%d", netdev->name, "tx", ti++);
1137 } else { 893 } else {
1138 /* skip this unused q_vector */ 894 /* skip this unused q_vector */
1139 continue; 895 continue;
1140 } 896 }
1141 err = request_irq(adapter->msix_entries[vector].vector, 897 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
1142 handler, 0, adapter->name[vector], 898 q_vector->name, q_vector);
1143 adapter->q_vector[vector]);
1144 if (err) { 899 if (err) {
1145 hw_dbg(&adapter->hw, 900 hw_dbg(&adapter->hw,
1146 "request_irq failed for MSIX interrupt " 901 "request_irq failed for MSIX interrupt "
@@ -1149,9 +904,8 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
1149 } 904 }
1150 } 905 }
1151 906
1152 sprintf(adapter->name[vector], "%s:mbx", netdev->name);
1153 err = request_irq(adapter->msix_entries[vector].vector, 907 err = request_irq(adapter->msix_entries[vector].vector,
1154 &ixgbevf_msix_mbx, 0, adapter->name[vector], netdev); 908 &ixgbevf_msix_mbx, 0, netdev->name, adapter);
1155 if (err) { 909 if (err) {
1156 hw_dbg(&adapter->hw, 910 hw_dbg(&adapter->hw,
1157 "request_irq for msix_mbx failed: %d\n", err); 911 "request_irq for msix_mbx failed: %d\n", err);
@@ -1161,9 +915,11 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
1161 return 0; 915 return 0;
1162 916
1163free_queue_irqs: 917free_queue_irqs:
1164 for (i = vector - 1; i >= 0; i--) 918 while (vector) {
1165 free_irq(adapter->msix_entries[--vector].vector, 919 vector--;
1166 &(adapter->q_vector[i])); 920 free_irq(adapter->msix_entries[vector].vector,
921 adapter->q_vector[vector]);
922 }
1167 pci_disable_msix(adapter->pdev); 923 pci_disable_msix(adapter->pdev);
1168 kfree(adapter->msix_entries); 924 kfree(adapter->msix_entries);
1169 adapter->msix_entries = NULL; 925 adapter->msix_entries = NULL;
@@ -1176,11 +932,10 @@ static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
1176 932
1177 for (i = 0; i < q_vectors; i++) { 933 for (i = 0; i < q_vectors; i++) {
1178 struct ixgbevf_q_vector *q_vector = adapter->q_vector[i]; 934 struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
1179 bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES); 935 q_vector->rx.ring = NULL;
1180 bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES); 936 q_vector->tx.ring = NULL;
1181 q_vector->rxr_count = 0; 937 q_vector->rx.count = 0;
1182 q_vector->txr_count = 0; 938 q_vector->tx.count = 0;
1183 q_vector->eitr = adapter->eitr_param;
1184 } 939 }
1185} 940}
1186 941
@@ -1206,17 +961,20 @@ static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
1206 961
1207static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter) 962static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
1208{ 963{
1209 struct net_device *netdev = adapter->netdev;
1210 int i, q_vectors; 964 int i, q_vectors;
1211 965
1212 q_vectors = adapter->num_msix_vectors; 966 q_vectors = adapter->num_msix_vectors;
1213
1214 i = q_vectors - 1; 967 i = q_vectors - 1;
1215 968
1216 free_irq(adapter->msix_entries[i].vector, netdev); 969 free_irq(adapter->msix_entries[i].vector, adapter);
1217 i--; 970 i--;
1218 971
1219 for (; i >= 0; i--) { 972 for (; i >= 0; i--) {
973 /* free only the irqs that were actually requested */
974 if (!adapter->q_vector[i]->rx.ring &&
975 !adapter->q_vector[i]->tx.ring)
976 continue;
977
1220 free_irq(adapter->msix_entries[i].vector, 978 free_irq(adapter->msix_entries[i].vector,
1221 adapter->q_vector[i]); 979 adapter->q_vector[i]);
1222 } 980 }
@@ -1230,10 +988,12 @@ static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
1230 **/ 988 **/
1231static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter) 989static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1232{ 990{
1233 int i;
1234 struct ixgbe_hw *hw = &adapter->hw; 991 struct ixgbe_hw *hw = &adapter->hw;
992 int i;
1235 993
994 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
1236 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0); 995 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
996 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
1237 997
1238 IXGBE_WRITE_FLUSH(hw); 998 IXGBE_WRITE_FLUSH(hw);
1239 999
@@ -1245,23 +1005,13 @@ static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1245 * ixgbevf_irq_enable - Enable default interrupt generation settings 1005 * ixgbevf_irq_enable - Enable default interrupt generation settings
1246 * @adapter: board private structure 1006 * @adapter: board private structure
1247 **/ 1007 **/
1248static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter, 1008static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
1249 bool queues, bool flush)
1250{ 1009{
1251 struct ixgbe_hw *hw = &adapter->hw; 1010 struct ixgbe_hw *hw = &adapter->hw;
1252 u32 mask;
1253 u64 qmask;
1254
1255 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1256 qmask = ~0;
1257
1258 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
1259
1260 if (queues)
1261 ixgbevf_irq_enable_queues(adapter, qmask);
1262 1011
1263 if (flush) 1012 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
1264 IXGBE_WRITE_FLUSH(hw); 1013 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
1014 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
1265} 1015}
1266 1016
1267/** 1017/**
@@ -1311,29 +1061,14 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
1311 1061
1312 srrctl = IXGBE_SRRCTL_DROP_EN; 1062 srrctl = IXGBE_SRRCTL_DROP_EN;
1313 1063
1314 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 1064 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1315 u16 bufsz = IXGBEVF_RXBUFFER_2048;
1316 /* grow the amount we can receive on large page machines */
1317 if (bufsz < (PAGE_SIZE / 2))
1318 bufsz = (PAGE_SIZE / 2);
1319 /* cap the bufsz at our largest descriptor size */
1320 bufsz = min((u16)IXGBEVF_MAX_RXBUFFER, bufsz);
1321
1322 srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1323 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
1324 srrctl |= ((IXGBEVF_RX_HDR_SIZE <<
1325 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
1326 IXGBE_SRRCTL_BSIZEHDR_MASK);
1327 } else {
1328 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1329 1065
1330 if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE) 1066 if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
1331 srrctl |= IXGBEVF_RXBUFFER_2048 >> 1067 srrctl |= IXGBEVF_RXBUFFER_2048 >>
1332 IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1068 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1333 else 1069 else
1334 srrctl |= rx_ring->rx_buf_len >> 1070 srrctl |= rx_ring->rx_buf_len >>
1335 IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1071 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1336 }
1337 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl); 1072 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1338} 1073}
1339 1074
@@ -1353,36 +1088,12 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1353 u32 rdlen; 1088 u32 rdlen;
1354 int rx_buf_len; 1089 int rx_buf_len;
1355 1090
1356 /* Decide whether to use packet split mode or not */ 1091 /* PSRTYPE must be initialized in 82599 */
1357 if (netdev->mtu > ETH_DATA_LEN) { 1092 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
1358 if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE) 1093 if (netdev->mtu <= ETH_DATA_LEN)
1359 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; 1094 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1360 else 1095 else
1361 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; 1096 rx_buf_len = ALIGN(max_frame, 1024);
1362 } else {
1363 if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE)
1364 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
1365 else
1366 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
1367 }
1368
1369 /* Set the RX buffer length according to the mode */
1370 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
1371 /* PSRTYPE must be initialized in 82599 */
1372 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
1373 IXGBE_PSRTYPE_UDPHDR |
1374 IXGBE_PSRTYPE_IPV4HDR |
1375 IXGBE_PSRTYPE_IPV6HDR |
1376 IXGBE_PSRTYPE_L2HDR;
1377 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1378 rx_buf_len = IXGBEVF_RX_HDR_SIZE;
1379 } else {
1380 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
1381 if (netdev->mtu <= ETH_DATA_LEN)
1382 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1383 else
1384 rx_buf_len = ALIGN(max_frame, 1024);
1385 }
1386 1097
1387 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc); 1098 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
1388 /* Setup the HW Rx Head and Tail Descriptor Pointers and 1099 /* Setup the HW Rx Head and Tail Descriptor Pointers and
@@ -1493,15 +1204,8 @@ static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
1493 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1204 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1494 1205
1495 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 1206 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1496 struct napi_struct *napi;
1497 q_vector = adapter->q_vector[q_idx]; 1207 q_vector = adapter->q_vector[q_idx];
1498 if (!q_vector->rxr_count) 1208 napi_enable(&q_vector->napi);
1499 continue;
1500 napi = &q_vector->napi;
1501 if (q_vector->rxr_count > 1)
1502 napi->poll = &ixgbevf_clean_rxonly_many;
1503
1504 napi_enable(napi);
1505 } 1209 }
1506} 1210}
1507 1211
@@ -1513,8 +1217,6 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
1513 1217
1514 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 1218 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1515 q_vector = adapter->q_vector[q_idx]; 1219 q_vector = adapter->q_vector[q_idx];
1516 if (!q_vector->rxr_count)
1517 continue;
1518 napi_disable(&q_vector->napi); 1220 napi_disable(&q_vector->napi);
1519 } 1221 }
1520} 1222}
@@ -1532,9 +1234,8 @@ static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
1532 ixgbevf_configure_rx(adapter); 1234 ixgbevf_configure_rx(adapter);
1533 for (i = 0; i < adapter->num_rx_queues; i++) { 1235 for (i = 0; i < adapter->num_rx_queues; i++) {
1534 struct ixgbevf_ring *ring = &adapter->rx_ring[i]; 1236 struct ixgbevf_ring *ring = &adapter->rx_ring[i];
1535 ixgbevf_alloc_rx_buffers(adapter, ring, ring->count); 1237 ixgbevf_alloc_rx_buffers(adapter, ring,
1536 ring->next_to_use = ring->count - 1; 1238 IXGBE_DESC_UNUSED(ring));
1537 writel(ring->next_to_use, adapter->hw.hw_addr + ring->tail);
1538 } 1239 }
1539} 1240}
1540 1241
@@ -1658,10 +1359,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1658 ixgbevf_save_reset_stats(adapter); 1359 ixgbevf_save_reset_stats(adapter);
1659 ixgbevf_init_last_counter_stats(adapter); 1360 ixgbevf_init_last_counter_stats(adapter);
1660 1361
1661 /* bring the link up in the watchdog, this could race with our first
1662 * link up interrupt but shouldn't be a problem */
1663 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
1664 adapter->link_check_timeout = jiffies;
1665 mod_timer(&adapter->watchdog_timer, jiffies); 1362 mod_timer(&adapter->watchdog_timer, jiffies);
1666} 1363}
1667 1364
@@ -1676,7 +1373,7 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
1676 /* clear any pending interrupts, may auto mask */ 1373 /* clear any pending interrupts, may auto mask */
1677 IXGBE_READ_REG(hw, IXGBE_VTEICR); 1374 IXGBE_READ_REG(hw, IXGBE_VTEICR);
1678 1375
1679 ixgbevf_irq_enable(adapter, true, true); 1376 ixgbevf_irq_enable(adapter);
1680} 1377}
1681 1378
1682/** 1379/**
@@ -1714,14 +1411,6 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
1714 dev_kfree_skb(this); 1411 dev_kfree_skb(this);
1715 } while (skb); 1412 } while (skb);
1716 } 1413 }
1717 if (!rx_buffer_info->page)
1718 continue;
1719 dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
1720 PAGE_SIZE / 2, DMA_FROM_DEVICE);
1721 rx_buffer_info->page_dma = 0;
1722 put_page(rx_buffer_info->page);
1723 rx_buffer_info->page = NULL;
1724 rx_buffer_info->page_offset = 0;
1725 } 1414 }
1726 1415
1727 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; 1416 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
@@ -1758,7 +1447,7 @@ static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
1758 1447
1759 for (i = 0; i < tx_ring->count; i++) { 1448 for (i = 0; i < tx_ring->count; i++) {
1760 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 1449 tx_buffer_info = &tx_ring->tx_buffer_info[i];
1761 ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info); 1450 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
1762 } 1451 }
1763 1452
1764 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; 1453 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
@@ -1891,10 +1580,9 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
1891{ 1580{
1892 int err, vector_threshold; 1581 int err, vector_threshold;
1893 1582
1894 /* We'll want at least 3 (vector_threshold): 1583 /* We'll want at least 2 (vector_threshold):
1895 * 1) TxQ[0] Cleanup 1584 * 1) TxQ[0] + RxQ[0] handler
1896 * 2) RxQ[0] Cleanup 1585 * 2) Other (Link Status Change, etc.)
1897 * 3) Other (Link Status Change, etc.)
1898 */ 1586 */
1899 vector_threshold = MIN_MSIX_COUNT; 1587 vector_threshold = MIN_MSIX_COUNT;
1900 1588
@@ -1933,8 +1621,8 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
1933 } 1621 }
1934} 1622}
1935 1623
1936/* 1624/**
1937 * ixgbevf_set_num_queues: Allocate queues for device, feature dependent 1625 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
1938 * @adapter: board private structure to initialize 1626 * @adapter: board private structure to initialize
1939 * 1627 *
1940 * This is the top level queue allocation routine. The order here is very 1628 * This is the top level queue allocation routine. The order here is very
@@ -1949,8 +1637,6 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
1949 /* Start with base case */ 1637 /* Start with base case */
1950 adapter->num_rx_queues = 1; 1638 adapter->num_rx_queues = 1;
1951 adapter->num_tx_queues = 1; 1639 adapter->num_tx_queues = 1;
1952 adapter->num_rx_pools = adapter->num_rx_queues;
1953 adapter->num_rx_queues_per_pool = 1;
1954} 1640}
1955 1641
1956/** 1642/**
@@ -1979,12 +1665,16 @@ static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
1979 adapter->tx_ring[i].count = adapter->tx_ring_count; 1665 adapter->tx_ring[i].count = adapter->tx_ring_count;
1980 adapter->tx_ring[i].queue_index = i; 1666 adapter->tx_ring[i].queue_index = i;
1981 adapter->tx_ring[i].reg_idx = i; 1667 adapter->tx_ring[i].reg_idx = i;
1668 adapter->tx_ring[i].dev = &adapter->pdev->dev;
1669 adapter->tx_ring[i].netdev = adapter->netdev;
1982 } 1670 }
1983 1671
1984 for (i = 0; i < adapter->num_rx_queues; i++) { 1672 for (i = 0; i < adapter->num_rx_queues; i++) {
1985 adapter->rx_ring[i].count = adapter->rx_ring_count; 1673 adapter->rx_ring[i].count = adapter->rx_ring_count;
1986 adapter->rx_ring[i].queue_index = i; 1674 adapter->rx_ring[i].queue_index = i;
1987 adapter->rx_ring[i].reg_idx = i; 1675 adapter->rx_ring[i].reg_idx = i;
1676 adapter->rx_ring[i].dev = &adapter->pdev->dev;
1677 adapter->rx_ring[i].netdev = adapter->netdev;
1988 } 1678 }
1989 1679
1990 return 0; 1680 return 0;
@@ -2011,10 +1701,12 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
2011 * It's easy to be greedy for MSI-X vectors, but it really 1701 * It's easy to be greedy for MSI-X vectors, but it really
2012 * doesn't do us much good if we have a lot more vectors 1702 * doesn't do us much good if we have a lot more vectors
2013 * than CPU's. So let's be conservative and only ask for 1703 * than CPU's. So let's be conservative and only ask for
2014 * (roughly) twice the number of vectors as there are CPU's. 1704 * (roughly) the same number of vectors as there are CPU's.
1705 * The default is to use pairs of vectors.
2015 */ 1706 */
2016 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues, 1707 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
2017 (int)(num_online_cpus() * 2)) + NON_Q_VECTORS; 1708 v_budget = min_t(int, v_budget, num_online_cpus());
1709 v_budget += NON_Q_VECTORS;
2018 1710
2019 /* A failure in MSI-X entry allocation isn't fatal, but it does 1711 /* A failure in MSI-X entry allocation isn't fatal, but it does
2020 * mean we disable MSI-X capabilities of the adapter. */ 1712 * mean we disable MSI-X capabilities of the adapter. */
@@ -2045,12 +1737,8 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
2045{ 1737{
2046 int q_idx, num_q_vectors; 1738 int q_idx, num_q_vectors;
2047 struct ixgbevf_q_vector *q_vector; 1739 struct ixgbevf_q_vector *q_vector;
2048 int napi_vectors;
2049 int (*poll)(struct napi_struct *, int);
2050 1740
2051 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1741 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2052 napi_vectors = adapter->num_rx_queues;
2053 poll = &ixgbevf_clean_rxonly;
2054 1742
2055 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 1743 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2056 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL); 1744 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
@@ -2058,10 +1746,8 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
2058 goto err_out; 1746 goto err_out;
2059 q_vector->adapter = adapter; 1747 q_vector->adapter = adapter;
2060 q_vector->v_idx = q_idx; 1748 q_vector->v_idx = q_idx;
2061 q_vector->eitr = adapter->eitr_param; 1749 netif_napi_add(adapter->netdev, &q_vector->napi,
2062 if (q_idx < napi_vectors) 1750 ixgbevf_poll, 64);
2063 netif_napi_add(adapter->netdev, &q_vector->napi,
2064 (*poll), 64);
2065 adapter->q_vector[q_idx] = q_vector; 1751 adapter->q_vector[q_idx] = q_vector;
2066 } 1752 }
2067 1753
@@ -2208,20 +1894,13 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2208 } 1894 }
2209 1895
2210 /* Enable dynamic interrupt throttling rates */ 1896 /* Enable dynamic interrupt throttling rates */
2211 adapter->eitr_param = 20000; 1897 adapter->rx_itr_setting = 1;
2212 adapter->itr_setting = 1; 1898 adapter->tx_itr_setting = 1;
2213
2214 /* set defaults for eitr in MegaBytes */
2215 adapter->eitr_low = 10;
2216 adapter->eitr_high = 20;
2217 1899
2218 /* set default ring sizes */ 1900 /* set default ring sizes */
2219 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD; 1901 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
2220 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD; 1902 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
2221 1903
2222 /* enable rx csum by default */
2223 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
2224
2225 set_bit(__IXGBEVF_DOWN, &adapter->state); 1904 set_bit(__IXGBEVF_DOWN, &adapter->state);
2226 return 0; 1905 return 0;
2227 1906
@@ -2281,7 +1960,7 @@ static void ixgbevf_watchdog(unsigned long data)
2281{ 1960{
2282 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data; 1961 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2283 struct ixgbe_hw *hw = &adapter->hw; 1962 struct ixgbe_hw *hw = &adapter->hw;
2284 u64 eics = 0; 1963 u32 eics = 0;
2285 int i; 1964 int i;
2286 1965
2287 /* 1966 /*
@@ -2295,11 +1974,11 @@ static void ixgbevf_watchdog(unsigned long data)
2295 /* get one bit for every active tx/rx interrupt vector */ 1974 /* get one bit for every active tx/rx interrupt vector */
2296 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { 1975 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2297 struct ixgbevf_q_vector *qv = adapter->q_vector[i]; 1976 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
2298 if (qv->rxr_count || qv->txr_count) 1977 if (qv->rx.ring || qv->tx.ring)
2299 eics |= (1 << i); 1978 eics |= 1 << i;
2300 } 1979 }
2301 1980
2302 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, (u32)eics); 1981 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
2303 1982
2304watchdog_short_circuit: 1983watchdog_short_circuit:
2305 schedule_work(&adapter->watchdog_task); 1984 schedule_work(&adapter->watchdog_task);
@@ -2469,7 +2148,6 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
2469 2148
2470 tx_ring->next_to_use = 0; 2149 tx_ring->next_to_use = 0;
2471 tx_ring->next_to_clean = 0; 2150 tx_ring->next_to_clean = 0;
2472 tx_ring->work_limit = tx_ring->count;
2473 return 0; 2151 return 0;
2474 2152
2475err: 2153err:
@@ -2673,7 +2351,7 @@ static int ixgbevf_open(struct net_device *netdev)
2673 if (err) 2351 if (err)
2674 goto err_req_irq; 2352 goto err_req_irq;
2675 2353
2676 ixgbevf_irq_enable(adapter, true, true); 2354 ixgbevf_irq_enable(adapter);
2677 2355
2678 return 0; 2356 return 0;
2679 2357
@@ -2715,172 +2393,153 @@ static int ixgbevf_close(struct net_device *netdev)
2715 return 0; 2393 return 0;
2716} 2394}
2717 2395
2718static int ixgbevf_tso(struct ixgbevf_adapter *adapter, 2396static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
2719 struct ixgbevf_ring *tx_ring, 2397 u32 vlan_macip_lens, u32 type_tucmd,
2720 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) 2398 u32 mss_l4len_idx)
2721{ 2399{
2722 struct ixgbe_adv_tx_context_desc *context_desc; 2400 struct ixgbe_adv_tx_context_desc *context_desc;
2723 unsigned int i; 2401 u16 i = tx_ring->next_to_use;
2724 int err;
2725 struct ixgbevf_tx_buffer *tx_buffer_info;
2726 u32 vlan_macip_lens = 0, type_tucmd_mlhl;
2727 u32 mss_l4len_idx, l4len;
2728 2402
2729 if (skb_is_gso(skb)) { 2403 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
2730 if (skb_header_cloned(skb)) {
2731 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2732 if (err)
2733 return err;
2734 }
2735 l4len = tcp_hdrlen(skb);
2736 *hdr_len += l4len;
2737
2738 if (skb->protocol == htons(ETH_P_IP)) {
2739 struct iphdr *iph = ip_hdr(skb);
2740 iph->tot_len = 0;
2741 iph->check = 0;
2742 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2743 iph->daddr, 0,
2744 IPPROTO_TCP,
2745 0);
2746 adapter->hw_tso_ctxt++;
2747 } else if (skb_is_gso_v6(skb)) {
2748 ipv6_hdr(skb)->payload_len = 0;
2749 tcp_hdr(skb)->check =
2750 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2751 &ipv6_hdr(skb)->daddr,
2752 0, IPPROTO_TCP, 0);
2753 adapter->hw_tso6_ctxt++;
2754 }
2755 2404
2756 i = tx_ring->next_to_use; 2405 i++;
2406 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2757 2407
2758 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2408 /* set bits to identify this as an advanced context descriptor */
2759 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i); 2409 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2760
2761 /* VLAN MACLEN IPLEN */
2762 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
2763 vlan_macip_lens |=
2764 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
2765 vlan_macip_lens |= ((skb_network_offset(skb)) <<
2766 IXGBE_ADVTXD_MACLEN_SHIFT);
2767 *hdr_len += skb_network_offset(skb);
2768 vlan_macip_lens |=
2769 (skb_transport_header(skb) - skb_network_header(skb));
2770 *hdr_len +=
2771 (skb_transport_header(skb) - skb_network_header(skb));
2772 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
2773 context_desc->seqnum_seed = 0;
2774
2775 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2776 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
2777 IXGBE_ADVTXD_DTYP_CTXT);
2778
2779 if (skb->protocol == htons(ETH_P_IP))
2780 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2781 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2782 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
2783
2784 /* MSS L4LEN IDX */
2785 mss_l4len_idx =
2786 (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
2787 mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
2788 /* use index 1 for TSO */
2789 mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
2790 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
2791
2792 tx_buffer_info->time_stamp = jiffies;
2793 tx_buffer_info->next_to_watch = i;
2794 2410
2795 i++; 2411 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
2796 if (i == tx_ring->count) 2412 context_desc->seqnum_seed = 0;
2797 i = 0; 2413 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
2798 tx_ring->next_to_use = i; 2414 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
2415}
2799 2416
2800 return true; 2417static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
2418 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
2419{
2420 u32 vlan_macip_lens, type_tucmd;
2421 u32 mss_l4len_idx, l4len;
2422
2423 if (!skb_is_gso(skb))
2424 return 0;
2425
2426 if (skb_header_cloned(skb)) {
2427 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2428 if (err)
2429 return err;
2430 }
2431
2432 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2433 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
2434
2435 if (skb->protocol == htons(ETH_P_IP)) {
2436 struct iphdr *iph = ip_hdr(skb);
2437 iph->tot_len = 0;
2438 iph->check = 0;
2439 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2440 iph->daddr, 0,
2441 IPPROTO_TCP,
2442 0);
2443 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2444 } else if (skb_is_gso_v6(skb)) {
2445 ipv6_hdr(skb)->payload_len = 0;
2446 tcp_hdr(skb)->check =
2447 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2448 &ipv6_hdr(skb)->daddr,
2449 0, IPPROTO_TCP, 0);
2801 } 2450 }
2802 2451
2803 return false; 2452 /* compute header lengths */
2453 l4len = tcp_hdrlen(skb);
2454 *hdr_len += l4len;
2455 *hdr_len = skb_transport_offset(skb) + l4len;
2456
2457 /* mss_l4len_id: use 1 as index for TSO */
2458 mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
2459 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
2460 mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
2461
2462 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
2463 vlan_macip_lens = skb_network_header_len(skb);
2464 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2465 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2466
2467 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2468 type_tucmd, mss_l4len_idx);
2469
2470 return 1;
2804} 2471}
2805 2472
2806static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter, 2473static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
2807 struct ixgbevf_ring *tx_ring,
2808 struct sk_buff *skb, u32 tx_flags) 2474 struct sk_buff *skb, u32 tx_flags)
2809{ 2475{
2810 struct ixgbe_adv_tx_context_desc *context_desc;
2811 unsigned int i;
2812 struct ixgbevf_tx_buffer *tx_buffer_info;
2813 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2814 2476
2815 if (skb->ip_summed == CHECKSUM_PARTIAL ||
2816 (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
2817 i = tx_ring->next_to_use;
2818 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2819 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
2820
2821 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
2822 vlan_macip_lens |= (tx_flags &
2823 IXGBE_TX_FLAGS_VLAN_MASK);
2824 vlan_macip_lens |= (skb_network_offset(skb) <<
2825 IXGBE_ADVTXD_MACLEN_SHIFT);
2826 if (skb->ip_summed == CHECKSUM_PARTIAL)
2827 vlan_macip_lens |= (skb_transport_header(skb) -
2828 skb_network_header(skb));
2829
2830 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
2831 context_desc->seqnum_seed = 0;
2832
2833 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
2834 IXGBE_ADVTXD_DTYP_CTXT);
2835
2836 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2837 switch (skb->protocol) {
2838 case __constant_htons(ETH_P_IP):
2839 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2840 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2841 type_tucmd_mlhl |=
2842 IXGBE_ADVTXD_TUCMD_L4T_TCP;
2843 break;
2844 case __constant_htons(ETH_P_IPV6):
2845 /* XXX what about other V6 headers?? */
2846 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2847 type_tucmd_mlhl |=
2848 IXGBE_ADVTXD_TUCMD_L4T_TCP;
2849 break;
2850 default:
2851 if (unlikely(net_ratelimit())) {
2852 pr_warn("partial checksum but "
2853 "proto=%x!\n", skb->protocol);
2854 }
2855 break;
2856 }
2857 }
2858 2477
2859 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
2860 /* use index zero for tx checksum offload */
2861 context_desc->mss_l4len_idx = 0;
2862 2478
2863 tx_buffer_info->time_stamp = jiffies; 2479 u32 vlan_macip_lens = 0;
2864 tx_buffer_info->next_to_watch = i; 2480 u32 mss_l4len_idx = 0;
2481 u32 type_tucmd = 0;
2865 2482
2866 adapter->hw_csum_tx_good++; 2483 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2867 i++; 2484 u8 l4_hdr = 0;
2868 if (i == tx_ring->count) 2485 switch (skb->protocol) {
2869 i = 0; 2486 case __constant_htons(ETH_P_IP):
2870 tx_ring->next_to_use = i; 2487 vlan_macip_lens |= skb_network_header_len(skb);
2488 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2489 l4_hdr = ip_hdr(skb)->protocol;
2490 break;
2491 case __constant_htons(ETH_P_IPV6):
2492 vlan_macip_lens |= skb_network_header_len(skb);
2493 l4_hdr = ipv6_hdr(skb)->nexthdr;
2494 break;
2495 default:
2496 if (unlikely(net_ratelimit())) {
2497 dev_warn(tx_ring->dev,
2498 "partial checksum but proto=%x!\n",
2499 skb->protocol);
2500 }
2501 break;
2502 }
2871 2503
2872 return true; 2504 switch (l4_hdr) {
2505 case IPPROTO_TCP:
2506 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2507 mss_l4len_idx = tcp_hdrlen(skb) <<
2508 IXGBE_ADVTXD_L4LEN_SHIFT;
2509 break;
2510 case IPPROTO_SCTP:
2511 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
2512 mss_l4len_idx = sizeof(struct sctphdr) <<
2513 IXGBE_ADVTXD_L4LEN_SHIFT;
2514 break;
2515 case IPPROTO_UDP:
2516 mss_l4len_idx = sizeof(struct udphdr) <<
2517 IXGBE_ADVTXD_L4LEN_SHIFT;
2518 break;
2519 default:
2520 if (unlikely(net_ratelimit())) {
2521 dev_warn(tx_ring->dev,
2522 "partial checksum but l4 proto=%x!\n",
2523 l4_hdr);
2524 }
2525 break;
2526 }
2873 } 2527 }
2874 2528
2875 return false; 2529 /* vlan_macip_lens: MACLEN, VLAN tag */
2530 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2531 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2532
2533 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2534 type_tucmd, mss_l4len_idx);
2535
2536 return (skb->ip_summed == CHECKSUM_PARTIAL);
2876} 2537}
2877 2538
2878static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter, 2539static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
2879 struct ixgbevf_ring *tx_ring,
2880 struct sk_buff *skb, u32 tx_flags, 2540 struct sk_buff *skb, u32 tx_flags,
2881 unsigned int first) 2541 unsigned int first)
2882{ 2542{
2883 struct pci_dev *pdev = adapter->pdev;
2884 struct ixgbevf_tx_buffer *tx_buffer_info; 2543 struct ixgbevf_tx_buffer *tx_buffer_info;
2885 unsigned int len; 2544 unsigned int len;
2886 unsigned int total = skb->len; 2545 unsigned int total = skb->len;
@@ -2899,12 +2558,11 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
2899 2558
2900 tx_buffer_info->length = size; 2559 tx_buffer_info->length = size;
2901 tx_buffer_info->mapped_as_page = false; 2560 tx_buffer_info->mapped_as_page = false;
2902 tx_buffer_info->dma = dma_map_single(&adapter->pdev->dev, 2561 tx_buffer_info->dma = dma_map_single(tx_ring->dev,
2903 skb->data + offset, 2562 skb->data + offset,
2904 size, DMA_TO_DEVICE); 2563 size, DMA_TO_DEVICE);
2905 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma)) 2564 if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
2906 goto dma_error; 2565 goto dma_error;
2907 tx_buffer_info->time_stamp = jiffies;
2908 tx_buffer_info->next_to_watch = i; 2566 tx_buffer_info->next_to_watch = i;
2909 2567
2910 len -= size; 2568 len -= size;
@@ -2929,12 +2587,12 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
2929 2587
2930 tx_buffer_info->length = size; 2588 tx_buffer_info->length = size;
2931 tx_buffer_info->dma = 2589 tx_buffer_info->dma =
2932 skb_frag_dma_map(&adapter->pdev->dev, frag, 2590 skb_frag_dma_map(tx_ring->dev, frag,
2933 offset, size, DMA_TO_DEVICE); 2591 offset, size, DMA_TO_DEVICE);
2934 tx_buffer_info->mapped_as_page = true; 2592 tx_buffer_info->mapped_as_page = true;
2935 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma)) 2593 if (dma_mapping_error(tx_ring->dev,
2594 tx_buffer_info->dma))
2936 goto dma_error; 2595 goto dma_error;
2937 tx_buffer_info->time_stamp = jiffies;
2938 tx_buffer_info->next_to_watch = i; 2596 tx_buffer_info->next_to_watch = i;
2939 2597
2940 len -= size; 2598 len -= size;
@@ -2955,15 +2613,15 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
2955 i = i - 1; 2613 i = i - 1;
2956 tx_ring->tx_buffer_info[i].skb = skb; 2614 tx_ring->tx_buffer_info[i].skb = skb;
2957 tx_ring->tx_buffer_info[first].next_to_watch = i; 2615 tx_ring->tx_buffer_info[first].next_to_watch = i;
2616 tx_ring->tx_buffer_info[first].time_stamp = jiffies;
2958 2617
2959 return count; 2618 return count;
2960 2619
2961dma_error: 2620dma_error:
2962 dev_err(&pdev->dev, "TX DMA map failed\n"); 2621 dev_err(tx_ring->dev, "TX DMA map failed\n");
2963 2622
2964 /* clear timestamp and dma mappings for failed tx_buffer_info map */ 2623 /* clear timestamp and dma mappings for failed tx_buffer_info map */
2965 tx_buffer_info->dma = 0; 2624 tx_buffer_info->dma = 0;
2966 tx_buffer_info->time_stamp = 0;
2967 tx_buffer_info->next_to_watch = 0; 2625 tx_buffer_info->next_to_watch = 0;
2968 count--; 2626 count--;
2969 2627
@@ -2974,14 +2632,13 @@ dma_error:
2974 if (i < 0) 2632 if (i < 0)
2975 i += tx_ring->count; 2633 i += tx_ring->count;
2976 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2634 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2977 ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info); 2635 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
2978 } 2636 }
2979 2637
2980 return count; 2638 return count;
2981} 2639}
2982 2640
2983static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter, 2641static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
2984 struct ixgbevf_ring *tx_ring, int tx_flags,
2985 int count, u32 paylen, u8 hdr_len) 2642 int count, u32 paylen, u8 hdr_len)
2986{ 2643{
2987 union ixgbe_adv_tx_desc *tx_desc = NULL; 2644 union ixgbe_adv_tx_desc *tx_desc = NULL;
@@ -2998,28 +2655,31 @@ static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
2998 if (tx_flags & IXGBE_TX_FLAGS_VLAN) 2655 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
2999 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE; 2656 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
3000 2657
2658 if (tx_flags & IXGBE_TX_FLAGS_CSUM)
2659 olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;
2660
3001 if (tx_flags & IXGBE_TX_FLAGS_TSO) { 2661 if (tx_flags & IXGBE_TX_FLAGS_TSO) {
3002 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; 2662 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
3003 2663
3004 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
3005 IXGBE_ADVTXD_POPTS_SHIFT;
3006
3007 /* use index 1 context for tso */ 2664 /* use index 1 context for tso */
3008 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); 2665 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
3009 if (tx_flags & IXGBE_TX_FLAGS_IPV4) 2666 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3010 olinfo_status |= IXGBE_TXD_POPTS_IXSM << 2667 olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
3011 IXGBE_ADVTXD_POPTS_SHIFT; 2668
2669 }
3012 2670
3013 } else if (tx_flags & IXGBE_TX_FLAGS_CSUM) 2671 /*
3014 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 2672 * Check Context must be set if Tx switch is enabled, which it
3015 IXGBE_ADVTXD_POPTS_SHIFT; 2673 * always is for case where virtual functions are running
2674 */
2675 olinfo_status |= IXGBE_ADVTXD_CC;
3016 2676
3017 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT); 2677 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
3018 2678
3019 i = tx_ring->next_to_use; 2679 i = tx_ring->next_to_use;
3020 while (count--) { 2680 while (count--) {
3021 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2681 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3022 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); 2682 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
3023 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); 2683 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
3024 tx_desc->read.cmd_type_len = 2684 tx_desc->read.cmd_type_len =
3025 cpu_to_le32(cmd_type_len | tx_buffer_info->length); 2685 cpu_to_le32(cmd_type_len | tx_buffer_info->length);
@@ -3031,24 +2691,14 @@ static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
3031 2691
3032 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd); 2692 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
3033 2693
3034 /*
3035 * Force memory writes to complete before letting h/w
3036 * know there are new descriptors to fetch. (Only
3037 * applicable for weak-ordered memory model archs,
3038 * such as IA-64).
3039 */
3040 wmb();
3041
3042 tx_ring->next_to_use = i; 2694 tx_ring->next_to_use = i;
3043 writel(i, adapter->hw.hw_addr + tx_ring->tail);
3044} 2695}
3045 2696
3046static int __ixgbevf_maybe_stop_tx(struct net_device *netdev, 2697static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
3047 struct ixgbevf_ring *tx_ring, int size)
3048{ 2698{
3049 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2699 struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
3050 2700
3051 netif_stop_subqueue(netdev, tx_ring->queue_index); 2701 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
3052 /* Herbert's original patch had: 2702 /* Herbert's original patch had:
3053 * smp_mb__after_netif_stop_queue(); 2703 * smp_mb__after_netif_stop_queue();
3054 * but since that doesn't exist yet, just open code it. */ 2704 * but since that doesn't exist yet, just open code it. */
@@ -3060,17 +2710,16 @@ static int __ixgbevf_maybe_stop_tx(struct net_device *netdev,
3060 return -EBUSY; 2710 return -EBUSY;
3061 2711
3062 /* A reprieve! - use start_queue because it doesn't call schedule */ 2712 /* A reprieve! - use start_queue because it doesn't call schedule */
3063 netif_start_subqueue(netdev, tx_ring->queue_index); 2713 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
3064 ++adapter->restart_queue; 2714 ++adapter->restart_queue;
3065 return 0; 2715 return 0;
3066} 2716}
3067 2717
3068static int ixgbevf_maybe_stop_tx(struct net_device *netdev, 2718static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
3069 struct ixgbevf_ring *tx_ring, int size)
3070{ 2719{
3071 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) 2720 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
3072 return 0; 2721 return 0;
3073 return __ixgbevf_maybe_stop_tx(netdev, tx_ring, size); 2722 return __ixgbevf_maybe_stop_tx(tx_ring, size);
3074} 2723}
3075 2724
3076static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 2725static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
@@ -3081,54 +2730,66 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3081 unsigned int tx_flags = 0; 2730 unsigned int tx_flags = 0;
3082 u8 hdr_len = 0; 2731 u8 hdr_len = 0;
3083 int r_idx = 0, tso; 2732 int r_idx = 0, tso;
3084 int count = 0; 2733 u16 count = TXD_USE_COUNT(skb_headlen(skb));
3085 2734#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3086 unsigned int f; 2735 unsigned short f;
2736#endif
3087 2737
3088 tx_ring = &adapter->tx_ring[r_idx]; 2738 tx_ring = &adapter->tx_ring[r_idx];
3089 2739
2740 /*
2741 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
2742 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
2743 * + 2 desc gap to keep tail from touching head,
2744 * + 1 desc for context descriptor,
2745 * otherwise try next time
2746 */
2747#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
2748 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
2749 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
2750#else
2751 count += skb_shinfo(skb)->nr_frags;
2752#endif
2753 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
2754 adapter->tx_busy++;
2755 return NETDEV_TX_BUSY;
2756 }
2757
3090 if (vlan_tx_tag_present(skb)) { 2758 if (vlan_tx_tag_present(skb)) {
3091 tx_flags |= vlan_tx_tag_get(skb); 2759 tx_flags |= vlan_tx_tag_get(skb);
3092 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 2760 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3093 tx_flags |= IXGBE_TX_FLAGS_VLAN; 2761 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3094 } 2762 }
3095 2763
3096 /* four things can cause us to need a context descriptor */
3097 if (skb_is_gso(skb) ||
3098 (skb->ip_summed == CHECKSUM_PARTIAL) ||
3099 (tx_flags & IXGBE_TX_FLAGS_VLAN))
3100 count++;
3101
3102 count += TXD_USE_COUNT(skb_headlen(skb));
3103 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
3104 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]));
3105
3106 if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) {
3107 adapter->tx_busy++;
3108 return NETDEV_TX_BUSY;
3109 }
3110
3111 first = tx_ring->next_to_use; 2764 first = tx_ring->next_to_use;
3112 2765
3113 if (skb->protocol == htons(ETH_P_IP)) 2766 if (skb->protocol == htons(ETH_P_IP))
3114 tx_flags |= IXGBE_TX_FLAGS_IPV4; 2767 tx_flags |= IXGBE_TX_FLAGS_IPV4;
3115 tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len); 2768 tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
3116 if (tso < 0) { 2769 if (tso < 0) {
3117 dev_kfree_skb_any(skb); 2770 dev_kfree_skb_any(skb);
3118 return NETDEV_TX_OK; 2771 return NETDEV_TX_OK;
3119 } 2772 }
3120 2773
3121 if (tso) 2774 if (tso)
3122 tx_flags |= IXGBE_TX_FLAGS_TSO; 2775 tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
3123 else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) && 2776 else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
3124 (skb->ip_summed == CHECKSUM_PARTIAL))
3125 tx_flags |= IXGBE_TX_FLAGS_CSUM; 2777 tx_flags |= IXGBE_TX_FLAGS_CSUM;
3126 2778
3127 ixgbevf_tx_queue(adapter, tx_ring, tx_flags, 2779 ixgbevf_tx_queue(tx_ring, tx_flags,
3128 ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first), 2780 ixgbevf_tx_map(tx_ring, skb, tx_flags, first),
3129 skb->len, hdr_len); 2781 skb->len, hdr_len);
2782 /*
2783 * Force memory writes to complete before letting h/w
2784 * know there are new descriptors to fetch. (Only
2785 * applicable for weak-ordered memory model archs,
2786 * such as IA-64).
2787 */
2788 wmb();
2789
2790 writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);
3130 2791
3131 ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED); 2792 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
3132 2793
3133 return NETDEV_TX_OK; 2794 return NETDEV_TX_OK;
3134} 2795}
@@ -3211,9 +2872,7 @@ static void ixgbevf_shutdown(struct pci_dev *pdev)
3211 ixgbevf_free_all_rx_resources(adapter); 2872 ixgbevf_free_all_rx_resources(adapter);
3212 } 2873 }
3213 2874
3214#ifdef CONFIG_PM
3215 pci_save_state(pdev); 2875 pci_save_state(pdev);
3216#endif
3217 2876
3218 pci_disable_device(pdev); 2877 pci_disable_device(pdev);
3219} 2878}
@@ -3256,19 +2915,6 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
3256 return stats; 2915 return stats;
3257} 2916}
3258 2917
3259static int ixgbevf_set_features(struct net_device *netdev,
3260 netdev_features_t features)
3261{
3262 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3263
3264 if (features & NETIF_F_RXCSUM)
3265 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
3266 else
3267 adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
3268
3269 return 0;
3270}
3271
3272static const struct net_device_ops ixgbe_netdev_ops = { 2918static const struct net_device_ops ixgbe_netdev_ops = {
3273 .ndo_open = ixgbevf_open, 2919 .ndo_open = ixgbevf_open,
3274 .ndo_stop = ixgbevf_close, 2920 .ndo_stop = ixgbevf_close,
@@ -3281,7 +2927,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
3281 .ndo_tx_timeout = ixgbevf_tx_timeout, 2927 .ndo_tx_timeout = ixgbevf_tx_timeout,
3282 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, 2928 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
3283 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, 2929 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
3284 .ndo_set_features = ixgbevf_set_features,
3285}; 2930};
3286 2931
3287static void ixgbevf_assign_netdev_ops(struct net_device *dev) 2932static void ixgbevf_assign_netdev_ops(struct net_device *dev)
@@ -3341,12 +2986,8 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3341 2986
3342 pci_set_master(pdev); 2987 pci_set_master(pdev);
3343 2988
3344#ifdef HAVE_TX_MQ
3345 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter), 2989 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
3346 MAX_TX_QUEUES); 2990 MAX_TX_QUEUES);
3347#else
3348 netdev = alloc_etherdev(sizeof(struct ixgbevf_adapter));
3349#endif
3350 if (!netdev) { 2991 if (!netdev) {
3351 err = -ENOMEM; 2992 err = -ENOMEM;
3352 goto err_alloc_etherdev; 2993 goto err_alloc_etherdev;
@@ -3387,10 +3028,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3387 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops, 3028 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
3388 sizeof(struct ixgbe_mbx_operations)); 3029 sizeof(struct ixgbe_mbx_operations));
3389 3030
3390 adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE;
3391 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
3392 adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE;
3393
3394 /* setup the private structure */ 3031 /* setup the private structure */
3395 err = ixgbevf_sw_init(adapter); 3032 err = ixgbevf_sw_init(adapter);
3396 if (err) 3033 if (err)
@@ -3449,8 +3086,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3449 if (err) 3086 if (err)
3450 goto err_register; 3087 goto err_register;
3451 3088
3452 adapter->netdev_registered = true;
3453
3454 netif_carrier_off(netdev); 3089 netif_carrier_off(netdev);
3455 3090
3456 ixgbevf_init_last_counter_stats(adapter); 3091 ixgbevf_init_last_counter_stats(adapter);
@@ -3460,8 +3095,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3460 3095
3461 hw_dbg(hw, "MAC: %d\n", hw->mac.type); 3096 hw_dbg(hw, "MAC: %d\n", hw->mac.type);
3462 3097
3463 hw_dbg(hw, "LRO is disabled\n");
3464
3465 hw_dbg(hw, "Intel(R) 82599 Virtual Function\n"); 3098 hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
3466 cards_found++; 3099 cards_found++;
3467 return 0; 3100 return 0;
@@ -3501,10 +3134,8 @@ static void __devexit ixgbevf_remove(struct pci_dev *pdev)
3501 cancel_work_sync(&adapter->reset_task); 3134 cancel_work_sync(&adapter->reset_task);
3502 cancel_work_sync(&adapter->watchdog_task); 3135 cancel_work_sync(&adapter->watchdog_task);
3503 3136
3504 if (adapter->netdev_registered) { 3137 if (netdev->reg_state == NETREG_REGISTERED)
3505 unregister_netdev(netdev); 3138 unregister_netdev(netdev);
3506 adapter->netdev_registered = false;
3507 }
3508 3139
3509 ixgbevf_reset_interrupt_capability(adapter); 3140 ixgbevf_reset_interrupt_capability(adapter);
3510 3141
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 4ea6580d3ae8..c911d883c27e 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2743,6 +2743,17 @@ jme_set_features(struct net_device *netdev, netdev_features_t features)
2743 return 0; 2743 return 0;
2744} 2744}
2745 2745
2746#ifdef CONFIG_NET_POLL_CONTROLLER
2747static void jme_netpoll(struct net_device *dev)
2748{
2749 unsigned long flags;
2750
2751 local_irq_save(flags);
2752 jme_intr(dev->irq, dev);
2753 local_irq_restore(flags);
2754}
2755#endif
2756
2746static int 2757static int
2747jme_nway_reset(struct net_device *netdev) 2758jme_nway_reset(struct net_device *netdev)
2748{ 2759{
@@ -2944,6 +2955,9 @@ static const struct net_device_ops jme_netdev_ops = {
2944 .ndo_tx_timeout = jme_tx_timeout, 2955 .ndo_tx_timeout = jme_tx_timeout,
2945 .ndo_fix_features = jme_fix_features, 2956 .ndo_fix_features = jme_fix_features,
2946 .ndo_set_features = jme_set_features, 2957 .ndo_set_features = jme_set_features,
2958#ifdef CONFIG_NET_POLL_CONTROLLER
2959 .ndo_poll_controller = jme_netpoll,
2960#endif
2947}; 2961};
2948 2962
2949static int __devinit 2963static int __devinit
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 5dc9cbd51514..003c5bc7189f 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -149,7 +149,6 @@ ltq_etop_hw_receive(struct ltq_etop_chan *ch)
149 spin_unlock_irqrestore(&priv->lock, flags); 149 spin_unlock_irqrestore(&priv->lock, flags);
150 150
151 skb_put(skb, len); 151 skb_put(skb, len);
152 skb->dev = ch->netdev;
153 skb->protocol = eth_type_trans(skb, ch->netdev); 152 skb->protocol = eth_type_trans(skb, ch->netdev);
154 netif_receive_skb(skb); 153 netif_receive_skb(skb);
155} 154}
@@ -646,7 +645,7 @@ ltq_etop_init(struct net_device *dev)
646 memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr)); 645 memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr));
647 if (!is_valid_ether_addr(mac.sa_data)) { 646 if (!is_valid_ether_addr(mac.sa_data)) {
648 pr_warn("etop: invalid MAC, using random\n"); 647 pr_warn("etop: invalid MAC, using random\n");
649 random_ether_addr(mac.sa_data); 648 eth_random_addr(mac.sa_data);
650 random_mac = true; 649 random_mac = true;
651 } 650 }
652 651
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index f0f06b2bc28b..770ee557924c 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1896,7 +1896,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
1896 goto out_free; 1896 goto out_free;
1897 } 1897 }
1898 1898
1899 rx_desc = (struct rx_desc *)rxq->rx_desc_area; 1899 rx_desc = rxq->rx_desc_area;
1900 for (i = 0; i < rxq->rx_ring_size; i++) { 1900 for (i = 0; i < rxq->rx_ring_size; i++) {
1901 int nexti; 1901 int nexti;
1902 1902
@@ -2001,7 +2001,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
2001 2001
2002 txq->tx_desc_area_size = size; 2002 txq->tx_desc_area_size = size;
2003 2003
2004 tx_desc = (struct tx_desc *)txq->tx_desc_area; 2004 tx_desc = txq->tx_desc_area;
2005 for (i = 0; i < txq->tx_ring_size; i++) { 2005 for (i = 0; i < txq->tx_ring_size; i++) {
2006 struct tx_desc *txd = tx_desc + i; 2006 struct tx_desc *txd = tx_desc + i;
2007 int nexti; 2007 int nexti;
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 1db023b075a1..59489722e898 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1032,7 +1032,7 @@ static int rxq_init(struct net_device *dev)
1032 } 1032 }
1033 memset((void *)pep->p_rx_desc_area, 0, size); 1033 memset((void *)pep->p_rx_desc_area, 0, size);
1034 /* initialize the next_desc_ptr links in the Rx descriptors ring */ 1034 /* initialize the next_desc_ptr links in the Rx descriptors ring */
1035 p_rx_desc = (struct rx_desc *)pep->p_rx_desc_area; 1035 p_rx_desc = pep->p_rx_desc_area;
1036 for (i = 0; i < rx_desc_num; i++) { 1036 for (i = 0; i < rx_desc_num; i++) {
1037 p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma + 1037 p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
1038 ((i + 1) % rx_desc_num) * sizeof(struct rx_desc); 1038 ((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
@@ -1095,7 +1095,7 @@ static int txq_init(struct net_device *dev)
1095 } 1095 }
1096 memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size); 1096 memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size);
1097 /* Initialize the next_desc_ptr links in the Tx descriptors ring */ 1097 /* Initialize the next_desc_ptr links in the Tx descriptors ring */
1098 p_tx_desc = (struct tx_desc *)pep->p_tx_desc_area; 1098 p_tx_desc = pep->p_tx_desc_area;
1099 for (i = 0; i < tx_desc_num; i++) { 1099 for (i = 0; i < tx_desc_num; i++) {
1100 p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma + 1100 p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +
1101 ((i + 1) % tx_desc_num) * sizeof(struct tx_desc); 1101 ((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 28a54451a3e5..2b0748dba8b8 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -141,6 +141,7 @@ static DEFINE_PCI_DEVICE_TABLE(sky2_id_table) = {
141 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */ 141 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
142 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */ 142 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
143 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */ 143 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */
144 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4382) }, /* 88E8079 */
144 { 0 } 145 { 0 }
145}; 146};
146 147
@@ -3079,8 +3080,10 @@ static irqreturn_t sky2_intr(int irq, void *dev_id)
3079 3080
3080 /* Reading this mask interrupts as side effect */ 3081 /* Reading this mask interrupts as side effect */
3081 status = sky2_read32(hw, B0_Y2_SP_ISRC2); 3082 status = sky2_read32(hw, B0_Y2_SP_ISRC2);
3082 if (status == 0 || status == ~0) 3083 if (status == 0 || status == ~0) {
3084 sky2_write32(hw, B0_Y2_SP_ICR, 2);
3083 return IRQ_NONE; 3085 return IRQ_NONE;
3086 }
3084 3087
3085 prefetch(&hw->st_le[hw->st_idx]); 3088 prefetch(&hw->st_le[hw->st_idx]);
3086 3089
@@ -3349,6 +3352,17 @@ static void sky2_reset(struct sky2_hw *hw)
3349 sky2_pci_write16(hw, pdev->pcie_cap + PCI_EXP_LNKCTL, 3352 sky2_pci_write16(hw, pdev->pcie_cap + PCI_EXP_LNKCTL,
3350 reg); 3353 reg);
3351 3354
3355 if (hw->chip_id == CHIP_ID_YUKON_PRM &&
3356 hw->chip_rev == CHIP_REV_YU_PRM_A0) {
3357 /* change PHY Interrupt polarity to low active */
3358 reg = sky2_read16(hw, GPHY_CTRL);
3359 sky2_write16(hw, GPHY_CTRL, reg | GPC_INTPOL);
3360
3361 /* adapt HW for low active PHY Interrupt */
3362 reg = sky2_read16(hw, Y2_CFG_SPC + PCI_LDO_CTRL);
3363 sky2_write16(hw, Y2_CFG_SPC + PCI_LDO_CTRL, reg | PHY_M_UNDOC1);
3364 }
3365
3352 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 3366 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3353 3367
3354 /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */ 3368 /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
@@ -4871,7 +4885,7 @@ static const char *sky2_name(u8 chipid, char *buf, int sz)
4871 "UL 2", /* 0xba */ 4885 "UL 2", /* 0xba */
4872 "Unknown", /* 0xbb */ 4886 "Unknown", /* 0xbb */
4873 "Optima", /* 0xbc */ 4887 "Optima", /* 0xbc */
4874 "Optima Prime", /* 0xbd */ 4888 "OptimaEEE", /* 0xbd */
4875 "Optima 2", /* 0xbe */ 4889 "Optima 2", /* 0xbe */
4876 }; 4890 };
4877 4891
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
index 3c896ce80b71..615ac63ea860 100644
--- a/drivers/net/ethernet/marvell/sky2.h
+++ b/drivers/net/ethernet/marvell/sky2.h
@@ -23,6 +23,7 @@ enum {
23 PSM_CONFIG_REG3 = 0x164, 23 PSM_CONFIG_REG3 = 0x164,
24 PSM_CONFIG_REG4 = 0x168, 24 PSM_CONFIG_REG4 = 0x168,
25 25
26 PCI_LDO_CTRL = 0xbc,
26}; 27};
27 28
28/* Yukon-2 */ 29/* Yukon-2 */
@@ -586,6 +587,10 @@ enum yukon_supr_rev {
586 CHIP_REV_YU_SU_B1 = 3, 587 CHIP_REV_YU_SU_B1 = 3,
587}; 588};
588 589
590enum yukon_prm_rev {
591 CHIP_REV_YU_PRM_Z1 = 1,
592 CHIP_REV_YU_PRM_A0 = 2,
593};
589 594
590/* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */ 595/* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */
591enum { 596enum {
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 842c8ce9494e..7e94987d030c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1080,6 +1080,25 @@ static struct mlx4_cmd_info cmd_info[] = {
1080 .verify = NULL, 1080 .verify = NULL,
1081 .wrapper = NULL 1081 .wrapper = NULL
1082 }, 1082 },
1083 /* flow steering commands */
1084 {
1085 .opcode = MLX4_QP_FLOW_STEERING_ATTACH,
1086 .has_inbox = true,
1087 .has_outbox = false,
1088 .out_is_imm = true,
1089 .encode_slave_id = false,
1090 .verify = NULL,
1091 .wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper
1092 },
1093 {
1094 .opcode = MLX4_QP_FLOW_STEERING_DETACH,
1095 .has_inbox = false,
1096 .has_outbox = false,
1097 .out_is_imm = false,
1098 .encode_slave_id = false,
1099 .verify = NULL,
1100 .wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
1101 },
1083}; 1102};
1084 1103
1085static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave, 1104static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 908a460d8db6..aa9c2f6cf3c0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -77,6 +77,12 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
77 struct mlx4_en_dev *mdev = priv->mdev; 77 struct mlx4_en_dev *mdev = priv->mdev;
78 int err = 0; 78 int err = 0;
79 char name[25]; 79 char name[25];
80 struct cpu_rmap *rmap =
81#ifdef CONFIG_RFS_ACCEL
82 priv->dev->rx_cpu_rmap;
83#else
84 NULL;
85#endif
80 86
81 cq->dev = mdev->pndev[priv->port]; 87 cq->dev = mdev->pndev[priv->port];
82 cq->mcq.set_ci_db = cq->wqres.db.db; 88 cq->mcq.set_ci_db = cq->wqres.db.db;
@@ -91,7 +97,8 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
91 sprintf(name, "%s-%d", priv->dev->name, 97 sprintf(name, "%s-%d", priv->dev->name,
92 cq->ring); 98 cq->ring);
93 /* Set IRQ for specific name (per ring) */ 99 /* Set IRQ for specific name (per ring) */
94 if (mlx4_assign_eq(mdev->dev, name, &cq->vector)) { 100 if (mlx4_assign_eq(mdev->dev, name, rmap,
101 &cq->vector)) {
95 cq->vector = (cq->ring + 1 + priv->port) 102 cq->vector = (cq->ring + 1 + priv->port)
96 % mdev->dev->caps.num_comp_vectors; 103 % mdev->dev->caps.num_comp_vectors;
97 mlx4_warn(mdev, "Failed Assigning an EQ to " 104 mlx4_warn(mdev, "Failed Assigning an EQ to "
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 72901ce2b088..9d0b88eea02b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -34,10 +34,14 @@
34#include <linux/kernel.h> 34#include <linux/kernel.h>
35#include <linux/ethtool.h> 35#include <linux/ethtool.h>
36#include <linux/netdevice.h> 36#include <linux/netdevice.h>
37#include <linux/mlx4/driver.h>
37 38
38#include "mlx4_en.h" 39#include "mlx4_en.h"
39#include "en_port.h" 40#include "en_port.h"
40 41
42#define EN_ETHTOOL_QP_ATTACH (1ull << 63)
43#define EN_ETHTOOL_SHORT_MASK cpu_to_be16(0xffff)
44#define EN_ETHTOOL_WORD_MASK cpu_to_be32(0xffffffff)
41 45
42static void 46static void
43mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) 47mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
@@ -599,16 +603,369 @@ static int mlx4_en_set_rxfh_indir(struct net_device *dev,
599 return err; 603 return err;
600} 604}
601 605
606#define all_zeros_or_all_ones(field) \
607 ((field) == 0 || (field) == (__force typeof(field))-1)
608
609static int mlx4_en_validate_flow(struct net_device *dev,
610 struct ethtool_rxnfc *cmd)
611{
612 struct ethtool_usrip4_spec *l3_mask;
613 struct ethtool_tcpip4_spec *l4_mask;
614 struct ethhdr *eth_mask;
615 u64 full_mac = ~0ull;
616 u64 zero_mac = 0;
617
618 if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
619 return -EINVAL;
620
621 switch (cmd->fs.flow_type & ~FLOW_EXT) {
622 case TCP_V4_FLOW:
623 case UDP_V4_FLOW:
624 if (cmd->fs.m_u.tcp_ip4_spec.tos)
625 return -EINVAL;
626 l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
627 /* don't allow mask which isn't all 0 or 1 */
628 if (!all_zeros_or_all_ones(l4_mask->ip4src) ||
629 !all_zeros_or_all_ones(l4_mask->ip4dst) ||
630 !all_zeros_or_all_ones(l4_mask->psrc) ||
631 !all_zeros_or_all_ones(l4_mask->pdst))
632 return -EINVAL;
633 break;
634 case IP_USER_FLOW:
635 l3_mask = &cmd->fs.m_u.usr_ip4_spec;
636 if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
637 cmd->fs.h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4 ||
638 (!l3_mask->ip4src && !l3_mask->ip4dst) ||
639 !all_zeros_or_all_ones(l3_mask->ip4src) ||
640 !all_zeros_or_all_ones(l3_mask->ip4dst))
641 return -EINVAL;
642 break;
643 case ETHER_FLOW:
644 eth_mask = &cmd->fs.m_u.ether_spec;
645 /* source mac mask must not be set */
646 if (memcmp(eth_mask->h_source, &zero_mac, ETH_ALEN))
647 return -EINVAL;
648
649 /* dest mac mask must be ff:ff:ff:ff:ff:ff */
650 if (memcmp(eth_mask->h_dest, &full_mac, ETH_ALEN))
651 return -EINVAL;
652
653 if (!all_zeros_or_all_ones(eth_mask->h_proto))
654 return -EINVAL;
655 break;
656 default:
657 return -EINVAL;
658 }
659
660 if ((cmd->fs.flow_type & FLOW_EXT)) {
661 if (cmd->fs.m_ext.vlan_etype ||
662 !(cmd->fs.m_ext.vlan_tci == 0 ||
663 cmd->fs.m_ext.vlan_tci == cpu_to_be16(0xfff)))
664 return -EINVAL;
665 }
666
667 return 0;
668}
669
670static int add_ip_rule(struct mlx4_en_priv *priv,
671 struct ethtool_rxnfc *cmd,
672 struct list_head *list_h)
673{
674 struct mlx4_spec_list *spec_l3;
675 struct ethtool_usrip4_spec *l3_mask = &cmd->fs.m_u.usr_ip4_spec;
676
677 spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL);
678 if (!spec_l3) {
679 en_err(priv, "Fail to alloc ethtool rule.\n");
680 return -ENOMEM;
681 }
682
683 spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
684 spec_l3->ipv4.src_ip = cmd->fs.h_u.usr_ip4_spec.ip4src;
685 if (l3_mask->ip4src)
686 spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
687 spec_l3->ipv4.dst_ip = cmd->fs.h_u.usr_ip4_spec.ip4dst;
688 if (l3_mask->ip4dst)
689 spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;
690 list_add_tail(&spec_l3->list, list_h);
691
692 return 0;
693}
694
695static int add_tcp_udp_rule(struct mlx4_en_priv *priv,
696 struct ethtool_rxnfc *cmd,
697 struct list_head *list_h, int proto)
698{
699 struct mlx4_spec_list *spec_l3;
700 struct mlx4_spec_list *spec_l4;
701 struct ethtool_tcpip4_spec *l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
702
703 spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL);
704 spec_l4 = kzalloc(sizeof *spec_l4, GFP_KERNEL);
705 if (!spec_l4 || !spec_l3) {
706 en_err(priv, "Fail to alloc ethtool rule.\n");
707 kfree(spec_l3);
708 kfree(spec_l4);
709 return -ENOMEM;
710 }
711
712 spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
713
714 if (proto == TCP_V4_FLOW) {
715 spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP;
716 spec_l3->ipv4.src_ip = cmd->fs.h_u.tcp_ip4_spec.ip4src;
717 spec_l3->ipv4.dst_ip = cmd->fs.h_u.tcp_ip4_spec.ip4dst;
718 spec_l4->tcp_udp.src_port = cmd->fs.h_u.tcp_ip4_spec.psrc;
719 spec_l4->tcp_udp.dst_port = cmd->fs.h_u.tcp_ip4_spec.pdst;
720 } else {
721 spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP;
722 spec_l3->ipv4.src_ip = cmd->fs.h_u.udp_ip4_spec.ip4src;
723 spec_l3->ipv4.dst_ip = cmd->fs.h_u.udp_ip4_spec.ip4dst;
724 spec_l4->tcp_udp.src_port = cmd->fs.h_u.udp_ip4_spec.psrc;
725 spec_l4->tcp_udp.dst_port = cmd->fs.h_u.udp_ip4_spec.pdst;
726 }
727
728 if (l4_mask->ip4src)
729 spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
730 if (l4_mask->ip4dst)
731 spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;
732
733 if (l4_mask->psrc)
734 spec_l4->tcp_udp.src_port_msk = EN_ETHTOOL_SHORT_MASK;
735 if (l4_mask->pdst)
736 spec_l4->tcp_udp.dst_port_msk = EN_ETHTOOL_SHORT_MASK;
737
738 list_add_tail(&spec_l3->list, list_h);
739 list_add_tail(&spec_l4->list, list_h);
740
741 return 0;
742}
743
744static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
745 struct ethtool_rxnfc *cmd,
746 struct list_head *rule_list_h)
747{
748 int err;
749 u64 mac;
750 __be64 be_mac;
751 struct ethhdr *eth_spec;
752 struct mlx4_en_priv *priv = netdev_priv(dev);
753 struct mlx4_spec_list *spec_l2;
754 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
755
756 err = mlx4_en_validate_flow(dev, cmd);
757 if (err)
758 return err;
759
760 spec_l2 = kzalloc(sizeof *spec_l2, GFP_KERNEL);
761 if (!spec_l2)
762 return -ENOMEM;
763
764 mac = priv->mac & MLX4_MAC_MASK;
765 be_mac = cpu_to_be64(mac << 16);
766
767 spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
768 memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
769 if ((cmd->fs.flow_type & ~FLOW_EXT) != ETHER_FLOW)
770 memcpy(spec_l2->eth.dst_mac, &be_mac, ETH_ALEN);
771
772 if ((cmd->fs.flow_type & FLOW_EXT) && cmd->fs.m_ext.vlan_tci) {
773 spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci;
774 spec_l2->eth.vlan_id_msk = cpu_to_be16(0xfff);
775 }
776
777 list_add_tail(&spec_l2->list, rule_list_h);
778
779 switch (cmd->fs.flow_type & ~FLOW_EXT) {
780 case ETHER_FLOW:
781 eth_spec = &cmd->fs.h_u.ether_spec;
782 memcpy(&spec_l2->eth.dst_mac, eth_spec->h_dest, ETH_ALEN);
783 spec_l2->eth.ether_type = eth_spec->h_proto;
784 if (eth_spec->h_proto)
785 spec_l2->eth.ether_type_enable = 1;
786 break;
787 case IP_USER_FLOW:
788 err = add_ip_rule(priv, cmd, rule_list_h);
789 break;
790 case TCP_V4_FLOW:
791 err = add_tcp_udp_rule(priv, cmd, rule_list_h, TCP_V4_FLOW);
792 break;
793 case UDP_V4_FLOW:
794 err = add_tcp_udp_rule(priv, cmd, rule_list_h, UDP_V4_FLOW);
795 break;
796 }
797
798 return err;
799}
800
801static int mlx4_en_flow_replace(struct net_device *dev,
802 struct ethtool_rxnfc *cmd)
803{
804 int err;
805 struct mlx4_en_priv *priv = netdev_priv(dev);
806 struct ethtool_flow_id *loc_rule;
807 struct mlx4_spec_list *spec, *tmp_spec;
808 u32 qpn;
809 u64 reg_id;
810
811 struct mlx4_net_trans_rule rule = {
812 .queue_mode = MLX4_NET_TRANS_Q_FIFO,
813 .exclusive = 0,
814 .allow_loopback = 1,
815 .promisc_mode = MLX4_FS_PROMISC_NONE,
816 };
817
818 rule.port = priv->port;
819 rule.priority = MLX4_DOMAIN_ETHTOOL | cmd->fs.location;
820 INIT_LIST_HEAD(&rule.list);
821
822 /* Allow direct QP attaches if the EN_ETHTOOL_QP_ATTACH flag is set */
823 if (cmd->fs.ring_cookie == RX_CLS_FLOW_DISC)
824 qpn = priv->drop_qp.qpn;
825 else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) {
826 qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
827 } else {
828 if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
829 en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist.\n",
830 cmd->fs.ring_cookie);
831 return -EINVAL;
832 }
833 qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn;
834 if (!qpn) {
835 en_warn(priv, "rxnfc: RX ring (%llu) is inactive.\n",
836 cmd->fs.ring_cookie);
837 return -EINVAL;
838 }
839 }
840 rule.qpn = qpn;
841 err = mlx4_en_ethtool_to_net_trans_rule(dev, cmd, &rule.list);
842 if (err)
843 goto out_free_list;
844
845 loc_rule = &priv->ethtool_rules[cmd->fs.location];
846 if (loc_rule->id) {
847 err = mlx4_flow_detach(priv->mdev->dev, loc_rule->id);
848 if (err) {
849 en_err(priv, "Fail to detach network rule at location %d. registration id = %llx\n",
850 cmd->fs.location, loc_rule->id);
851 goto out_free_list;
852 }
853 loc_rule->id = 0;
854 memset(&loc_rule->flow_spec, 0,
855 sizeof(struct ethtool_rx_flow_spec));
856 }
857 err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
858 if (err) {
859 en_err(priv, "Fail to attach network rule at location %d.\n",
860 cmd->fs.location);
861 goto out_free_list;
862 }
863 loc_rule->id = reg_id;
864 memcpy(&loc_rule->flow_spec, &cmd->fs,
865 sizeof(struct ethtool_rx_flow_spec));
866
867out_free_list:
868 list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) {
869 list_del(&spec->list);
870 kfree(spec);
871 }
872 return err;
873}
874
875static int mlx4_en_flow_detach(struct net_device *dev,
876 struct ethtool_rxnfc *cmd)
877{
878 int err = 0;
879 struct ethtool_flow_id *rule;
880 struct mlx4_en_priv *priv = netdev_priv(dev);
881
882 if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
883 return -EINVAL;
884
885 rule = &priv->ethtool_rules[cmd->fs.location];
886 if (!rule->id) {
887 err = -ENOENT;
888 goto out;
889 }
890
891 err = mlx4_flow_detach(priv->mdev->dev, rule->id);
892 if (err) {
893 en_err(priv, "Fail to detach network rule at location %d. registration id = 0x%llx\n",
894 cmd->fs.location, rule->id);
895 goto out;
896 }
897 rule->id = 0;
898 memset(&rule->flow_spec, 0, sizeof(struct ethtool_rx_flow_spec));
899out:
900 return err;
901
902}
903
904static int mlx4_en_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd,
905 int loc)
906{
907 int err = 0;
908 struct ethtool_flow_id *rule;
909 struct mlx4_en_priv *priv = netdev_priv(dev);
910
911 if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
912 return -EINVAL;
913
914 rule = &priv->ethtool_rules[loc];
915 if (rule->id)
916 memcpy(&cmd->fs, &rule->flow_spec,
917 sizeof(struct ethtool_rx_flow_spec));
918 else
919 err = -ENOENT;
920
921 return err;
922}
923
924static int mlx4_en_get_num_flows(struct mlx4_en_priv *priv)
925{
926
927 int i, res = 0;
928 for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
929 if (priv->ethtool_rules[i].id)
930 res++;
931 }
932 return res;
933
934}
935
602static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, 936static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
603 u32 *rule_locs) 937 u32 *rule_locs)
604{ 938{
605 struct mlx4_en_priv *priv = netdev_priv(dev); 939 struct mlx4_en_priv *priv = netdev_priv(dev);
940 struct mlx4_en_dev *mdev = priv->mdev;
606 int err = 0; 941 int err = 0;
942 int i = 0, priority = 0;
943
944 if ((cmd->cmd == ETHTOOL_GRXCLSRLCNT ||
945 cmd->cmd == ETHTOOL_GRXCLSRULE ||
946 cmd->cmd == ETHTOOL_GRXCLSRLALL) &&
947 mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
948 return -EINVAL;
607 949
608 switch (cmd->cmd) { 950 switch (cmd->cmd) {
609 case ETHTOOL_GRXRINGS: 951 case ETHTOOL_GRXRINGS:
610 cmd->data = priv->rx_ring_num; 952 cmd->data = priv->rx_ring_num;
611 break; 953 break;
954 case ETHTOOL_GRXCLSRLCNT:
955 cmd->rule_cnt = mlx4_en_get_num_flows(priv);
956 break;
957 case ETHTOOL_GRXCLSRULE:
958 err = mlx4_en_get_flow(dev, cmd, cmd->fs.location);
959 break;
960 case ETHTOOL_GRXCLSRLALL:
961 while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) {
962 err = mlx4_en_get_flow(dev, cmd, i);
963 if (!err)
964 rule_locs[priority++] = i;
965 i++;
966 }
967 err = 0;
968 break;
612 default: 969 default:
613 err = -EOPNOTSUPP; 970 err = -EOPNOTSUPP;
614 break; 971 break;
@@ -617,6 +974,30 @@ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
617 return err; 974 return err;
618} 975}
619 976
977static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
978{
979 int err = 0;
980 struct mlx4_en_priv *priv = netdev_priv(dev);
981 struct mlx4_en_dev *mdev = priv->mdev;
982
983 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
984 return -EINVAL;
985
986 switch (cmd->cmd) {
987 case ETHTOOL_SRXCLSRLINS:
988 err = mlx4_en_flow_replace(dev, cmd);
989 break;
990 case ETHTOOL_SRXCLSRLDEL:
991 err = mlx4_en_flow_detach(dev, cmd);
992 break;
993 default:
994 en_warn(priv, "Unsupported ethtool command. (%d)\n", cmd->cmd);
995 return -EINVAL;
996 }
997
998 return err;
999}
1000
620const struct ethtool_ops mlx4_en_ethtool_ops = { 1001const struct ethtool_ops mlx4_en_ethtool_ops = {
621 .get_drvinfo = mlx4_en_get_drvinfo, 1002 .get_drvinfo = mlx4_en_get_drvinfo,
622 .get_settings = mlx4_en_get_settings, 1003 .get_settings = mlx4_en_get_settings,
@@ -637,6 +1018,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
637 .get_ringparam = mlx4_en_get_ringparam, 1018 .get_ringparam = mlx4_en_get_ringparam,
638 .set_ringparam = mlx4_en_set_ringparam, 1019 .set_ringparam = mlx4_en_set_ringparam,
639 .get_rxnfc = mlx4_en_get_rxnfc, 1020 .get_rxnfc = mlx4_en_get_rxnfc,
1021 .set_rxnfc = mlx4_en_set_rxnfc,
640 .get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size, 1022 .get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
641 .get_rxfh_indir = mlx4_en_get_rxfh_indir, 1023 .get_rxfh_indir = mlx4_en_get_rxfh_indir,
642 .set_rxfh_indir = mlx4_en_set_rxfh_indir, 1024 .set_rxfh_indir = mlx4_en_set_rxfh_indir,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 073b85b45fc5..8864d8b53737 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -36,6 +36,8 @@
36#include <linux/if_vlan.h> 36#include <linux/if_vlan.h>
37#include <linux/delay.h> 37#include <linux/delay.h>
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/hash.h>
40#include <net/ip.h>
39 41
40#include <linux/mlx4/driver.h> 42#include <linux/mlx4/driver.h>
41#include <linux/mlx4/device.h> 43#include <linux/mlx4/device.h>
@@ -66,6 +68,299 @@ static int mlx4_en_setup_tc(struct net_device *dev, u8 up)
66 return 0; 68 return 0;
67} 69}
68 70
71#ifdef CONFIG_RFS_ACCEL
72
73struct mlx4_en_filter {
74 struct list_head next;
75 struct work_struct work;
76
77 __be32 src_ip;
78 __be32 dst_ip;
79 __be16 src_port;
80 __be16 dst_port;
81
82 int rxq_index;
83 struct mlx4_en_priv *priv;
84 u32 flow_id; /* RFS infrastructure id */
85 int id; /* mlx4_en driver id */
86 u64 reg_id; /* Flow steering API id */
87 u8 activated; /* Used to prevent expiry before filter
88 * is attached
89 */
90 struct hlist_node filter_chain;
91};
92
93static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);
94
95static void mlx4_en_filter_work(struct work_struct *work)
96{
97 struct mlx4_en_filter *filter = container_of(work,
98 struct mlx4_en_filter,
99 work);
100 struct mlx4_en_priv *priv = filter->priv;
101 struct mlx4_spec_list spec_tcp = {
102 .id = MLX4_NET_TRANS_RULE_ID_TCP,
103 {
104 .tcp_udp = {
105 .dst_port = filter->dst_port,
106 .dst_port_msk = (__force __be16)-1,
107 .src_port = filter->src_port,
108 .src_port_msk = (__force __be16)-1,
109 },
110 },
111 };
112 struct mlx4_spec_list spec_ip = {
113 .id = MLX4_NET_TRANS_RULE_ID_IPV4,
114 {
115 .ipv4 = {
116 .dst_ip = filter->dst_ip,
117 .dst_ip_msk = (__force __be32)-1,
118 .src_ip = filter->src_ip,
119 .src_ip_msk = (__force __be32)-1,
120 },
121 },
122 };
123 struct mlx4_spec_list spec_eth = {
124 .id = MLX4_NET_TRANS_RULE_ID_ETH,
125 };
126 struct mlx4_net_trans_rule rule = {
127 .list = LIST_HEAD_INIT(rule.list),
128 .queue_mode = MLX4_NET_TRANS_Q_LIFO,
129 .exclusive = 1,
130 .allow_loopback = 1,
131 .promisc_mode = MLX4_FS_PROMISC_NONE,
132 .port = priv->port,
133 .priority = MLX4_DOMAIN_RFS,
134 };
135 int rc;
136 __be64 mac;
137 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
138
139 list_add_tail(&spec_eth.list, &rule.list);
140 list_add_tail(&spec_ip.list, &rule.list);
141 list_add_tail(&spec_tcp.list, &rule.list);
142
143 mac = cpu_to_be64((priv->mac & MLX4_MAC_MASK) << 16);
144
145 rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
146 memcpy(spec_eth.eth.dst_mac, &mac, ETH_ALEN);
147 memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
148
149 filter->activated = 0;
150
151 if (filter->reg_id) {
152 rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
153 if (rc && rc != -ENOENT)
154 en_err(priv, "Error detaching flow. rc = %d\n", rc);
155 }
156
157 rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
158 if (rc)
159 en_err(priv, "Error attaching flow. err = %d\n", rc);
160
161 mlx4_en_filter_rfs_expire(priv);
162
163 filter->activated = 1;
164}
165
166static inline struct hlist_head *
167filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
168 __be16 src_port, __be16 dst_port)
169{
170 unsigned long l;
171 int bucket_idx;
172
173 l = (__force unsigned long)src_port |
174 ((__force unsigned long)dst_port << 2);
175 l ^= (__force unsigned long)(src_ip ^ dst_ip);
176
177 bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);
178
179 return &priv->filter_hash[bucket_idx];
180}
181
182static struct mlx4_en_filter *
183mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
184 __be32 dst_ip, __be16 src_port, __be16 dst_port,
185 u32 flow_id)
186{
187 struct mlx4_en_filter *filter = NULL;
188
189 filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
190 if (!filter)
191 return NULL;
192
193 filter->priv = priv;
194 filter->rxq_index = rxq_index;
195 INIT_WORK(&filter->work, mlx4_en_filter_work);
196
197 filter->src_ip = src_ip;
198 filter->dst_ip = dst_ip;
199 filter->src_port = src_port;
200 filter->dst_port = dst_port;
201
202 filter->flow_id = flow_id;
203
204 filter->id = priv->last_filter_id++;
205
206 list_add_tail(&filter->next, &priv->filters);
207 hlist_add_head(&filter->filter_chain,
208 filter_hash_bucket(priv, src_ip, dst_ip, src_port,
209 dst_port));
210
211 return filter;
212}
213
214static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
215{
216 struct mlx4_en_priv *priv = filter->priv;
217 int rc;
218
219 list_del(&filter->next);
220
221 rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
222 if (rc && rc != -ENOENT)
223 en_err(priv, "Error detaching flow. rc = %d\n", rc);
224
225 kfree(filter);
226}
227
228static inline struct mlx4_en_filter *
229mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
230 __be16 src_port, __be16 dst_port)
231{
232 struct hlist_node *elem;
233 struct mlx4_en_filter *filter;
234 struct mlx4_en_filter *ret = NULL;
235
236 hlist_for_each_entry(filter, elem,
237 filter_hash_bucket(priv, src_ip, dst_ip,
238 src_port, dst_port),
239 filter_chain) {
240 if (filter->src_ip == src_ip &&
241 filter->dst_ip == dst_ip &&
242 filter->src_port == src_port &&
243 filter->dst_port == dst_port) {
244 ret = filter;
245 break;
246 }
247 }
248
249 return ret;
250}
251
252static int
253mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
254 u16 rxq_index, u32 flow_id)
255{
256 struct mlx4_en_priv *priv = netdev_priv(net_dev);
257 struct mlx4_en_filter *filter;
258 const struct iphdr *ip;
259 const __be16 *ports;
260 __be32 src_ip;
261 __be32 dst_ip;
262 __be16 src_port;
263 __be16 dst_port;
264 int nhoff = skb_network_offset(skb);
265 int ret = 0;
266
267 if (skb->protocol != htons(ETH_P_IP))
268 return -EPROTONOSUPPORT;
269
270 ip = (const struct iphdr *)(skb->data + nhoff);
271 if (ip_is_fragment(ip))
272 return -EPROTONOSUPPORT;
273
274 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
275
276 src_ip = ip->saddr;
277 dst_ip = ip->daddr;
278 src_port = ports[0];
279 dst_port = ports[1];
280
281 if (ip->protocol != IPPROTO_TCP)
282 return -EPROTONOSUPPORT;
283
284 spin_lock_bh(&priv->filters_lock);
285 filter = mlx4_en_filter_find(priv, src_ip, dst_ip, src_port, dst_port);
286 if (filter) {
287 if (filter->rxq_index == rxq_index)
288 goto out;
289
290 filter->rxq_index = rxq_index;
291 } else {
292 filter = mlx4_en_filter_alloc(priv, rxq_index,
293 src_ip, dst_ip,
294 src_port, dst_port, flow_id);
295 if (!filter) {
296 ret = -ENOMEM;
297 goto err;
298 }
299 }
300
301 queue_work(priv->mdev->workqueue, &filter->work);
302
303out:
304 ret = filter->id;
305err:
306 spin_unlock_bh(&priv->filters_lock);
307
308 return ret;
309}
310
311void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
312 struct mlx4_en_rx_ring *rx_ring)
313{
314 struct mlx4_en_filter *filter, *tmp;
315 LIST_HEAD(del_list);
316
317 spin_lock_bh(&priv->filters_lock);
318 list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
319 list_move(&filter->next, &del_list);
320 hlist_del(&filter->filter_chain);
321 }
322 spin_unlock_bh(&priv->filters_lock);
323
324 list_for_each_entry_safe(filter, tmp, &del_list, next) {
325 cancel_work_sync(&filter->work);
326 mlx4_en_filter_free(filter);
327 }
328}
329
330static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
331{
332 struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
333 LIST_HEAD(del_list);
334 int i = 0;
335
336 spin_lock_bh(&priv->filters_lock);
337 list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
338 if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
339 break;
340
341 if (filter->activated &&
342 !work_pending(&filter->work) &&
343 rps_may_expire_flow(priv->dev,
344 filter->rxq_index, filter->flow_id,
345 filter->id)) {
346 list_move(&filter->next, &del_list);
347 hlist_del(&filter->filter_chain);
348 } else
349 last_filter = filter;
350
351 i++;
352 }
353
354 if (last_filter && (&last_filter->next != priv->filters.next))
355 list_move(&priv->filters, &last_filter->next);
356
357 spin_unlock_bh(&priv->filters_lock);
358
359 list_for_each_entry_safe(filter, tmp, &del_list, next)
360 mlx4_en_filter_free(filter);
361}
362#endif
363
69static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) 364static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
70{ 365{
71 struct mlx4_en_priv *priv = netdev_priv(dev); 366 struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -170,33 +465,81 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
170static void mlx4_en_clear_list(struct net_device *dev) 465static void mlx4_en_clear_list(struct net_device *dev)
171{ 466{
172 struct mlx4_en_priv *priv = netdev_priv(dev); 467 struct mlx4_en_priv *priv = netdev_priv(dev);
468 struct mlx4_en_mc_list *tmp, *mc_to_del;
173 469
174 kfree(priv->mc_addrs); 470 list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
175 priv->mc_addrs = NULL; 471 list_del(&mc_to_del->list);
176 priv->mc_addrs_cnt = 0; 472 kfree(mc_to_del);
473 }
177} 474}
178 475
179static void mlx4_en_cache_mclist(struct net_device *dev) 476static void mlx4_en_cache_mclist(struct net_device *dev)
180{ 477{
181 struct mlx4_en_priv *priv = netdev_priv(dev); 478 struct mlx4_en_priv *priv = netdev_priv(dev);
182 struct netdev_hw_addr *ha; 479 struct netdev_hw_addr *ha;
183 char *mc_addrs; 480 struct mlx4_en_mc_list *tmp;
184 int mc_addrs_cnt = netdev_mc_count(dev);
185 int i;
186 481
187 mc_addrs = kmalloc(mc_addrs_cnt * ETH_ALEN, GFP_ATOMIC);
188 if (!mc_addrs) {
189 en_err(priv, "failed to allocate multicast list\n");
190 return;
191 }
192 i = 0;
193 netdev_for_each_mc_addr(ha, dev)
194 memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
195 mlx4_en_clear_list(dev); 482 mlx4_en_clear_list(dev);
196 priv->mc_addrs = mc_addrs; 483 netdev_for_each_mc_addr(ha, dev) {
197 priv->mc_addrs_cnt = mc_addrs_cnt; 484 tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
485 if (!tmp) {
486 en_err(priv, "failed to allocate multicast list\n");
487 mlx4_en_clear_list(dev);
488 return;
489 }
490 memcpy(tmp->addr, ha->addr, ETH_ALEN);
491 list_add_tail(&tmp->list, &priv->mc_list);
492 }
198} 493}
199 494
495static void update_mclist_flags(struct mlx4_en_priv *priv,
496 struct list_head *dst,
497 struct list_head *src)
498{
499 struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
500 bool found;
501
502 /* Find all the entries that should be removed from dst,
503 * These are the entries that are not found in src
504 */
505 list_for_each_entry(dst_tmp, dst, list) {
506 found = false;
507 list_for_each_entry(src_tmp, src, list) {
508 if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
509 found = true;
510 break;
511 }
512 }
513 if (!found)
514 dst_tmp->action = MCLIST_REM;
515 }
516
517 /* Add entries that exist in src but not in dst
518 * mark them as need to add
519 */
520 list_for_each_entry(src_tmp, src, list) {
521 found = false;
522 list_for_each_entry(dst_tmp, dst, list) {
523 if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
524 dst_tmp->action = MCLIST_NONE;
525 found = true;
526 break;
527 }
528 }
529 if (!found) {
530 new_mc = kmalloc(sizeof(struct mlx4_en_mc_list),
531 GFP_KERNEL);
532 if (!new_mc) {
533 en_err(priv, "Failed to allocate current multicast list\n");
534 return;
535 }
536 memcpy(new_mc, src_tmp,
537 sizeof(struct mlx4_en_mc_list));
538 new_mc->action = MCLIST_ADD;
539 list_add_tail(&new_mc->list, dst);
540 }
541 }
542}
200 543
201static void mlx4_en_set_multicast(struct net_device *dev) 544static void mlx4_en_set_multicast(struct net_device *dev)
202{ 545{
@@ -214,9 +557,10 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
214 mcast_task); 557 mcast_task);
215 struct mlx4_en_dev *mdev = priv->mdev; 558 struct mlx4_en_dev *mdev = priv->mdev;
216 struct net_device *dev = priv->dev; 559 struct net_device *dev = priv->dev;
560 struct mlx4_en_mc_list *mclist, *tmp;
217 u64 mcast_addr = 0; 561 u64 mcast_addr = 0;
218 u8 mc_list[16] = {0}; 562 u8 mc_list[16] = {0};
219 int err; 563 int err = 0;
220 564
221 mutex_lock(&mdev->state_lock); 565 mutex_lock(&mdev->state_lock);
222 if (!mdev->device_up) { 566 if (!mdev->device_up) {
@@ -251,16 +595,46 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
251 priv->flags |= MLX4_EN_FLAG_PROMISC; 595 priv->flags |= MLX4_EN_FLAG_PROMISC;
252 596
253 /* Enable promiscouos mode */ 597 /* Enable promiscouos mode */
254 if (!(mdev->dev->caps.flags & 598 switch (mdev->dev->caps.steering_mode) {
255 MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) 599 case MLX4_STEERING_MODE_DEVICE_MANAGED:
256 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, 600 err = mlx4_flow_steer_promisc_add(mdev->dev,
257 priv->base_qpn, 1); 601 priv->port,
258 else 602 priv->base_qpn,
259 err = mlx4_unicast_promisc_add(mdev->dev, priv->base_qpn, 603 MLX4_FS_PROMISC_UPLINK);
604 if (err)
605 en_err(priv, "Failed enabling promiscuous mode\n");
606 priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
607 break;
608
609 case MLX4_STEERING_MODE_B0:
610 err = mlx4_unicast_promisc_add(mdev->dev,
611 priv->base_qpn,
260 priv->port); 612 priv->port);
261 if (err) 613 if (err)
262 en_err(priv, "Failed enabling " 614 en_err(priv, "Failed enabling unicast promiscuous mode\n");
263 "promiscuous mode\n"); 615
616 /* Add the default qp number as multicast
617 * promisc
618 */
619 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
620 err = mlx4_multicast_promisc_add(mdev->dev,
621 priv->base_qpn,
622 priv->port);
623 if (err)
624 en_err(priv, "Failed enabling multicast promiscuous mode\n");
625 priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
626 }
627 break;
628
629 case MLX4_STEERING_MODE_A0:
630 err = mlx4_SET_PORT_qpn_calc(mdev->dev,
631 priv->port,
632 priv->base_qpn,
633 1);
634 if (err)
635 en_err(priv, "Failed enabling promiscuous mode\n");
636 break;
637 }
264 638
265 /* Disable port multicast filter (unconditionally) */ 639 /* Disable port multicast filter (unconditionally) */
266 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 640 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
@@ -269,15 +643,6 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
269 en_err(priv, "Failed disabling " 643 en_err(priv, "Failed disabling "
270 "multicast filter\n"); 644 "multicast filter\n");
271 645
272 /* Add the default qp number as multicast promisc */
273 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
274 err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
275 priv->port);
276 if (err)
277 en_err(priv, "Failed entering multicast promisc mode\n");
278 priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
279 }
280
281 /* Disable port VLAN filter */ 646 /* Disable port VLAN filter */
282 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); 647 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
283 if (err) 648 if (err)
@@ -296,22 +661,40 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
296 priv->flags &= ~MLX4_EN_FLAG_PROMISC; 661 priv->flags &= ~MLX4_EN_FLAG_PROMISC;
297 662
298 /* Disable promiscouos mode */ 663 /* Disable promiscouos mode */
299 if (!(mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) 664 switch (mdev->dev->caps.steering_mode) {
300 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, 665 case MLX4_STEERING_MODE_DEVICE_MANAGED:
301 priv->base_qpn, 0); 666 err = mlx4_flow_steer_promisc_remove(mdev->dev,
302 else 667 priv->port,
303 err = mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn, 668 MLX4_FS_PROMISC_UPLINK);
669 if (err)
670 en_err(priv, "Failed disabling promiscuous mode\n");
671 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
672 break;
673
674 case MLX4_STEERING_MODE_B0:
675 err = mlx4_unicast_promisc_remove(mdev->dev,
676 priv->base_qpn,
304 priv->port); 677 priv->port);
305 if (err) 678 if (err)
306 en_err(priv, "Failed disabling promiscuous mode\n"); 679 en_err(priv, "Failed disabling unicast promiscuous mode\n");
680 /* Disable Multicast promisc */
681 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
682 err = mlx4_multicast_promisc_remove(mdev->dev,
683 priv->base_qpn,
684 priv->port);
685 if (err)
686 en_err(priv, "Failed disabling multicast promiscuous mode\n");
687 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
688 }
689 break;
307 690
308 /* Disable Multicast promisc */ 691 case MLX4_STEERING_MODE_A0:
309 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { 692 err = mlx4_SET_PORT_qpn_calc(mdev->dev,
310 err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn, 693 priv->port,
311 priv->port); 694 priv->base_qpn, 0);
312 if (err) 695 if (err)
313 en_err(priv, "Failed disabling multicast promiscuous mode\n"); 696 en_err(priv, "Failed disabling promiscuous mode\n");
314 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 697 break;
315 } 698 }
316 699
317 /* Enable port VLAN filter */ 700 /* Enable port VLAN filter */
@@ -329,18 +712,46 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
329 712
330 /* Add the default qp number as multicast promisc */ 713 /* Add the default qp number as multicast promisc */
331 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { 714 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
332 err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn, 715 switch (mdev->dev->caps.steering_mode) {
333 priv->port); 716 case MLX4_STEERING_MODE_DEVICE_MANAGED:
717 err = mlx4_flow_steer_promisc_add(mdev->dev,
718 priv->port,
719 priv->base_qpn,
720 MLX4_FS_PROMISC_ALL_MULTI);
721 break;
722
723 case MLX4_STEERING_MODE_B0:
724 err = mlx4_multicast_promisc_add(mdev->dev,
725 priv->base_qpn,
726 priv->port);
727 break;
728
729 case MLX4_STEERING_MODE_A0:
730 break;
731 }
334 if (err) 732 if (err)
335 en_err(priv, "Failed entering multicast promisc mode\n"); 733 en_err(priv, "Failed entering multicast promisc mode\n");
336 priv->flags |= MLX4_EN_FLAG_MC_PROMISC; 734 priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
337 } 735 }
338 } else { 736 } else {
339 int i;
340 /* Disable Multicast promisc */ 737 /* Disable Multicast promisc */
341 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { 738 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
342 err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn, 739 switch (mdev->dev->caps.steering_mode) {
343 priv->port); 740 case MLX4_STEERING_MODE_DEVICE_MANAGED:
741 err = mlx4_flow_steer_promisc_remove(mdev->dev,
742 priv->port,
743 MLX4_FS_PROMISC_ALL_MULTI);
744 break;
745
746 case MLX4_STEERING_MODE_B0:
747 err = mlx4_multicast_promisc_remove(mdev->dev,
748 priv->base_qpn,
749 priv->port);
750 break;
751
752 case MLX4_STEERING_MODE_A0:
753 break;
754 }
344 if (err) 755 if (err)
345 en_err(priv, "Failed disabling multicast promiscuous mode\n"); 756 en_err(priv, "Failed disabling multicast promiscuous mode\n");
346 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 757 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
@@ -351,13 +762,6 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
351 if (err) 762 if (err)
352 en_err(priv, "Failed disabling multicast filter\n"); 763 en_err(priv, "Failed disabling multicast filter\n");
353 764
354 /* Detach our qp from all the multicast addresses */
355 for (i = 0; i < priv->mc_addrs_cnt; i++) {
356 memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
357 mc_list[5] = priv->port;
358 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
359 mc_list, MLX4_PROT_ETH);
360 }
361 /* Flush mcast filter and init it with broadcast address */ 765 /* Flush mcast filter and init it with broadcast address */
362 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST, 766 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
363 1, MLX4_MCAST_CONFIG); 767 1, MLX4_MCAST_CONFIG);
@@ -367,13 +771,8 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
367 netif_tx_lock_bh(dev); 771 netif_tx_lock_bh(dev);
368 mlx4_en_cache_mclist(dev); 772 mlx4_en_cache_mclist(dev);
369 netif_tx_unlock_bh(dev); 773 netif_tx_unlock_bh(dev);
370 for (i = 0; i < priv->mc_addrs_cnt; i++) { 774 list_for_each_entry(mclist, &priv->mc_list, list) {
371 mcast_addr = 775 mcast_addr = mlx4_en_mac_to_u64(mclist->addr);
372 mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN);
373 memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
374 mc_list[5] = priv->port;
375 mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp,
376 mc_list, 0, MLX4_PROT_ETH);
377 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 776 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
378 mcast_addr, 0, MLX4_MCAST_CONFIG); 777 mcast_addr, 0, MLX4_MCAST_CONFIG);
379 } 778 }
@@ -381,6 +780,40 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
381 0, MLX4_MCAST_ENABLE); 780 0, MLX4_MCAST_ENABLE);
382 if (err) 781 if (err)
383 en_err(priv, "Failed enabling multicast filter\n"); 782 en_err(priv, "Failed enabling multicast filter\n");
783
784 update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
785 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
786 if (mclist->action == MCLIST_REM) {
787 /* detach this address and delete from list */
788 memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
789 mc_list[5] = priv->port;
790 err = mlx4_multicast_detach(mdev->dev,
791 &priv->rss_map.indir_qp,
792 mc_list,
793 MLX4_PROT_ETH,
794 mclist->reg_id);
795 if (err)
796 en_err(priv, "Fail to detach multicast address\n");
797
798 /* remove from list */
799 list_del(&mclist->list);
800 kfree(mclist);
801 } else if (mclist->action == MCLIST_ADD) {
802 /* attach the address */
803 memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
804 /* needed for B0 steering support */
805 mc_list[5] = priv->port;
806 err = mlx4_multicast_attach(mdev->dev,
807 &priv->rss_map.indir_qp,
808 mc_list,
809 priv->port, 0,
810 MLX4_PROT_ETH,
811 &mclist->reg_id);
812 if (err)
813 en_err(priv, "Fail to attach multicast address\n");
814
815 }
816 }
384 } 817 }
385out: 818out:
386 mutex_unlock(&mdev->state_lock); 819 mutex_unlock(&mdev->state_lock);
@@ -605,6 +1038,9 @@ int mlx4_en_start_port(struct net_device *dev)
605 return 0; 1038 return 0;
606 } 1039 }
607 1040
1041 INIT_LIST_HEAD(&priv->mc_list);
1042 INIT_LIST_HEAD(&priv->curr_list);
1043
608 /* Calculate Rx buf size */ 1044 /* Calculate Rx buf size */
609 dev->mtu = min(dev->mtu, priv->max_mtu); 1045 dev->mtu = min(dev->mtu, priv->max_mtu);
610 mlx4_en_calc_rx_buf(dev); 1046 mlx4_en_calc_rx_buf(dev);
@@ -653,6 +1089,10 @@ int mlx4_en_start_port(struct net_device *dev)
653 goto mac_err; 1089 goto mac_err;
654 } 1090 }
655 1091
1092 err = mlx4_en_create_drop_qp(priv);
1093 if (err)
1094 goto rss_err;
1095
656 /* Configure tx cq's and rings */ 1096 /* Configure tx cq's and rings */
657 for (i = 0; i < priv->tx_ring_num; i++) { 1097 for (i = 0; i < priv->tx_ring_num; i++) {
658 /* Configure cq */ 1098 /* Configure cq */
@@ -720,13 +1160,23 @@ int mlx4_en_start_port(struct net_device *dev)
720 1160
721 /* Attach rx QP to bradcast address */ 1161 /* Attach rx QP to bradcast address */
722 memset(&mc_list[10], 0xff, ETH_ALEN); 1162 memset(&mc_list[10], 0xff, ETH_ALEN);
723 mc_list[5] = priv->port; 1163 mc_list[5] = priv->port; /* needed for B0 steering support */
724 if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list, 1164 if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
725 0, MLX4_PROT_ETH)) 1165 priv->port, 0, MLX4_PROT_ETH,
1166 &priv->broadcast_id))
726 mlx4_warn(mdev, "Failed Attaching Broadcast\n"); 1167 mlx4_warn(mdev, "Failed Attaching Broadcast\n");
727 1168
728 /* Must redo promiscuous mode setup. */ 1169 /* Must redo promiscuous mode setup. */
729 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC); 1170 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
1171 if (mdev->dev->caps.steering_mode ==
1172 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1173 mlx4_flow_steer_promisc_remove(mdev->dev,
1174 priv->port,
1175 MLX4_FS_PROMISC_UPLINK);
1176 mlx4_flow_steer_promisc_remove(mdev->dev,
1177 priv->port,
1178 MLX4_FS_PROMISC_ALL_MULTI);
1179 }
730 1180
731 /* Schedule multicast task to populate multicast list */ 1181 /* Schedule multicast task to populate multicast list */
732 queue_work(mdev->workqueue, &priv->mcast_task); 1182 queue_work(mdev->workqueue, &priv->mcast_task);
@@ -742,7 +1192,8 @@ tx_err:
742 mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]); 1192 mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
743 mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]); 1193 mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
744 } 1194 }
745 1195 mlx4_en_destroy_drop_qp(priv);
1196rss_err:
746 mlx4_en_release_rss_steer(priv); 1197 mlx4_en_release_rss_steer(priv);
747mac_err: 1198mac_err:
748 mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn); 1199 mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
@@ -760,6 +1211,7 @@ void mlx4_en_stop_port(struct net_device *dev)
760{ 1211{
761 struct mlx4_en_priv *priv = netdev_priv(dev); 1212 struct mlx4_en_priv *priv = netdev_priv(dev);
762 struct mlx4_en_dev *mdev = priv->mdev; 1213 struct mlx4_en_dev *mdev = priv->mdev;
1214 struct mlx4_en_mc_list *mclist, *tmp;
763 int i; 1215 int i;
764 u8 mc_list[16] = {0}; 1216 u8 mc_list[16] = {0};
765 1217
@@ -778,19 +1230,26 @@ void mlx4_en_stop_port(struct net_device *dev)
778 1230
779 /* Detach All multicasts */ 1231 /* Detach All multicasts */
780 memset(&mc_list[10], 0xff, ETH_ALEN); 1232 memset(&mc_list[10], 0xff, ETH_ALEN);
781 mc_list[5] = priv->port; 1233 mc_list[5] = priv->port; /* needed for B0 steering support */
782 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list, 1234 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
783 MLX4_PROT_ETH); 1235 MLX4_PROT_ETH, priv->broadcast_id);
784 for (i = 0; i < priv->mc_addrs_cnt; i++) { 1236 list_for_each_entry(mclist, &priv->curr_list, list) {
785 memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN); 1237 memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
786 mc_list[5] = priv->port; 1238 mc_list[5] = priv->port;
787 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, 1239 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
788 mc_list, MLX4_PROT_ETH); 1240 mc_list, MLX4_PROT_ETH, mclist->reg_id);
789 } 1241 }
790 mlx4_en_clear_list(dev); 1242 mlx4_en_clear_list(dev);
1243 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
1244 list_del(&mclist->list);
1245 kfree(mclist);
1246 }
1247
791 /* Flush multicast filter */ 1248 /* Flush multicast filter */
792 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG); 1249 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
793 1250
1251 mlx4_en_destroy_drop_qp(priv);
1252
794 /* Free TX Rings */ 1253 /* Free TX Rings */
795 for (i = 0; i < priv->tx_ring_num; i++) { 1254 for (i = 0; i < priv->tx_ring_num; i++) {
796 mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]); 1255 mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
@@ -915,6 +1374,11 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
915{ 1374{
916 int i; 1375 int i;
917 1376
1377#ifdef CONFIG_RFS_ACCEL
1378 free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
1379 priv->dev->rx_cpu_rmap = NULL;
1380#endif
1381
918 for (i = 0; i < priv->tx_ring_num; i++) { 1382 for (i = 0; i < priv->tx_ring_num; i++) {
919 if (priv->tx_ring[i].tx_info) 1383 if (priv->tx_ring[i].tx_info)
920 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); 1384 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
@@ -970,6 +1434,15 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
970 goto err; 1434 goto err;
971 } 1435 }
972 1436
1437#ifdef CONFIG_RFS_ACCEL
1438 priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->rx_ring_num);
1439 if (!priv->dev->rx_cpu_rmap)
1440 goto err;
1441
1442 INIT_LIST_HEAD(&priv->filters);
1443 spin_lock_init(&priv->filters_lock);
1444#endif
1445
973 return 0; 1446 return 0;
974 1447
975err: 1448err:
@@ -1077,6 +1550,9 @@ static const struct net_device_ops mlx4_netdev_ops = {
1077#endif 1550#endif
1078 .ndo_set_features = mlx4_en_set_features, 1551 .ndo_set_features = mlx4_en_set_features,
1079 .ndo_setup_tc = mlx4_en_setup_tc, 1552 .ndo_setup_tc = mlx4_en_setup_tc,
1553#ifdef CONFIG_RFS_ACCEL
1554 .ndo_rx_flow_steer = mlx4_en_filter_rfs,
1555#endif
1080}; 1556};
1081 1557
1082int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, 1558int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -1194,6 +1670,10 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
1194 NETIF_F_HW_VLAN_FILTER; 1670 NETIF_F_HW_VLAN_FILTER;
1195 dev->hw_features |= NETIF_F_LOOPBACK; 1671 dev->hw_features |= NETIF_F_LOOPBACK;
1196 1672
1673 if (mdev->dev->caps.steering_mode ==
1674 MLX4_STEERING_MODE_DEVICE_MANAGED)
1675 dev->hw_features |= NETIF_F_NTUPLE;
1676
1197 mdev->pndev[port] = dev; 1677 mdev->pndev[port] = dev;
1198 1678
1199 netif_carrier_off(dev); 1679 netif_carrier_off(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index d49a7ac3187d..f32e70300770 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -41,41 +41,75 @@
41 41
42#include "mlx4_en.h" 42#include "mlx4_en.h"
43 43
44 44static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
45static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv, 45 struct mlx4_en_rx_desc *rx_desc,
46 struct mlx4_en_rx_desc *rx_desc, 46 struct mlx4_en_rx_alloc *frags,
47 struct page_frag *skb_frags, 47 struct mlx4_en_rx_alloc *ring_alloc)
48 struct mlx4_en_rx_alloc *ring_alloc,
49 int i)
50{ 48{
51 struct mlx4_en_frag_info *frag_info = &priv->frag_info[i]; 49 struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
52 struct mlx4_en_rx_alloc *page_alloc = &ring_alloc[i]; 50 struct mlx4_en_frag_info *frag_info;
53 struct page *page; 51 struct page *page;
54 dma_addr_t dma; 52 dma_addr_t dma;
53 int i;
55 54
56 if (page_alloc->offset == frag_info->last_offset) { 55 for (i = 0; i < priv->num_frags; i++) {
57 /* Allocate new page */ 56 frag_info = &priv->frag_info[i];
58 page = alloc_pages(GFP_ATOMIC | __GFP_COMP, MLX4_EN_ALLOC_ORDER); 57 if (ring_alloc[i].offset == frag_info->last_offset) {
59 if (!page) 58 page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
60 return -ENOMEM; 59 MLX4_EN_ALLOC_ORDER);
61 60 if (!page)
62 skb_frags[i].page = page_alloc->page; 61 goto out;
63 skb_frags[i].offset = page_alloc->offset; 62 dma = dma_map_page(priv->ddev, page, 0,
64 page_alloc->page = page; 63 MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
65 page_alloc->offset = frag_info->frag_align; 64 if (dma_mapping_error(priv->ddev, dma)) {
66 } else { 65 put_page(page);
67 page = page_alloc->page; 66 goto out;
68 get_page(page); 67 }
68 page_alloc[i].page = page;
69 page_alloc[i].dma = dma;
70 page_alloc[i].offset = frag_info->frag_align;
71 } else {
72 page_alloc[i].page = ring_alloc[i].page;
73 get_page(ring_alloc[i].page);
74 page_alloc[i].dma = ring_alloc[i].dma;
75 page_alloc[i].offset = ring_alloc[i].offset +
76 frag_info->frag_stride;
77 }
78 }
69 79
70 skb_frags[i].page = page; 80 for (i = 0; i < priv->num_frags; i++) {
71 skb_frags[i].offset = page_alloc->offset; 81 frags[i] = ring_alloc[i];
72 page_alloc->offset += frag_info->frag_stride; 82 dma = ring_alloc[i].dma + ring_alloc[i].offset;
83 ring_alloc[i] = page_alloc[i];
84 rx_desc->data[i].addr = cpu_to_be64(dma);
73 } 85 }
74 dma = dma_map_single(priv->ddev, page_address(skb_frags[i].page) + 86
75 skb_frags[i].offset, frag_info->frag_size,
76 PCI_DMA_FROMDEVICE);
77 rx_desc->data[i].addr = cpu_to_be64(dma);
78 return 0; 87 return 0;
88
89
90out:
91 while (i--) {
92 frag_info = &priv->frag_info[i];
93 if (ring_alloc[i].offset == frag_info->last_offset)
94 dma_unmap_page(priv->ddev, page_alloc[i].dma,
95 MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
96 put_page(page_alloc[i].page);
97 }
98 return -ENOMEM;
99}
100
101static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
102 struct mlx4_en_rx_alloc *frags,
103 int i)
104{
105 struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
106
107 if (frags[i].offset == frag_info->last_offset) {
108 dma_unmap_page(priv->ddev, frags[i].dma, MLX4_EN_ALLOC_SIZE,
109 PCI_DMA_FROMDEVICE);
110 }
111 if (frags[i].page)
112 put_page(frags[i].page);
79} 113}
80 114
81static int mlx4_en_init_allocator(struct mlx4_en_priv *priv, 115static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
@@ -91,6 +125,13 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
91 if (!page_alloc->page) 125 if (!page_alloc->page)
92 goto out; 126 goto out;
93 127
128 page_alloc->dma = dma_map_page(priv->ddev, page_alloc->page, 0,
129 MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
130 if (dma_mapping_error(priv->ddev, page_alloc->dma)) {
131 put_page(page_alloc->page);
132 page_alloc->page = NULL;
133 goto out;
134 }
94 page_alloc->offset = priv->frag_info[i].frag_align; 135 page_alloc->offset = priv->frag_info[i].frag_align;
95 en_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n", 136 en_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
96 i, page_alloc->page); 137 i, page_alloc->page);
@@ -100,6 +141,8 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
100out: 141out:
101 while (i--) { 142 while (i--) {
102 page_alloc = &ring->page_alloc[i]; 143 page_alloc = &ring->page_alloc[i];
144 dma_unmap_page(priv->ddev, page_alloc->dma,
145 MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
103 put_page(page_alloc->page); 146 put_page(page_alloc->page);
104 page_alloc->page = NULL; 147 page_alloc->page = NULL;
105 } 148 }
@@ -117,24 +160,22 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
117 en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n", 160 en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
118 i, page_count(page_alloc->page)); 161 i, page_count(page_alloc->page));
119 162
163 dma_unmap_page(priv->ddev, page_alloc->dma,
164 MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
120 put_page(page_alloc->page); 165 put_page(page_alloc->page);
121 page_alloc->page = NULL; 166 page_alloc->page = NULL;
122 } 167 }
123} 168}
124 169
125
126static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv, 170static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
127 struct mlx4_en_rx_ring *ring, int index) 171 struct mlx4_en_rx_ring *ring, int index)
128{ 172{
129 struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index; 173 struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
130 struct skb_frag_struct *skb_frags = ring->rx_info +
131 (index << priv->log_rx_info);
132 int possible_frags; 174 int possible_frags;
133 int i; 175 int i;
134 176
135 /* Set size and memtype fields */ 177 /* Set size and memtype fields */
136 for (i = 0; i < priv->num_frags; i++) { 178 for (i = 0; i < priv->num_frags; i++) {
137 skb_frag_size_set(&skb_frags[i], priv->frag_info[i].frag_size);
138 rx_desc->data[i].byte_count = 179 rx_desc->data[i].byte_count =
139 cpu_to_be32(priv->frag_info[i].frag_size); 180 cpu_to_be32(priv->frag_info[i].frag_size);
140 rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key); 181 rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
@@ -151,29 +192,14 @@ static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
151 } 192 }
152} 193}
153 194
154
155static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv, 195static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
156 struct mlx4_en_rx_ring *ring, int index) 196 struct mlx4_en_rx_ring *ring, int index)
157{ 197{
158 struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride); 198 struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
159 struct page_frag *skb_frags = ring->rx_info + 199 struct mlx4_en_rx_alloc *frags = ring->rx_info +
160 (index << priv->log_rx_info); 200 (index << priv->log_rx_info);
161 int i;
162 201
163 for (i = 0; i < priv->num_frags; i++) 202 return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc);
164 if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, ring->page_alloc, i))
165 goto err;
166
167 return 0;
168
169err:
170 while (i--) {
171 dma_addr_t dma = be64_to_cpu(rx_desc->data[i].addr);
172 pci_unmap_single(priv->mdev->pdev, dma, skb_frags[i].size,
173 PCI_DMA_FROMDEVICE);
174 put_page(skb_frags[i].page);
175 }
176 return -ENOMEM;
177} 203}
178 204
179static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring) 205static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
@@ -185,20 +211,13 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
185 struct mlx4_en_rx_ring *ring, 211 struct mlx4_en_rx_ring *ring,
186 int index) 212 int index)
187{ 213{
188 struct page_frag *skb_frags; 214 struct mlx4_en_rx_alloc *frags;
189 struct mlx4_en_rx_desc *rx_desc = ring->buf + (index << ring->log_stride);
190 dma_addr_t dma;
191 int nr; 215 int nr;
192 216
193 skb_frags = ring->rx_info + (index << priv->log_rx_info); 217 frags = ring->rx_info + (index << priv->log_rx_info);
194 for (nr = 0; nr < priv->num_frags; nr++) { 218 for (nr = 0; nr < priv->num_frags; nr++) {
195 en_dbg(DRV, priv, "Freeing fragment:%d\n", nr); 219 en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
196 dma = be64_to_cpu(rx_desc->data[nr].addr); 220 mlx4_en_free_frag(priv, frags, nr);
197
198 en_dbg(DRV, priv, "Unmapping buffer at dma:0x%llx\n", (u64) dma);
199 dma_unmap_single(priv->ddev, dma, skb_frags[nr].size,
200 PCI_DMA_FROMDEVICE);
201 put_page(skb_frags[nr].page);
202 } 221 }
203} 222}
204 223
@@ -268,10 +287,9 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
268 struct mlx4_en_rx_ring *ring, u32 size, u16 stride) 287 struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
269{ 288{
270 struct mlx4_en_dev *mdev = priv->mdev; 289 struct mlx4_en_dev *mdev = priv->mdev;
271 int err; 290 int err = -ENOMEM;
272 int tmp; 291 int tmp;
273 292
274
275 ring->prod = 0; 293 ring->prod = 0;
276 ring->cons = 0; 294 ring->cons = 0;
277 ring->size = size; 295 ring->size = size;
@@ -281,7 +299,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
281 ring->buf_size = ring->size * ring->stride + TXBB_SIZE; 299 ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
282 300
283 tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS * 301 tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
284 sizeof(struct skb_frag_struct)); 302 sizeof(struct mlx4_en_rx_alloc));
285 ring->rx_info = vmalloc(tmp); 303 ring->rx_info = vmalloc(tmp);
286 if (!ring->rx_info) 304 if (!ring->rx_info)
287 return -ENOMEM; 305 return -ENOMEM;
@@ -338,7 +356,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
338 memset(ring->buf, 0, ring->buf_size); 356 memset(ring->buf, 0, ring->buf_size);
339 mlx4_en_update_rx_prod_db(ring); 357 mlx4_en_update_rx_prod_db(ring);
340 358
341 /* Initailize all descriptors */ 359 /* Initialize all descriptors */
342 for (i = 0; i < ring->size; i++) 360 for (i = 0; i < ring->size; i++)
343 mlx4_en_init_rx_desc(priv, ring, i); 361 mlx4_en_init_rx_desc(priv, ring, i);
344 362
@@ -389,6 +407,9 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
389 mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE); 407 mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
390 vfree(ring->rx_info); 408 vfree(ring->rx_info);
391 ring->rx_info = NULL; 409 ring->rx_info = NULL;
410#ifdef CONFIG_RFS_ACCEL
411 mlx4_en_cleanup_filters(priv, ring);
412#endif
392} 413}
393 414
394void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv, 415void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
@@ -401,12 +422,10 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
401} 422}
402 423
403 424
404/* Unmap a completed descriptor and free unused pages */
405static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv, 425static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
406 struct mlx4_en_rx_desc *rx_desc, 426 struct mlx4_en_rx_desc *rx_desc,
407 struct page_frag *skb_frags, 427 struct mlx4_en_rx_alloc *frags,
408 struct sk_buff *skb, 428 struct sk_buff *skb,
409 struct mlx4_en_rx_alloc *page_alloc,
410 int length) 429 int length)
411{ 430{
412 struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags; 431 struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags;
@@ -414,26 +433,24 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
414 int nr; 433 int nr;
415 dma_addr_t dma; 434 dma_addr_t dma;
416 435
417 /* Collect used fragments while replacing them in the HW descirptors */ 436 /* Collect used fragments while replacing them in the HW descriptors */
418 for (nr = 0; nr < priv->num_frags; nr++) { 437 for (nr = 0; nr < priv->num_frags; nr++) {
419 frag_info = &priv->frag_info[nr]; 438 frag_info = &priv->frag_info[nr];
420 if (length <= frag_info->frag_prefix_size) 439 if (length <= frag_info->frag_prefix_size)
421 break; 440 break;
441 if (!frags[nr].page)
442 goto fail;
422 443
423 /* Save page reference in skb */
424 __skb_frag_set_page(&skb_frags_rx[nr], skb_frags[nr].page);
425 skb_frag_size_set(&skb_frags_rx[nr], skb_frags[nr].size);
426 skb_frags_rx[nr].page_offset = skb_frags[nr].offset;
427 skb->truesize += frag_info->frag_stride;
428 dma = be64_to_cpu(rx_desc->data[nr].addr); 444 dma = be64_to_cpu(rx_desc->data[nr].addr);
445 dma_sync_single_for_cpu(priv->ddev, dma, frag_info->frag_size,
446 DMA_FROM_DEVICE);
429 447
430 /* Allocate a replacement page */ 448 /* Save page reference in skb */
431 if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, page_alloc, nr)) 449 get_page(frags[nr].page);
432 goto fail; 450 __skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
433 451 skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size);
434 /* Unmap buffer */ 452 skb_frags_rx[nr].page_offset = frags[nr].offset;
435 dma_unmap_single(priv->ddev, dma, skb_frag_size(&skb_frags_rx[nr]), 453 skb->truesize += frag_info->frag_stride;
436 PCI_DMA_FROMDEVICE);
437 } 454 }
438 /* Adjust size of last fragment to match actual length */ 455 /* Adjust size of last fragment to match actual length */
439 if (nr > 0) 456 if (nr > 0)
@@ -442,8 +459,6 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
442 return nr; 459 return nr;
443 460
444fail: 461fail:
445 /* Drop all accumulated fragments (which have already been replaced in
446 * the descriptor) of this packet; remaining fragments are reused... */
447 while (nr > 0) { 462 while (nr > 0) {
448 nr--; 463 nr--;
449 __skb_frag_unref(&skb_frags_rx[nr]); 464 __skb_frag_unref(&skb_frags_rx[nr]);
@@ -454,8 +469,7 @@ fail:
454 469
455static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv, 470static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
456 struct mlx4_en_rx_desc *rx_desc, 471 struct mlx4_en_rx_desc *rx_desc,
457 struct page_frag *skb_frags, 472 struct mlx4_en_rx_alloc *frags,
458 struct mlx4_en_rx_alloc *page_alloc,
459 unsigned int length) 473 unsigned int length)
460{ 474{
461 struct sk_buff *skb; 475 struct sk_buff *skb;
@@ -473,23 +487,20 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
473 487
474 /* Get pointer to first fragment so we could copy the headers into the 488 /* Get pointer to first fragment so we could copy the headers into the
475 * (linear part of the) skb */ 489 * (linear part of the) skb */
476 va = page_address(skb_frags[0].page) + skb_frags[0].offset; 490 va = page_address(frags[0].page) + frags[0].offset;
477 491
478 if (length <= SMALL_PACKET_SIZE) { 492 if (length <= SMALL_PACKET_SIZE) {
479 /* We are copying all relevant data to the skb - temporarily 493 /* We are copying all relevant data to the skb - temporarily
480 * synch buffers for the copy */ 494 * sync buffers for the copy */
481 dma = be64_to_cpu(rx_desc->data[0].addr); 495 dma = be64_to_cpu(rx_desc->data[0].addr);
482 dma_sync_single_for_cpu(priv->ddev, dma, length, 496 dma_sync_single_for_cpu(priv->ddev, dma, length,
483 DMA_FROM_DEVICE); 497 DMA_FROM_DEVICE);
484 skb_copy_to_linear_data(skb, va, length); 498 skb_copy_to_linear_data(skb, va, length);
485 dma_sync_single_for_device(priv->ddev, dma, length,
486 DMA_FROM_DEVICE);
487 skb->tail += length; 499 skb->tail += length;
488 } else { 500 } else {
489
490 /* Move relevant fragments to skb */ 501 /* Move relevant fragments to skb */
491 used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags, 502 used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, frags,
492 skb, page_alloc, length); 503 skb, length);
493 if (unlikely(!used_frags)) { 504 if (unlikely(!used_frags)) {
494 kfree_skb(skb); 505 kfree_skb(skb);
495 return NULL; 506 return NULL;
@@ -526,12 +537,25 @@ out_loopback:
526 dev_kfree_skb_any(skb); 537 dev_kfree_skb_any(skb);
527} 538}
528 539
540static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
541 struct mlx4_en_rx_ring *ring)
542{
543 int index = ring->prod & ring->size_mask;
544
545 while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
546 if (mlx4_en_prepare_rx_desc(priv, ring, index))
547 break;
548 ring->prod++;
549 index = ring->prod & ring->size_mask;
550 }
551}
552
529int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget) 553int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
530{ 554{
531 struct mlx4_en_priv *priv = netdev_priv(dev); 555 struct mlx4_en_priv *priv = netdev_priv(dev);
532 struct mlx4_cqe *cqe; 556 struct mlx4_cqe *cqe;
533 struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring]; 557 struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
534 struct page_frag *skb_frags; 558 struct mlx4_en_rx_alloc *frags;
535 struct mlx4_en_rx_desc *rx_desc; 559 struct mlx4_en_rx_desc *rx_desc;
536 struct sk_buff *skb; 560 struct sk_buff *skb;
537 int index; 561 int index;
@@ -540,6 +564,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
540 int polled = 0; 564 int polled = 0;
541 int ip_summed; 565 int ip_summed;
542 struct ethhdr *ethh; 566 struct ethhdr *ethh;
567 dma_addr_t dma;
543 u64 s_mac; 568 u64 s_mac;
544 569
545 if (!priv->port_up) 570 if (!priv->port_up)
@@ -555,7 +580,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
555 while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK, 580 while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
556 cq->mcq.cons_index & cq->size)) { 581 cq->mcq.cons_index & cq->size)) {
557 582
558 skb_frags = ring->rx_info + (index << priv->log_rx_info); 583 frags = ring->rx_info + (index << priv->log_rx_info);
559 rx_desc = ring->buf + (index << ring->log_stride); 584 rx_desc = ring->buf + (index << ring->log_stride);
560 585
561 /* 586 /*
@@ -579,8 +604,11 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
579 604
580 /* Get pointer to first fragment since we haven't skb yet and 605 /* Get pointer to first fragment since we haven't skb yet and
581 * cast it to ethhdr struct */ 606 * cast it to ethhdr struct */
582 ethh = (struct ethhdr *)(page_address(skb_frags[0].page) + 607 dma = be64_to_cpu(rx_desc->data[0].addr);
583 skb_frags[0].offset); 608 dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
609 DMA_FROM_DEVICE);
610 ethh = (struct ethhdr *)(page_address(frags[0].page) +
611 frags[0].offset);
584 s_mac = mlx4_en_mac_to_u64(ethh->h_source); 612 s_mac = mlx4_en_mac_to_u64(ethh->h_source);
585 613
586 /* If source MAC is equal to our own MAC and not performing 614 /* If source MAC is equal to our own MAC and not performing
@@ -612,10 +640,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
612 if (!gro_skb) 640 if (!gro_skb)
613 goto next; 641 goto next;
614 642
615 nr = mlx4_en_complete_rx_desc( 643 nr = mlx4_en_complete_rx_desc(priv,
616 priv, rx_desc, 644 rx_desc, frags, gro_skb,
617 skb_frags, gro_skb, 645 length);
618 ring->page_alloc, length);
619 if (!nr) 646 if (!nr)
620 goto next; 647 goto next;
621 648
@@ -651,8 +678,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
651 ring->csum_none++; 678 ring->csum_none++;
652 } 679 }
653 680
654 skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags, 681 skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
655 ring->page_alloc, length);
656 if (!skb) { 682 if (!skb) {
657 priv->stats.rx_dropped++; 683 priv->stats.rx_dropped++;
658 goto next; 684 goto next;
@@ -678,6 +704,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
678 netif_receive_skb(skb); 704 netif_receive_skb(skb);
679 705
680next: 706next:
707 for (nr = 0; nr < priv->num_frags; nr++)
708 mlx4_en_free_frag(priv, frags, nr);
709
681 ++cq->mcq.cons_index; 710 ++cq->mcq.cons_index;
682 index = (cq->mcq.cons_index) & ring->size_mask; 711 index = (cq->mcq.cons_index) & ring->size_mask;
683 cqe = &cq->buf[index]; 712 cqe = &cq->buf[index];
@@ -693,7 +722,7 @@ out:
693 mlx4_cq_set_ci(&cq->mcq); 722 mlx4_cq_set_ci(&cq->mcq);
694 wmb(); /* ensure HW sees CQ consumer before we post new buffers */ 723 wmb(); /* ensure HW sees CQ consumer before we post new buffers */
695 ring->cons = cq->mcq.cons_index; 724 ring->cons = cq->mcq.cons_index;
696 ring->prod += polled; /* Polled descriptors were realocated in place */ 725 mlx4_en_refill_rx_buffers(priv, ring);
697 mlx4_en_update_rx_prod_db(ring); 726 mlx4_en_update_rx_prod_db(ring);
698 return polled; 727 return polled;
699} 728}
@@ -782,7 +811,7 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
782 811
783 priv->num_frags = i; 812 priv->num_frags = i;
784 priv->rx_skb_size = eff_mtu; 813 priv->rx_skb_size = eff_mtu;
785 priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct skb_frag_struct)); 814 priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));
786 815
787 en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d " 816 en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
788 "num_frags:%d):\n", eff_mtu, priv->num_frags); 817 "num_frags:%d):\n", eff_mtu, priv->num_frags);
@@ -844,6 +873,36 @@ out:
844 return err; 873 return err;
845} 874}
846 875
876int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
877{
878 int err;
879 u32 qpn;
880
881 err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn);
882 if (err) {
883 en_err(priv, "Failed reserving drop qpn\n");
884 return err;
885 }
886 err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp);
887 if (err) {
888 en_err(priv, "Failed allocating drop qp\n");
889 mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
890 return err;
891 }
892
893 return 0;
894}
895
896void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv)
897{
898 u32 qpn;
899
900 qpn = priv->drop_qp.qpn;
901 mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp);
902 mlx4_qp_free(priv->mdev->dev, &priv->drop_qp);
903 mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
904}
905
847/* Allocate rx qp's and configure them according to rss map */ 906/* Allocate rx qp's and configure them according to rss map */
848int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) 907int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
849{ 908{
@@ -954,8 +1013,3 @@ void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
954 } 1013 }
955 mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num); 1014 mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
956} 1015}
957
958
959
960
961
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index bce98d9c0039..cd48337cbfc0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -39,6 +39,7 @@
39#include <linux/dma-mapping.h> 39#include <linux/dma-mapping.h>
40 40
41#include <linux/mlx4/cmd.h> 41#include <linux/mlx4/cmd.h>
42#include <linux/cpu_rmap.h>
42 43
43#include "mlx4.h" 44#include "mlx4.h"
44#include "fw.h" 45#include "fw.h"
@@ -1060,7 +1061,8 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
1060} 1061}
1061EXPORT_SYMBOL(mlx4_test_interrupts); 1062EXPORT_SYMBOL(mlx4_test_interrupts);
1062 1063
1063int mlx4_assign_eq(struct mlx4_dev *dev, char* name, int * vector) 1064int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
1065 int *vector)
1064{ 1066{
1065 1067
1066 struct mlx4_priv *priv = mlx4_priv(dev); 1068 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1074,6 +1076,14 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char* name, int * vector)
1074 snprintf(priv->eq_table.irq_names + 1076 snprintf(priv->eq_table.irq_names +
1075 vec * MLX4_IRQNAME_SIZE, 1077 vec * MLX4_IRQNAME_SIZE,
1076 MLX4_IRQNAME_SIZE, "%s", name); 1078 MLX4_IRQNAME_SIZE, "%s", name);
1079#ifdef CONFIG_RFS_ACCEL
1080 if (rmap) {
1081 err = irq_cpu_rmap_add(rmap,
1082 priv->eq_table.eq[vec].irq);
1083 if (err)
1084 mlx4_warn(dev, "Failed adding irq rmap\n");
1085 }
1086#endif
1077 err = request_irq(priv->eq_table.eq[vec].irq, 1087 err = request_irq(priv->eq_table.eq[vec].irq,
1078 mlx4_msi_x_interrupt, 0, 1088 mlx4_msi_x_interrupt, 0,
1079 &priv->eq_table.irq_names[vec<<5], 1089 &priv->eq_table.irq_names[vec<<5],
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 9c83bb8151ea..1d70657058a5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -123,7 +123,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
123 static const char * const fname[] = { 123 static const char * const fname[] = {
124 [0] = "RSS support", 124 [0] = "RSS support",
125 [1] = "RSS Toeplitz Hash Function support", 125 [1] = "RSS Toeplitz Hash Function support",
126 [2] = "RSS XOR Hash Function support" 126 [2] = "RSS XOR Hash Function support",
127 [3] = "Device manage flow steering support"
127 }; 128 };
128 int i; 129 int i;
129 130
@@ -391,6 +392,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
391#define QUERY_DEV_CAP_RSVD_XRC_OFFSET 0x66 392#define QUERY_DEV_CAP_RSVD_XRC_OFFSET 0x66
392#define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67 393#define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67
393#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68 394#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68
395#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76
396#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77
394#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80 397#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
395#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82 398#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82
396#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84 399#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84
@@ -474,6 +477,12 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
474 dev_cap->num_ports = field & 0xf; 477 dev_cap->num_ports = field & 0xf;
475 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET); 478 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
476 dev_cap->max_msg_sz = 1 << (field & 0x1f); 479 dev_cap->max_msg_sz = 1 << (field & 0x1f);
480 MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
481 if (field & 0x80)
482 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
483 dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
484 MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
485 dev_cap->fs_max_num_qp_per_entry = field;
477 MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET); 486 MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
478 dev_cap->stat_rate_support = stat_rate; 487 dev_cap->stat_rate_support = stat_rate;
479 MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); 488 MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
@@ -1061,6 +1070,15 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
1061#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16) 1070#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16)
1062#define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18) 1071#define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18)
1063#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b) 1072#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
1073#define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6
1074#define INIT_HCA_FS_PARAM_OFFSET 0x1d0
1075#define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00)
1076#define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12)
1077#define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b)
1078#define INIT_HCA_FS_ETH_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x21)
1079#define INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22)
1080#define INIT_HCA_FS_IB_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x25)
1081#define INIT_HCA_FS_IB_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x26)
1064#define INIT_HCA_TPT_OFFSET 0x0f0 1082#define INIT_HCA_TPT_OFFSET 0x0f0
1065#define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00) 1083#define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00)
1066#define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b) 1084#define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b)
@@ -1119,14 +1137,44 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
1119 MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET); 1137 MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET);
1120 MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET); 1138 MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);
1121 1139
1122 /* multicast attributes */ 1140 /* steering attributes */
1123 1141 if (dev->caps.steering_mode ==
1124 MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET); 1142 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1125 MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); 1143 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |=
1126 MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET); 1144 cpu_to_be32(1 <<
1127 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) 1145 INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN);
1128 MLX4_PUT(inbox, (u8) (1 << 3), INIT_HCA_UC_STEERING_OFFSET); 1146
1129 MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); 1147 MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET);
1148 MLX4_PUT(inbox, param->log_mc_entry_sz,
1149 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
1150 MLX4_PUT(inbox, param->log_mc_table_sz,
1151 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
1152 /* Enable Ethernet flow steering
1153 * with udp unicast and tcp unicast
1154 */
1155 MLX4_PUT(inbox, param->fs_hash_enable_bits,
1156 INIT_HCA_FS_ETH_BITS_OFFSET);
1157 MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
1158 INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
1159 /* Enable IPoIB flow steering
1160 * with udp unicast and tcp unicast
1161 */
1162 MLX4_PUT(inbox, param->fs_hash_enable_bits,
1163 INIT_HCA_FS_IB_BITS_OFFSET);
1164 MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
1165 INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);
1166 } else {
1167 MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
1168 MLX4_PUT(inbox, param->log_mc_entry_sz,
1169 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
1170 MLX4_PUT(inbox, param->log_mc_hash_sz,
1171 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
1172 MLX4_PUT(inbox, param->log_mc_table_sz,
1173 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
1174 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0)
1175 MLX4_PUT(inbox, (u8) (1 << 3),
1176 INIT_HCA_UC_STEERING_OFFSET);
1177 }
1130 1178
1131 /* TPT attributes */ 1179 /* TPT attributes */
1132 1180
@@ -1188,15 +1236,24 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
1188 MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET); 1236 MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
1189 MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET); 1237 MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
1190 1238
1191 /* multicast attributes */ 1239 /* steering attributes */
1240 if (dev->caps.steering_mode ==
1241 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1192 1242
1193 MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET); 1243 MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
1194 MLX4_GET(param->log_mc_entry_sz, outbox, 1244 MLX4_GET(param->log_mc_entry_sz, outbox,
1195 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); 1245 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
1196 MLX4_GET(param->log_mc_hash_sz, outbox, 1246 MLX4_GET(param->log_mc_table_sz, outbox,
1197 INIT_HCA_LOG_MC_HASH_SZ_OFFSET); 1247 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
1198 MLX4_GET(param->log_mc_table_sz, outbox, 1248 } else {
1199 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); 1249 MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
1250 MLX4_GET(param->log_mc_entry_sz, outbox,
1251 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
1252 MLX4_GET(param->log_mc_hash_sz, outbox,
1253 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
1254 MLX4_GET(param->log_mc_table_sz, outbox,
1255 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
1256 }
1200 1257
1201 /* TPT attributes */ 1258 /* TPT attributes */
1202 1259
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 64c0399e4b78..83fcbbf1b169 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -78,6 +78,8 @@ struct mlx4_dev_cap {
78 u16 wavelength[MLX4_MAX_PORTS + 1]; 78 u16 wavelength[MLX4_MAX_PORTS + 1];
79 u64 trans_code[MLX4_MAX_PORTS + 1]; 79 u64 trans_code[MLX4_MAX_PORTS + 1];
80 u16 stat_rate_support; 80 u16 stat_rate_support;
81 int fs_log_max_ucast_qp_range_size;
82 int fs_max_num_qp_per_entry;
81 u64 flags; 83 u64 flags;
82 u64 flags2; 84 u64 flags2;
83 int reserved_uars; 85 int reserved_uars;
@@ -165,6 +167,7 @@ struct mlx4_init_hca_param {
165 u8 log_mpt_sz; 167 u8 log_mpt_sz;
166 u8 log_uar_sz; 168 u8 log_uar_sz;
167 u8 uar_page_sz; /* log pg sz in 4k chunks */ 169 u8 uar_page_sz; /* log pg sz in 4k chunks */
170 u8 fs_hash_enable_bits;
168}; 171};
169 172
170struct mlx4_init_ib_param { 173struct mlx4_init_ib_param {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index a0313de122de..42645166bae2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -41,6 +41,7 @@
41#include <linux/slab.h> 41#include <linux/slab.h>
42#include <linux/io-mapping.h> 42#include <linux/io-mapping.h>
43#include <linux/delay.h> 43#include <linux/delay.h>
44#include <linux/netdevice.h>
44 45
45#include <linux/mlx4/device.h> 46#include <linux/mlx4/device.h>
46#include <linux/mlx4/doorbell.h> 47#include <linux/mlx4/doorbell.h>
@@ -90,7 +91,9 @@ module_param_named(log_num_mgm_entry_size,
90MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num" 91MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
91 " of qp per mcg, for example:" 92 " of qp per mcg, for example:"
92 " 10 gives 248.range: 9<=" 93 " 10 gives 248.range: 9<="
93 " log_num_mgm_entry_size <= 12"); 94 " log_num_mgm_entry_size <= 12."
95 " Not in use with device managed"
96 " flow steering");
94 97
95#define MLX4_VF (1 << 0) 98#define MLX4_VF (1 << 0)
96 99
@@ -243,7 +246,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
243 dev->caps.reserved_srqs = dev_cap->reserved_srqs; 246 dev->caps.reserved_srqs = dev_cap->reserved_srqs;
244 dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz; 247 dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
245 dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz; 248 dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
246 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
247 /* 249 /*
248 * Subtract 1 from the limit because we need to allocate a 250 * Subtract 1 from the limit because we need to allocate a
249 * spare CQE so the HCA HW can tell the difference between an 251 * spare CQE so the HCA HW can tell the difference between an
@@ -274,6 +276,28 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
274 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 276 dev->caps.max_gso_sz = dev_cap->max_gso_sz;
275 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; 277 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
276 278
279 if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
280 dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
281 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
282 dev->caps.fs_log_max_ucast_qp_range_size =
283 dev_cap->fs_log_max_ucast_qp_range_size;
284 } else {
285 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
286 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) {
287 dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
288 } else {
289 dev->caps.steering_mode = MLX4_STEERING_MODE_A0;
290
291 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
292 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
293 mlx4_warn(dev, "Must have UC_STEER and MC_STEER flags "
294 "set to use B0 steering. Falling back to A0 steering mode.\n");
295 }
296 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
297 }
298 mlx4_dbg(dev, "Steering mode is: %s\n",
299 mlx4_steering_mode_str(dev->caps.steering_mode));
300
277 /* Sense port always allowed on supported devices for ConnectX1 and 2 */ 301 /* Sense port always allowed on supported devices for ConnectX1 and 2 */
278 if (dev->pdev->device != 0x1003) 302 if (dev->pdev->device != 0x1003)
279 dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; 303 dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
@@ -967,9 +991,11 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
967 } 991 }
968 992
969 /* 993 /*
970 * It's not strictly required, but for simplicity just map the 994 * For flow steering device managed mode it is required to use
971 * whole multicast group table now. The table isn't very big 995 * mlx4_init_icm_table. For B0 steering mode it's not strictly
972 * and it's a lot easier than trying to track ref counts. 996 * required, but for simplicity just map the whole multicast
997 * group table now. The table isn't very big and it's a lot
998 * easier than trying to track ref counts.
973 */ 999 */
974 err = mlx4_init_icm_table(dev, &priv->mcg_table.table, 1000 err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
975 init_hca->mc_base, 1001 init_hca->mc_base,
@@ -1205,7 +1231,26 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
1205 goto err_stop_fw; 1231 goto err_stop_fw;
1206 } 1232 }
1207 1233
1234 priv->fs_hash_mode = MLX4_FS_L2_HASH;
1235
1236 switch (priv->fs_hash_mode) {
1237 case MLX4_FS_L2_HASH:
1238 init_hca.fs_hash_enable_bits = 0;
1239 break;
1240
1241 case MLX4_FS_L2_L3_L4_HASH:
1242 /* Enable flow steering with
1243 * udp unicast and tcp unicast
1244 */
1245 init_hca.fs_hash_enable_bits =
1246 MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN;
1247 break;
1248 }
1249
1208 profile = default_profile; 1250 profile = default_profile;
1251 if (dev->caps.steering_mode ==
1252 MLX4_STEERING_MODE_DEVICE_MANAGED)
1253 profile.num_mcg = MLX4_FS_NUM_MCG;
1209 1254
1210 icm_size = mlx4_make_profile(dev, &profile, &dev_cap, 1255 icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
1211 &init_hca); 1256 &init_hca);
@@ -1539,8 +1584,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
1539 struct mlx4_priv *priv = mlx4_priv(dev); 1584 struct mlx4_priv *priv = mlx4_priv(dev);
1540 struct msix_entry *entries; 1585 struct msix_entry *entries;
1541 int nreq = min_t(int, dev->caps.num_ports * 1586 int nreq = min_t(int, dev->caps.num_ports *
1542 min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT) 1587 min_t(int, netif_get_num_default_rss_queues() + 1,
1543 + MSIX_LEGACY_SZ, MAX_MSIX); 1588 MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX);
1544 int err; 1589 int err;
1545 int i; 1590 int i;
1546 1591
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index f4a8f98e402a..4ec3835e1bc2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -54,7 +54,12 @@ struct mlx4_mgm {
54 54
55int mlx4_get_mgm_entry_size(struct mlx4_dev *dev) 55int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
56{ 56{
57 return min((1 << mlx4_log_num_mgm_entry_size), MLX4_MAX_MGM_ENTRY_SIZE); 57 if (dev->caps.steering_mode ==
58 MLX4_STEERING_MODE_DEVICE_MANAGED)
59 return 1 << MLX4_FS_MGM_LOG_ENTRY_SIZE;
60 else
61 return min((1 << mlx4_log_num_mgm_entry_size),
62 MLX4_MAX_MGM_ENTRY_SIZE);
58} 63}
59 64
60int mlx4_get_qp_per_mgm(struct mlx4_dev *dev) 65int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
@@ -62,6 +67,35 @@ int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
62 return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2); 67 return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
63} 68}
64 69
70static int mlx4_QP_FLOW_STEERING_ATTACH(struct mlx4_dev *dev,
71 struct mlx4_cmd_mailbox *mailbox,
72 u32 size,
73 u64 *reg_id)
74{
75 u64 imm;
76 int err = 0;
77
78 err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0,
79 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
80 MLX4_CMD_NATIVE);
81 if (err)
82 return err;
83 *reg_id = imm;
84
85 return err;
86}
87
88static int mlx4_QP_FLOW_STEERING_DETACH(struct mlx4_dev *dev, u64 regid)
89{
90 int err = 0;
91
92 err = mlx4_cmd(dev, regid, 0, 0,
93 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
94 MLX4_CMD_NATIVE);
95
96 return err;
97}
98
65static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index, 99static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
66 struct mlx4_cmd_mailbox *mailbox) 100 struct mlx4_cmd_mailbox *mailbox)
67{ 101{
@@ -614,6 +648,311 @@ static int find_entry(struct mlx4_dev *dev, u8 port,
614 return err; 648 return err;
615} 649}
616 650
651struct mlx4_net_trans_rule_hw_ctrl {
652 __be32 ctrl;
653 __be32 vf_vep_port;
654 __be32 qpn;
655 __be32 reserved;
656};
657
658static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
659 struct mlx4_net_trans_rule_hw_ctrl *hw)
660{
661 static const u8 __promisc_mode[] = {
662 [MLX4_FS_PROMISC_NONE] = 0x0,
663 [MLX4_FS_PROMISC_UPLINK] = 0x1,
664 [MLX4_FS_PROMISC_FUNCTION_PORT] = 0x2,
665 [MLX4_FS_PROMISC_ALL_MULTI] = 0x3,
666 };
667
668 u32 dw = 0;
669
670 dw = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
671 dw |= ctrl->exclusive ? (1 << 2) : 0;
672 dw |= ctrl->allow_loopback ? (1 << 3) : 0;
673 dw |= __promisc_mode[ctrl->promisc_mode] << 8;
674 dw |= ctrl->priority << 16;
675
676 hw->ctrl = cpu_to_be32(dw);
677 hw->vf_vep_port = cpu_to_be32(ctrl->port);
678 hw->qpn = cpu_to_be32(ctrl->qpn);
679}
680
681struct mlx4_net_trans_rule_hw_ib {
682 u8 size;
683 u8 rsvd1;
684 __be16 id;
685 u32 rsvd2;
686 __be32 qpn;
687 __be32 qpn_mask;
688 u8 dst_gid[16];
689 u8 dst_gid_msk[16];
690} __packed;
691
692struct mlx4_net_trans_rule_hw_eth {
693 u8 size;
694 u8 rsvd;
695 __be16 id;
696 u8 rsvd1[6];
697 u8 dst_mac[6];
698 u16 rsvd2;
699 u8 dst_mac_msk[6];
700 u16 rsvd3;
701 u8 src_mac[6];
702 u16 rsvd4;
703 u8 src_mac_msk[6];
704 u8 rsvd5;
705 u8 ether_type_enable;
706 __be16 ether_type;
707 __be16 vlan_id_msk;
708 __be16 vlan_id;
709} __packed;
710
711struct mlx4_net_trans_rule_hw_tcp_udp {
712 u8 size;
713 u8 rsvd;
714 __be16 id;
715 __be16 rsvd1[3];
716 __be16 dst_port;
717 __be16 rsvd2;
718 __be16 dst_port_msk;
719 __be16 rsvd3;
720 __be16 src_port;
721 __be16 rsvd4;
722 __be16 src_port_msk;
723} __packed;
724
725struct mlx4_net_trans_rule_hw_ipv4 {
726 u8 size;
727 u8 rsvd;
728 __be16 id;
729 __be32 rsvd1;
730 __be32 dst_ip;
731 __be32 dst_ip_msk;
732 __be32 src_ip;
733 __be32 src_ip_msk;
734} __packed;
735
736struct _rule_hw {
737 union {
738 struct {
739 u8 size;
740 u8 rsvd;
741 __be16 id;
742 };
743 struct mlx4_net_trans_rule_hw_eth eth;
744 struct mlx4_net_trans_rule_hw_ib ib;
745 struct mlx4_net_trans_rule_hw_ipv4 ipv4;
746 struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp;
747 };
748};
749
750static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
751 struct _rule_hw *rule_hw)
752{
753 static const u16 __sw_id_hw[] = {
754 [MLX4_NET_TRANS_RULE_ID_ETH] = 0xE001,
755 [MLX4_NET_TRANS_RULE_ID_IB] = 0xE005,
756 [MLX4_NET_TRANS_RULE_ID_IPV6] = 0xE003,
757 [MLX4_NET_TRANS_RULE_ID_IPV4] = 0xE002,
758 [MLX4_NET_TRANS_RULE_ID_TCP] = 0xE004,
759 [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006
760 };
761
762 static const size_t __rule_hw_sz[] = {
763 [MLX4_NET_TRANS_RULE_ID_ETH] =
764 sizeof(struct mlx4_net_trans_rule_hw_eth),
765 [MLX4_NET_TRANS_RULE_ID_IB] =
766 sizeof(struct mlx4_net_trans_rule_hw_ib),
767 [MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
768 [MLX4_NET_TRANS_RULE_ID_IPV4] =
769 sizeof(struct mlx4_net_trans_rule_hw_ipv4),
770 [MLX4_NET_TRANS_RULE_ID_TCP] =
771 sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
772 [MLX4_NET_TRANS_RULE_ID_UDP] =
773 sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
774 };
775 if (spec->id >= MLX4_NET_TRANS_RULE_NUM) {
776 mlx4_err(dev, "Invalid network rule id. id = %d\n", spec->id);
777 return -EINVAL;
778 }
779 memset(rule_hw, 0, __rule_hw_sz[spec->id]);
780 rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]);
781 rule_hw->size = __rule_hw_sz[spec->id] >> 2;
782
783 switch (spec->id) {
784 case MLX4_NET_TRANS_RULE_ID_ETH:
785 memcpy(rule_hw->eth.dst_mac, spec->eth.dst_mac, ETH_ALEN);
786 memcpy(rule_hw->eth.dst_mac_msk, spec->eth.dst_mac_msk,
787 ETH_ALEN);
788 memcpy(rule_hw->eth.src_mac, spec->eth.src_mac, ETH_ALEN);
789 memcpy(rule_hw->eth.src_mac_msk, spec->eth.src_mac_msk,
790 ETH_ALEN);
791 if (spec->eth.ether_type_enable) {
792 rule_hw->eth.ether_type_enable = 1;
793 rule_hw->eth.ether_type = spec->eth.ether_type;
794 }
795 rule_hw->eth.vlan_id = spec->eth.vlan_id;
796 rule_hw->eth.vlan_id_msk = spec->eth.vlan_id_msk;
797 break;
798
799 case MLX4_NET_TRANS_RULE_ID_IB:
800 rule_hw->ib.qpn = spec->ib.r_qpn;
801 rule_hw->ib.qpn_mask = spec->ib.qpn_msk;
802 memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16);
803 memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16);
804 break;
805
806 case MLX4_NET_TRANS_RULE_ID_IPV6:
807 return -EOPNOTSUPP;
808
809 case MLX4_NET_TRANS_RULE_ID_IPV4:
810 rule_hw->ipv4.src_ip = spec->ipv4.src_ip;
811 rule_hw->ipv4.src_ip_msk = spec->ipv4.src_ip_msk;
812 rule_hw->ipv4.dst_ip = spec->ipv4.dst_ip;
813 rule_hw->ipv4.dst_ip_msk = spec->ipv4.dst_ip_msk;
814 break;
815
816 case MLX4_NET_TRANS_RULE_ID_TCP:
817 case MLX4_NET_TRANS_RULE_ID_UDP:
818 rule_hw->tcp_udp.dst_port = spec->tcp_udp.dst_port;
819 rule_hw->tcp_udp.dst_port_msk = spec->tcp_udp.dst_port_msk;
820 rule_hw->tcp_udp.src_port = spec->tcp_udp.src_port;
821 rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk;
822 break;
823
824 default:
825 return -EINVAL;
826 }
827
828 return __rule_hw_sz[spec->id];
829}
830
831static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
832 struct mlx4_net_trans_rule *rule)
833{
834#define BUF_SIZE 256
835 struct mlx4_spec_list *cur;
836 char buf[BUF_SIZE];
837 int len = 0;
838
839 mlx4_err(dev, "%s", str);
840 len += snprintf(buf + len, BUF_SIZE - len,
841 "port = %d prio = 0x%x qp = 0x%x ",
842 rule->port, rule->priority, rule->qpn);
843
844 list_for_each_entry(cur, &rule->list, list) {
845 switch (cur->id) {
846 case MLX4_NET_TRANS_RULE_ID_ETH:
847 len += snprintf(buf + len, BUF_SIZE - len,
848 "dmac = %pM ", &cur->eth.dst_mac);
849 if (cur->eth.ether_type)
850 len += snprintf(buf + len, BUF_SIZE - len,
851 "ethertype = 0x%x ",
852 be16_to_cpu(cur->eth.ether_type));
853 if (cur->eth.vlan_id)
854 len += snprintf(buf + len, BUF_SIZE - len,
855 "vlan-id = %d ",
856 be16_to_cpu(cur->eth.vlan_id));
857 break;
858
859 case MLX4_NET_TRANS_RULE_ID_IPV4:
860 if (cur->ipv4.src_ip)
861 len += snprintf(buf + len, BUF_SIZE - len,
862 "src-ip = %pI4 ",
863 &cur->ipv4.src_ip);
864 if (cur->ipv4.dst_ip)
865 len += snprintf(buf + len, BUF_SIZE - len,
866 "dst-ip = %pI4 ",
867 &cur->ipv4.dst_ip);
868 break;
869
870 case MLX4_NET_TRANS_RULE_ID_TCP:
871 case MLX4_NET_TRANS_RULE_ID_UDP:
872 if (cur->tcp_udp.src_port)
873 len += snprintf(buf + len, BUF_SIZE - len,
874 "src-port = %d ",
875 be16_to_cpu(cur->tcp_udp.src_port));
876 if (cur->tcp_udp.dst_port)
877 len += snprintf(buf + len, BUF_SIZE - len,
878 "dst-port = %d ",
879 be16_to_cpu(cur->tcp_udp.dst_port));
880 break;
881
882 case MLX4_NET_TRANS_RULE_ID_IB:
883 len += snprintf(buf + len, BUF_SIZE - len,
884 "dst-gid = %pI6\n", cur->ib.dst_gid);
885 len += snprintf(buf + len, BUF_SIZE - len,
886 "dst-gid-mask = %pI6\n",
887 cur->ib.dst_gid_msk);
888 break;
889
890 case MLX4_NET_TRANS_RULE_ID_IPV6:
891 break;
892
893 default:
894 break;
895 }
896 }
897 len += snprintf(buf + len, BUF_SIZE - len, "\n");
898 mlx4_err(dev, "%s", buf);
899
900 if (len >= BUF_SIZE)
901 mlx4_err(dev, "Network rule error message was truncated, print buffer is too small.\n");
902}
903
904int mlx4_flow_attach(struct mlx4_dev *dev,
905 struct mlx4_net_trans_rule *rule, u64 *reg_id)
906{
907 struct mlx4_cmd_mailbox *mailbox;
908 struct mlx4_spec_list *cur;
909 u32 size = 0;
910 int ret;
911
912 mailbox = mlx4_alloc_cmd_mailbox(dev);
913 if (IS_ERR(mailbox))
914 return PTR_ERR(mailbox);
915
916 memset(mailbox->buf, 0, sizeof(struct mlx4_net_trans_rule_hw_ctrl));
917 trans_rule_ctrl_to_hw(rule, mailbox->buf);
918
919 size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
920
921 list_for_each_entry(cur, &rule->list, list) {
922 ret = parse_trans_rule(dev, cur, mailbox->buf + size);
923 if (ret < 0) {
924 mlx4_free_cmd_mailbox(dev, mailbox);
925 return -EINVAL;
926 }
927 size += ret;
928 }
929
930 ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
931 if (ret == -ENOMEM)
932 mlx4_err_rule(dev,
933 "mcg table is full. Fail to register network rule.\n",
934 rule);
935 else if (ret)
936 mlx4_err_rule(dev, "Fail to register network rule.\n", rule);
937
938 mlx4_free_cmd_mailbox(dev, mailbox);
939
940 return ret;
941}
942EXPORT_SYMBOL_GPL(mlx4_flow_attach);
943
944int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
945{
946 int err;
947
948 err = mlx4_QP_FLOW_STEERING_DETACH(dev, reg_id);
949 if (err)
950 mlx4_err(dev, "Fail to detach network rule. registration id = 0x%llx\n",
951 reg_id);
952 return err;
953}
954EXPORT_SYMBOL_GPL(mlx4_flow_detach);
955
617int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 956int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
618 int block_mcast_loopback, enum mlx4_protocol prot, 957 int block_mcast_loopback, enum mlx4_protocol prot,
619 enum mlx4_steer_type steer) 958 enum mlx4_steer_type steer)
@@ -866,49 +1205,159 @@ static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
866} 1205}
867 1206
868int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1207int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
869 int block_mcast_loopback, enum mlx4_protocol prot) 1208 u8 port, int block_mcast_loopback,
1209 enum mlx4_protocol prot, u64 *reg_id)
870{ 1210{
871 if (prot == MLX4_PROT_ETH &&
872 !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
873 return 0;
874 1211
875 if (prot == MLX4_PROT_ETH) 1212 switch (dev->caps.steering_mode) {
876 gid[7] |= (MLX4_MC_STEER << 1); 1213 case MLX4_STEERING_MODE_A0:
1214 if (prot == MLX4_PROT_ETH)
1215 return 0;
1216
1217 case MLX4_STEERING_MODE_B0:
1218 if (prot == MLX4_PROT_ETH)
1219 gid[7] |= (MLX4_MC_STEER << 1);
1220
1221 if (mlx4_is_mfunc(dev))
1222 return mlx4_QP_ATTACH(dev, qp, gid, 1,
1223 block_mcast_loopback, prot);
1224 return mlx4_qp_attach_common(dev, qp, gid,
1225 block_mcast_loopback, prot,
1226 MLX4_MC_STEER);
1227
1228 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
1229 struct mlx4_spec_list spec = { {NULL} };
1230 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
1231
1232 struct mlx4_net_trans_rule rule = {
1233 .queue_mode = MLX4_NET_TRANS_Q_FIFO,
1234 .exclusive = 0,
1235 .promisc_mode = MLX4_FS_PROMISC_NONE,
1236 .priority = MLX4_DOMAIN_NIC,
1237 };
1238
1239 rule.allow_loopback = ~block_mcast_loopback;
1240 rule.port = port;
1241 rule.qpn = qp->qpn;
1242 INIT_LIST_HEAD(&rule.list);
1243
1244 switch (prot) {
1245 case MLX4_PROT_ETH:
1246 spec.id = MLX4_NET_TRANS_RULE_ID_ETH;
1247 memcpy(spec.eth.dst_mac, &gid[10], ETH_ALEN);
1248 memcpy(spec.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
1249 break;
877 1250
878 if (mlx4_is_mfunc(dev)) 1251 case MLX4_PROT_IB_IPV6:
879 return mlx4_QP_ATTACH(dev, qp, gid, 1, 1252 spec.id = MLX4_NET_TRANS_RULE_ID_IB;
880 block_mcast_loopback, prot); 1253 memcpy(spec.ib.dst_gid, gid, 16);
1254 memset(&spec.ib.dst_gid_msk, 0xff, 16);
1255 break;
1256 default:
1257 return -EINVAL;
1258 }
1259 list_add_tail(&spec.list, &rule.list);
881 1260
882 return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback, 1261 return mlx4_flow_attach(dev, &rule, reg_id);
883 prot, MLX4_MC_STEER); 1262 }
1263
1264 default:
1265 return -EINVAL;
1266 }
884} 1267}
885EXPORT_SYMBOL_GPL(mlx4_multicast_attach); 1268EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
886 1269
887int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1270int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
888 enum mlx4_protocol prot) 1271 enum mlx4_protocol prot, u64 reg_id)
889{ 1272{
890 if (prot == MLX4_PROT_ETH && 1273 switch (dev->caps.steering_mode) {
891 !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)) 1274 case MLX4_STEERING_MODE_A0:
892 return 0; 1275 if (prot == MLX4_PROT_ETH)
1276 return 0;
893 1277
894 if (prot == MLX4_PROT_ETH) 1278 case MLX4_STEERING_MODE_B0:
895 gid[7] |= (MLX4_MC_STEER << 1); 1279 if (prot == MLX4_PROT_ETH)
1280 gid[7] |= (MLX4_MC_STEER << 1);
896 1281
897 if (mlx4_is_mfunc(dev)) 1282 if (mlx4_is_mfunc(dev))
898 return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot); 1283 return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
1284
1285 return mlx4_qp_detach_common(dev, qp, gid, prot,
1286 MLX4_MC_STEER);
1287
1288 case MLX4_STEERING_MODE_DEVICE_MANAGED:
1289 return mlx4_flow_detach(dev, reg_id);
899 1290
900 return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_MC_STEER); 1291 default:
1292 return -EINVAL;
1293 }
901} 1294}
902EXPORT_SYMBOL_GPL(mlx4_multicast_detach); 1295EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
903 1296
1297int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port,
1298 u32 qpn, enum mlx4_net_trans_promisc_mode mode)
1299{
1300 struct mlx4_net_trans_rule rule;
1301 u64 *regid_p;
1302
1303 switch (mode) {
1304 case MLX4_FS_PROMISC_UPLINK:
1305 case MLX4_FS_PROMISC_FUNCTION_PORT:
1306 regid_p = &dev->regid_promisc_array[port];
1307 break;
1308 case MLX4_FS_PROMISC_ALL_MULTI:
1309 regid_p = &dev->regid_allmulti_array[port];
1310 break;
1311 default:
1312 return -1;
1313 }
1314
1315 if (*regid_p != 0)
1316 return -1;
1317
1318 rule.promisc_mode = mode;
1319 rule.port = port;
1320 rule.qpn = qpn;
1321 INIT_LIST_HEAD(&rule.list);
1322 mlx4_err(dev, "going promisc on %x\n", port);
1323
1324 return mlx4_flow_attach(dev, &rule, regid_p);
1325}
1326EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_add);
1327
1328int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
1329 enum mlx4_net_trans_promisc_mode mode)
1330{
1331 int ret;
1332 u64 *regid_p;
1333
1334 switch (mode) {
1335 case MLX4_FS_PROMISC_UPLINK:
1336 case MLX4_FS_PROMISC_FUNCTION_PORT:
1337 regid_p = &dev->regid_promisc_array[port];
1338 break;
1339 case MLX4_FS_PROMISC_ALL_MULTI:
1340 regid_p = &dev->regid_allmulti_array[port];
1341 break;
1342 default:
1343 return -1;
1344 }
1345
1346 if (*regid_p == 0)
1347 return -1;
1348
1349 ret = mlx4_flow_detach(dev, *regid_p);
1350 if (ret == 0)
1351 *regid_p = 0;
1352
1353 return ret;
1354}
1355EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_remove);
1356
904int mlx4_unicast_attach(struct mlx4_dev *dev, 1357int mlx4_unicast_attach(struct mlx4_dev *dev,
905 struct mlx4_qp *qp, u8 gid[16], 1358 struct mlx4_qp *qp, u8 gid[16],
906 int block_mcast_loopback, enum mlx4_protocol prot) 1359 int block_mcast_loopback, enum mlx4_protocol prot)
907{ 1360{
908 if (prot == MLX4_PROT_ETH &&
909 !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
910 return 0;
911
912 if (prot == MLX4_PROT_ETH) 1361 if (prot == MLX4_PROT_ETH)
913 gid[7] |= (MLX4_UC_STEER << 1); 1362 gid[7] |= (MLX4_UC_STEER << 1);
914 1363
@@ -924,10 +1373,6 @@ EXPORT_SYMBOL_GPL(mlx4_unicast_attach);
924int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, 1373int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
925 u8 gid[16], enum mlx4_protocol prot) 1374 u8 gid[16], enum mlx4_protocol prot)
926{ 1375{
927 if (prot == MLX4_PROT_ETH &&
928 !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
929 return 0;
930
931 if (prot == MLX4_PROT_ETH) 1376 if (prot == MLX4_PROT_ETH)
932 gid[7] |= (MLX4_UC_STEER << 1); 1377 gid[7] |= (MLX4_UC_STEER << 1);
933 1378
@@ -968,9 +1413,6 @@ static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn,
968 1413
969int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) 1414int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
970{ 1415{
971 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
972 return 0;
973
974 if (mlx4_is_mfunc(dev)) 1416 if (mlx4_is_mfunc(dev))
975 return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port); 1417 return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port);
976 1418
@@ -980,9 +1422,6 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
980 1422
981int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) 1423int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
982{ 1424{
983 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
984 return 0;
985
986 if (mlx4_is_mfunc(dev)) 1425 if (mlx4_is_mfunc(dev))
987 return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port); 1426 return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port);
988 1427
@@ -992,9 +1431,6 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
992 1431
993int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) 1432int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
994{ 1433{
995 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
996 return 0;
997
998 if (mlx4_is_mfunc(dev)) 1434 if (mlx4_is_mfunc(dev))
999 return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port); 1435 return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port);
1000 1436
@@ -1004,9 +1440,6 @@ EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
1004 1440
1005int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) 1441int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
1006{ 1442{
1007 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
1008 return 0;
1009
1010 if (mlx4_is_mfunc(dev)) 1443 if (mlx4_is_mfunc(dev))
1011 return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port); 1444 return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port);
1012 1445
@@ -1019,6 +1452,10 @@ int mlx4_init_mcg_table(struct mlx4_dev *dev)
1019 struct mlx4_priv *priv = mlx4_priv(dev); 1452 struct mlx4_priv *priv = mlx4_priv(dev);
1020 int err; 1453 int err;
1021 1454
1455 /* No need for mcg_table when fw managed the mcg table*/
1456 if (dev->caps.steering_mode ==
1457 MLX4_STEERING_MODE_DEVICE_MANAGED)
1458 return 0;
1022 err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms, 1459 err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
1023 dev->caps.num_amgms - 1, 0, 0); 1460 dev->caps.num_amgms - 1, 0, 0);
1024 if (err) 1461 if (err)
@@ -1031,5 +1468,7 @@ int mlx4_init_mcg_table(struct mlx4_dev *dev)
1031 1468
1032void mlx4_cleanup_mcg_table(struct mlx4_dev *dev) 1469void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
1033{ 1470{
1034 mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap); 1471 if (dev->caps.steering_mode !=
1472 MLX4_STEERING_MODE_DEVICE_MANAGED)
1473 mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
1035} 1474}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index e5d20220762c..d2c436b10fbf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -39,6 +39,7 @@
39 39
40#include <linux/mutex.h> 40#include <linux/mutex.h>
41#include <linux/radix-tree.h> 41#include <linux/radix-tree.h>
42#include <linux/rbtree.h>
42#include <linux/timer.h> 43#include <linux/timer.h>
43#include <linux/semaphore.h> 44#include <linux/semaphore.h>
44#include <linux/workqueue.h> 45#include <linux/workqueue.h>
@@ -53,6 +54,17 @@
53#define DRV_VERSION "1.1" 54#define DRV_VERSION "1.1"
54#define DRV_RELDATE "Dec, 2011" 55#define DRV_RELDATE "Dec, 2011"
55 56
57#define MLX4_FS_UDP_UC_EN (1 << 1)
58#define MLX4_FS_TCP_UC_EN (1 << 2)
59#define MLX4_FS_NUM_OF_L2_ADDR 8
60#define MLX4_FS_MGM_LOG_ENTRY_SIZE 7
61#define MLX4_FS_NUM_MCG (1 << 17)
62
63enum {
64 MLX4_FS_L2_HASH = 0,
65 MLX4_FS_L2_L3_L4_HASH,
66};
67
56#define MLX4_NUM_UP 8 68#define MLX4_NUM_UP 8
57#define MLX4_NUM_TC 8 69#define MLX4_NUM_TC 8
58#define MLX4_RATELIMIT_UNITS 3 /* 100 Mbps */ 70#define MLX4_RATELIMIT_UNITS 3 /* 100 Mbps */
@@ -137,6 +149,7 @@ enum mlx4_resource {
137 RES_VLAN, 149 RES_VLAN,
138 RES_EQ, 150 RES_EQ,
139 RES_COUNTER, 151 RES_COUNTER,
152 RES_FS_RULE,
140 MLX4_NUM_OF_RESOURCE_TYPE 153 MLX4_NUM_OF_RESOURCE_TYPE
141}; 154};
142 155
@@ -509,7 +522,7 @@ struct slave_list {
509struct mlx4_resource_tracker { 522struct mlx4_resource_tracker {
510 spinlock_t lock; 523 spinlock_t lock;
511 /* tree for each resources */ 524 /* tree for each resources */
512 struct radix_tree_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE]; 525 struct rb_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE];
513 /* num_of_slave's lists, one per slave */ 526 /* num_of_slave's lists, one per slave */
514 struct slave_list *slave_list; 527 struct slave_list *slave_list;
515}; 528};
@@ -703,6 +716,7 @@ struct mlx4_set_port_rqp_calc_context {
703 716
704struct mlx4_mac_entry { 717struct mlx4_mac_entry {
705 u64 mac; 718 u64 mac;
719 u64 reg_id;
706}; 720};
707 721
708struct mlx4_port_info { 722struct mlx4_port_info {
@@ -776,6 +790,7 @@ struct mlx4_priv {
776 struct mutex bf_mutex; 790 struct mutex bf_mutex;
777 struct io_mapping *bf_mapping; 791 struct io_mapping *bf_mapping;
778 int reserved_mtts; 792 int reserved_mtts;
793 int fs_hash_mode;
779}; 794};
780 795
781static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev) 796static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
@@ -1032,7 +1047,7 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
1032/* resource tracker functions*/ 1047/* resource tracker functions*/
1033int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev, 1048int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
1034 enum mlx4_resource resource_type, 1049 enum mlx4_resource resource_type,
1035 int resource_id, int *slave); 1050 u64 resource_id, int *slave);
1036void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id); 1051void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id);
1037int mlx4_init_resource_tracker(struct mlx4_dev *dev); 1052int mlx4_init_resource_tracker(struct mlx4_dev *dev);
1038 1053
@@ -1117,6 +1132,16 @@ int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
1117 struct mlx4_cmd_mailbox *inbox, 1132 struct mlx4_cmd_mailbox *inbox,
1118 struct mlx4_cmd_mailbox *outbox, 1133 struct mlx4_cmd_mailbox *outbox,
1119 struct mlx4_cmd_info *cmd); 1134 struct mlx4_cmd_info *cmd);
1135int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
1136 struct mlx4_vhcr *vhcr,
1137 struct mlx4_cmd_mailbox *inbox,
1138 struct mlx4_cmd_mailbox *outbox,
1139 struct mlx4_cmd_info *cmd);
1140int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
1141 struct mlx4_vhcr *vhcr,
1142 struct mlx4_cmd_mailbox *inbox,
1143 struct mlx4_cmd_mailbox *outbox,
1144 struct mlx4_cmd_info *cmd);
1120 1145
1121int mlx4_get_mgm_entry_size(struct mlx4_dev *dev); 1146int mlx4_get_mgm_entry_size(struct mlx4_dev *dev);
1122int mlx4_get_qp_per_mgm(struct mlx4_dev *dev); 1147int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 225c20d47900..5f1ab105debc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -43,6 +43,7 @@
43#ifdef CONFIG_MLX4_EN_DCB 43#ifdef CONFIG_MLX4_EN_DCB
44#include <linux/dcbnl.h> 44#include <linux/dcbnl.h>
45#endif 45#endif
46#include <linux/cpu_rmap.h>
46 47
47#include <linux/mlx4/device.h> 48#include <linux/mlx4/device.h>
48#include <linux/mlx4/qp.h> 49#include <linux/mlx4/qp.h>
@@ -75,6 +76,10 @@
75#define STAMP_SHIFT 31 76#define STAMP_SHIFT 31
76#define STAMP_VAL 0x7fffffff 77#define STAMP_VAL 0x7fffffff
77#define STATS_DELAY (HZ / 4) 78#define STATS_DELAY (HZ / 4)
79#define MAX_NUM_OF_FS_RULES 256
80
81#define MLX4_EN_FILTER_HASH_SHIFT 4
82#define MLX4_EN_FILTER_EXPIRY_QUOTA 60
78 83
79/* Typical TSO descriptor with 16 gather entries is 352 bytes... */ 84/* Typical TSO descriptor with 16 gather entries is 352 bytes... */
80#define MAX_DESC_SIZE 512 85#define MAX_DESC_SIZE 512
@@ -106,7 +111,7 @@ enum {
106#define MLX4_EN_MAX_TX_SIZE 8192 111#define MLX4_EN_MAX_TX_SIZE 8192
107#define MLX4_EN_MAX_RX_SIZE 8192 112#define MLX4_EN_MAX_RX_SIZE 8192
108 113
109/* Minimum ring size for our page-allocation sceme to work */ 114/* Minimum ring size for our page-allocation scheme to work */
110#define MLX4_EN_MIN_RX_SIZE (MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES) 115#define MLX4_EN_MIN_RX_SIZE (MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES)
111#define MLX4_EN_MIN_TX_SIZE (4096 / TXBB_SIZE) 116#define MLX4_EN_MIN_TX_SIZE (4096 / TXBB_SIZE)
112 117
@@ -227,6 +232,7 @@ struct mlx4_en_tx_desc {
227 232
228struct mlx4_en_rx_alloc { 233struct mlx4_en_rx_alloc {
229 struct page *page; 234 struct page *page;
235 dma_addr_t dma;
230 u16 offset; 236 u16 offset;
231}; 237};
232 238
@@ -404,6 +410,19 @@ struct mlx4_en_perf_stats {
404#define NUM_PERF_COUNTERS 6 410#define NUM_PERF_COUNTERS 6
405}; 411};
406 412
413enum mlx4_en_mclist_act {
414 MCLIST_NONE,
415 MCLIST_REM,
416 MCLIST_ADD,
417};
418
419struct mlx4_en_mc_list {
420 struct list_head list;
421 enum mlx4_en_mclist_act action;
422 u8 addr[ETH_ALEN];
423 u64 reg_id;
424};
425
407struct mlx4_en_frag_info { 426struct mlx4_en_frag_info {
408 u16 frag_size; 427 u16 frag_size;
409 u16 frag_prefix_size; 428 u16 frag_prefix_size;
@@ -422,6 +441,11 @@ struct mlx4_en_frag_info {
422 441
423#endif 442#endif
424 443
444struct ethtool_flow_id {
445 struct ethtool_rx_flow_spec flow_spec;
446 u64 id;
447};
448
425struct mlx4_en_priv { 449struct mlx4_en_priv {
426 struct mlx4_en_dev *mdev; 450 struct mlx4_en_dev *mdev;
427 struct mlx4_en_port_profile *prof; 451 struct mlx4_en_port_profile *prof;
@@ -431,6 +455,7 @@ struct mlx4_en_priv {
431 struct net_device_stats ret_stats; 455 struct net_device_stats ret_stats;
432 struct mlx4_en_port_state port_state; 456 struct mlx4_en_port_state port_state;
433 spinlock_t stats_lock; 457 spinlock_t stats_lock;
458 struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES];
434 459
435 unsigned long last_moder_packets[MAX_RX_RINGS]; 460 unsigned long last_moder_packets[MAX_RX_RINGS];
436 unsigned long last_moder_tx_packets; 461 unsigned long last_moder_tx_packets;
@@ -480,6 +505,7 @@ struct mlx4_en_priv {
480 struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS]; 505 struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
481 struct mlx4_en_cq *tx_cq; 506 struct mlx4_en_cq *tx_cq;
482 struct mlx4_en_cq rx_cq[MAX_RX_RINGS]; 507 struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
508 struct mlx4_qp drop_qp;
483 struct work_struct mcast_task; 509 struct work_struct mcast_task;
484 struct work_struct mac_task; 510 struct work_struct mac_task;
485 struct work_struct watchdog_task; 511 struct work_struct watchdog_task;
@@ -489,8 +515,9 @@ struct mlx4_en_priv {
489 struct mlx4_en_pkt_stats pkstats; 515 struct mlx4_en_pkt_stats pkstats;
490 struct mlx4_en_port_stats port_stats; 516 struct mlx4_en_port_stats port_stats;
491 u64 stats_bitmap; 517 u64 stats_bitmap;
492 char *mc_addrs; 518 struct list_head mc_list;
493 int mc_addrs_cnt; 519 struct list_head curr_list;
520 u64 broadcast_id;
494 struct mlx4_en_stat_out_mbox hw_stats; 521 struct mlx4_en_stat_out_mbox hw_stats;
495 int vids[128]; 522 int vids[128];
496 bool wol; 523 bool wol;
@@ -501,6 +528,13 @@ struct mlx4_en_priv {
501 struct ieee_ets ets; 528 struct ieee_ets ets;
502 u16 maxrate[IEEE_8021QAZ_MAX_TCS]; 529 u16 maxrate[IEEE_8021QAZ_MAX_TCS];
503#endif 530#endif
531#ifdef CONFIG_RFS_ACCEL
532 spinlock_t filters_lock;
533 int last_filter_id;
534 struct list_head filters;
535 struct hlist_head filter_hash[1 << MLX4_EN_FILTER_HASH_SHIFT];
536#endif
537
504}; 538};
505 539
506enum mlx4_en_wol { 540enum mlx4_en_wol {
@@ -565,6 +599,8 @@ void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
565void mlx4_en_calc_rx_buf(struct net_device *dev); 599void mlx4_en_calc_rx_buf(struct net_device *dev);
566int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv); 600int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
567void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv); 601void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
602int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv);
603void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv);
568int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring); 604int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
569void mlx4_en_rx_irq(struct mlx4_cq *mcq); 605void mlx4_en_rx_irq(struct mlx4_cq *mcq);
570 606
@@ -578,6 +614,11 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
578extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops; 614extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops;
579#endif 615#endif
580 616
617#ifdef CONFIG_RFS_ACCEL
618void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
619 struct mlx4_en_rx_ring *rx_ring);
620#endif
621
581#define MLX4_EN_NUM_SELF_TEST 5 622#define MLX4_EN_NUM_SELF_TEST 5
582void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf); 623void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
583u64 mlx4_en_mac_to_u64(u8 *addr); 624u64 mlx4_en_mac_to_u64(u8 *addr);
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index a8fb52992c64..028833ffc56f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -39,7 +39,6 @@
39#include "mlx4.h" 39#include "mlx4.h"
40 40
41#define MLX4_MAC_VALID (1ull << 63) 41#define MLX4_MAC_VALID (1ull << 63)
42#define MLX4_MAC_MASK 0xffffffffffffULL
43 42
44#define MLX4_VLAN_VALID (1u << 31) 43#define MLX4_VLAN_VALID (1u << 31)
45#define MLX4_VLAN_MASK 0xfff 44#define MLX4_VLAN_MASK 0xfff
@@ -75,21 +74,54 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
75 table->total = 0; 74 table->total = 0;
76} 75}
77 76
78static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn) 77static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
78 u64 mac, int *qpn, u64 *reg_id)
79{ 79{
80 struct mlx4_qp qp;
81 u8 gid[16] = {0};
82 __be64 be_mac; 80 __be64 be_mac;
83 int err; 81 int err;
84 82
85 qp.qpn = *qpn; 83 mac &= MLX4_MAC_MASK;
86
87 mac &= 0xffffffffffffULL;
88 be_mac = cpu_to_be64(mac << 16); 84 be_mac = cpu_to_be64(mac << 16);
89 memcpy(&gid[10], &be_mac, ETH_ALEN);
90 gid[5] = port;
91 85
92 err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH); 86 switch (dev->caps.steering_mode) {
87 case MLX4_STEERING_MODE_B0: {
88 struct mlx4_qp qp;
89 u8 gid[16] = {0};
90
91 qp.qpn = *qpn;
92 memcpy(&gid[10], &be_mac, ETH_ALEN);
93 gid[5] = port;
94
95 err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
96 break;
97 }
98 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
99 struct mlx4_spec_list spec_eth = { {NULL} };
100 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
101
102 struct mlx4_net_trans_rule rule = {
103 .queue_mode = MLX4_NET_TRANS_Q_FIFO,
104 .exclusive = 0,
105 .allow_loopback = 1,
106 .promisc_mode = MLX4_FS_PROMISC_NONE,
107 .priority = MLX4_DOMAIN_NIC,
108 };
109
110 rule.port = port;
111 rule.qpn = *qpn;
112 INIT_LIST_HEAD(&rule.list);
113
114 spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
115 memcpy(spec_eth.eth.dst_mac, &be_mac, ETH_ALEN);
116 memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
117 list_add_tail(&spec_eth.list, &rule.list);
118
119 err = mlx4_flow_attach(dev, &rule, reg_id);
120 break;
121 }
122 default:
123 return -EINVAL;
124 }
93 if (err) 125 if (err)
94 mlx4_warn(dev, "Failed Attaching Unicast\n"); 126 mlx4_warn(dev, "Failed Attaching Unicast\n");
95 127
@@ -97,19 +129,30 @@ static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
97} 129}
98 130
99static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port, 131static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
100 u64 mac, int qpn) 132 u64 mac, int qpn, u64 reg_id)
101{ 133{
102 struct mlx4_qp qp; 134 switch (dev->caps.steering_mode) {
103 u8 gid[16] = {0}; 135 case MLX4_STEERING_MODE_B0: {
104 __be64 be_mac; 136 struct mlx4_qp qp;
137 u8 gid[16] = {0};
138 __be64 be_mac;
105 139
106 qp.qpn = qpn; 140 qp.qpn = qpn;
107 mac &= 0xffffffffffffULL; 141 mac &= MLX4_MAC_MASK;
108 be_mac = cpu_to_be64(mac << 16); 142 be_mac = cpu_to_be64(mac << 16);
109 memcpy(&gid[10], &be_mac, ETH_ALEN); 143 memcpy(&gid[10], &be_mac, ETH_ALEN);
110 gid[5] = port; 144 gid[5] = port;
111 145
112 mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH); 146 mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
147 break;
148 }
149 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
150 mlx4_flow_detach(dev, reg_id);
151 break;
152 }
153 default:
154 mlx4_err(dev, "Invalid steering mode.\n");
155 }
113} 156}
114 157
115static int validate_index(struct mlx4_dev *dev, 158static int validate_index(struct mlx4_dev *dev,
@@ -144,6 +187,7 @@ int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
144 struct mlx4_mac_entry *entry; 187 struct mlx4_mac_entry *entry;
145 int index = 0; 188 int index = 0;
146 int err = 0; 189 int err = 0;
190 u64 reg_id;
147 191
148 mlx4_dbg(dev, "Registering MAC: 0x%llx for adding\n", 192 mlx4_dbg(dev, "Registering MAC: 0x%llx for adding\n",
149 (unsigned long long) mac); 193 (unsigned long long) mac);
@@ -155,7 +199,7 @@ int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
155 return err; 199 return err;
156 } 200 }
157 201
158 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) { 202 if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
159 *qpn = info->base_qpn + index; 203 *qpn = info->base_qpn + index;
160 return 0; 204 return 0;
161 } 205 }
@@ -167,7 +211,7 @@ int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
167 goto qp_err; 211 goto qp_err;
168 } 212 }
169 213
170 err = mlx4_uc_steer_add(dev, port, mac, qpn); 214 err = mlx4_uc_steer_add(dev, port, mac, qpn, &reg_id);
171 if (err) 215 if (err)
172 goto steer_err; 216 goto steer_err;
173 217
@@ -177,6 +221,7 @@ int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
177 goto alloc_err; 221 goto alloc_err;
178 } 222 }
179 entry->mac = mac; 223 entry->mac = mac;
224 entry->reg_id = reg_id;
180 err = radix_tree_insert(&info->mac_tree, *qpn, entry); 225 err = radix_tree_insert(&info->mac_tree, *qpn, entry);
181 if (err) 226 if (err)
182 goto insert_err; 227 goto insert_err;
@@ -186,7 +231,7 @@ insert_err:
186 kfree(entry); 231 kfree(entry);
187 232
188alloc_err: 233alloc_err:
189 mlx4_uc_steer_release(dev, port, mac, *qpn); 234 mlx4_uc_steer_release(dev, port, mac, *qpn, reg_id);
190 235
191steer_err: 236steer_err:
192 mlx4_qp_release_range(dev, *qpn, 1); 237 mlx4_qp_release_range(dev, *qpn, 1);
@@ -206,13 +251,14 @@ void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn)
206 (unsigned long long) mac); 251 (unsigned long long) mac);
207 mlx4_unregister_mac(dev, port, mac); 252 mlx4_unregister_mac(dev, port, mac);
208 253
209 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) { 254 if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
210 entry = radix_tree_lookup(&info->mac_tree, qpn); 255 entry = radix_tree_lookup(&info->mac_tree, qpn);
211 if (entry) { 256 if (entry) {
212 mlx4_dbg(dev, "Releasing qp: port %d, mac 0x%llx," 257 mlx4_dbg(dev, "Releasing qp: port %d, mac 0x%llx,"
213 " qpn %d\n", port, 258 " qpn %d\n", port,
214 (unsigned long long) mac, qpn); 259 (unsigned long long) mac, qpn);
215 mlx4_uc_steer_release(dev, port, entry->mac, qpn); 260 mlx4_uc_steer_release(dev, port, entry->mac,
261 qpn, entry->reg_id);
216 mlx4_qp_release_range(dev, qpn, 1); 262 mlx4_qp_release_range(dev, qpn, 1);
217 radix_tree_delete(&info->mac_tree, qpn); 263 radix_tree_delete(&info->mac_tree, qpn);
218 kfree(entry); 264 kfree(entry);
@@ -359,15 +405,18 @@ int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
359 int index = qpn - info->base_qpn; 405 int index = qpn - info->base_qpn;
360 int err = 0; 406 int err = 0;
361 407
362 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) { 408 if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
363 entry = radix_tree_lookup(&info->mac_tree, qpn); 409 entry = radix_tree_lookup(&info->mac_tree, qpn);
364 if (!entry) 410 if (!entry)
365 return -EINVAL; 411 return -EINVAL;
366 mlx4_uc_steer_release(dev, port, entry->mac, qpn); 412 mlx4_uc_steer_release(dev, port, entry->mac,
413 qpn, entry->reg_id);
367 mlx4_unregister_mac(dev, port, entry->mac); 414 mlx4_unregister_mac(dev, port, entry->mac);
368 entry->mac = new_mac; 415 entry->mac = new_mac;
416 entry->reg_id = 0;
369 mlx4_register_mac(dev, port, new_mac); 417 mlx4_register_mac(dev, port, new_mac);
370 err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn); 418 err = mlx4_uc_steer_add(dev, port, entry->mac,
419 &qpn, &entry->reg_id);
371 return err; 420 return err;
372 } 421 }
373 422
@@ -803,8 +852,7 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
803 u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ? 852 u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
804 MCAST_DIRECT : MCAST_DEFAULT; 853 MCAST_DIRECT : MCAST_DEFAULT;
805 854
806 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER && 855 if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
807 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)
808 return 0; 856 return 0;
809 857
810 mailbox = mlx4_alloc_cmd_mailbox(dev); 858 mailbox = mlx4_alloc_cmd_mailbox(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index b83bc928d52a..9ee4725363d5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -237,13 +237,19 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
237 init_hca->mtt_base = profile[i].start; 237 init_hca->mtt_base = profile[i].start;
238 break; 238 break;
239 case MLX4_RES_MCG: 239 case MLX4_RES_MCG:
240 dev->caps.num_mgms = profile[i].num >> 1;
241 dev->caps.num_amgms = profile[i].num >> 1;
242 init_hca->mc_base = profile[i].start; 240 init_hca->mc_base = profile[i].start;
243 init_hca->log_mc_entry_sz = 241 init_hca->log_mc_entry_sz =
244 ilog2(mlx4_get_mgm_entry_size(dev)); 242 ilog2(mlx4_get_mgm_entry_size(dev));
245 init_hca->log_mc_table_sz = profile[i].log_num; 243 init_hca->log_mc_table_sz = profile[i].log_num;
246 init_hca->log_mc_hash_sz = profile[i].log_num - 1; 244 if (dev->caps.steering_mode ==
245 MLX4_STEERING_MODE_DEVICE_MANAGED) {
246 dev->caps.num_mgms = profile[i].num;
247 } else {
248 init_hca->log_mc_hash_sz =
249 profile[i].log_num - 1;
250 dev->caps.num_mgms = profile[i].num >> 1;
251 dev->caps.num_amgms = profile[i].num >> 1;
252 }
247 break; 253 break;
248 default: 254 default:
249 break; 255 break;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index b45d0e7f6ab0..94ceddd17ab2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -41,13 +41,12 @@
41#include <linux/slab.h> 41#include <linux/slab.h>
42#include <linux/mlx4/cmd.h> 42#include <linux/mlx4/cmd.h>
43#include <linux/mlx4/qp.h> 43#include <linux/mlx4/qp.h>
44#include <linux/if_ether.h>
44 45
45#include "mlx4.h" 46#include "mlx4.h"
46#include "fw.h" 47#include "fw.h"
47 48
48#define MLX4_MAC_VALID (1ull << 63) 49#define MLX4_MAC_VALID (1ull << 63)
49#define MLX4_MAC_MASK 0x7fffffffffffffffULL
50#define ETH_ALEN 6
51 50
52struct mac_res { 51struct mac_res {
53 struct list_head list; 52 struct list_head list;
@@ -57,7 +56,8 @@ struct mac_res {
57 56
58struct res_common { 57struct res_common {
59 struct list_head list; 58 struct list_head list;
60 u32 res_id; 59 struct rb_node node;
60 u64 res_id;
61 int owner; 61 int owner;
62 int state; 62 int state;
63 int from_state; 63 int from_state;
@@ -189,6 +189,58 @@ struct res_xrcdn {
189 int port; 189 int port;
190}; 190};
191 191
192enum res_fs_rule_states {
193 RES_FS_RULE_BUSY = RES_ANY_BUSY,
194 RES_FS_RULE_ALLOCATED,
195};
196
197struct res_fs_rule {
198 struct res_common com;
199};
200
201static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
202{
203 struct rb_node *node = root->rb_node;
204
205 while (node) {
206 struct res_common *res = container_of(node, struct res_common,
207 node);
208
209 if (res_id < res->res_id)
210 node = node->rb_left;
211 else if (res_id > res->res_id)
212 node = node->rb_right;
213 else
214 return res;
215 }
216 return NULL;
217}
218
219static int res_tracker_insert(struct rb_root *root, struct res_common *res)
220{
221 struct rb_node **new = &(root->rb_node), *parent = NULL;
222
223 /* Figure out where to put new node */
224 while (*new) {
225 struct res_common *this = container_of(*new, struct res_common,
226 node);
227
228 parent = *new;
229 if (res->res_id < this->res_id)
230 new = &((*new)->rb_left);
231 else if (res->res_id > this->res_id)
232 new = &((*new)->rb_right);
233 else
234 return -EEXIST;
235 }
236
237 /* Add new node and rebalance tree. */
238 rb_link_node(&res->node, parent, new);
239 rb_insert_color(&res->node, root);
240
241 return 0;
242}
243
192/* For Debug uses */ 244/* For Debug uses */
193static const char *ResourceType(enum mlx4_resource rt) 245static const char *ResourceType(enum mlx4_resource rt)
194{ 246{
@@ -201,6 +253,7 @@ static const char *ResourceType(enum mlx4_resource rt)
201 case RES_MAC: return "RES_MAC"; 253 case RES_MAC: return "RES_MAC";
202 case RES_EQ: return "RES_EQ"; 254 case RES_EQ: return "RES_EQ";
203 case RES_COUNTER: return "RES_COUNTER"; 255 case RES_COUNTER: return "RES_COUNTER";
256 case RES_FS_RULE: return "RES_FS_RULE";
204 case RES_XRCD: return "RES_XRCD"; 257 case RES_XRCD: return "RES_XRCD";
205 default: return "Unknown resource type !!!"; 258 default: return "Unknown resource type !!!";
206 }; 259 };
@@ -228,8 +281,7 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
228 mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n", 281 mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
229 dev->num_slaves); 282 dev->num_slaves);
230 for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) 283 for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
231 INIT_RADIX_TREE(&priv->mfunc.master.res_tracker.res_tree[i], 284 priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
232 GFP_ATOMIC|__GFP_NOWARN);
233 285
234 spin_lock_init(&priv->mfunc.master.res_tracker.lock); 286 spin_lock_init(&priv->mfunc.master.res_tracker.lock);
235 return 0 ; 287 return 0 ;
@@ -277,11 +329,11 @@ static void *find_res(struct mlx4_dev *dev, int res_id,
277{ 329{
278 struct mlx4_priv *priv = mlx4_priv(dev); 330 struct mlx4_priv *priv = mlx4_priv(dev);
279 331
280 return radix_tree_lookup(&priv->mfunc.master.res_tracker.res_tree[type], 332 return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
281 res_id); 333 res_id);
282} 334}
283 335
284static int get_res(struct mlx4_dev *dev, int slave, int res_id, 336static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
285 enum mlx4_resource type, 337 enum mlx4_resource type,
286 void *res) 338 void *res)
287{ 339{
@@ -307,7 +359,7 @@ static int get_res(struct mlx4_dev *dev, int slave, int res_id,
307 359
308 r->from_state = r->state; 360 r->from_state = r->state;
309 r->state = RES_ANY_BUSY; 361 r->state = RES_ANY_BUSY;
310 mlx4_dbg(dev, "res %s id 0x%x to busy\n", 362 mlx4_dbg(dev, "res %s id 0x%llx to busy\n",
311 ResourceType(type), r->res_id); 363 ResourceType(type), r->res_id);
312 364
313 if (res) 365 if (res)
@@ -320,7 +372,7 @@ exit:
320 372
321int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev, 373int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
322 enum mlx4_resource type, 374 enum mlx4_resource type,
323 int res_id, int *slave) 375 u64 res_id, int *slave)
324{ 376{
325 377
326 struct res_common *r; 378 struct res_common *r;
@@ -341,7 +393,7 @@ int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
341 return err; 393 return err;
342} 394}
343 395
344static void put_res(struct mlx4_dev *dev, int slave, int res_id, 396static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
345 enum mlx4_resource type) 397 enum mlx4_resource type)
346{ 398{
347 struct res_common *r; 399 struct res_common *r;
@@ -473,7 +525,21 @@ static struct res_common *alloc_xrcdn_tr(int id)
473 return &ret->com; 525 return &ret->com;
474} 526}
475 527
476static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave, 528static struct res_common *alloc_fs_rule_tr(u64 id)
529{
530 struct res_fs_rule *ret;
531
532 ret = kzalloc(sizeof *ret, GFP_KERNEL);
533 if (!ret)
534 return NULL;
535
536 ret->com.res_id = id;
537 ret->com.state = RES_FS_RULE_ALLOCATED;
538
539 return &ret->com;
540}
541
542static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
477 int extra) 543 int extra)
478{ 544{
479 struct res_common *ret; 545 struct res_common *ret;
@@ -506,6 +572,9 @@ static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
506 case RES_XRCD: 572 case RES_XRCD:
507 ret = alloc_xrcdn_tr(id); 573 ret = alloc_xrcdn_tr(id);
508 break; 574 break;
575 case RES_FS_RULE:
576 ret = alloc_fs_rule_tr(id);
577 break;
509 default: 578 default:
510 return NULL; 579 return NULL;
511 } 580 }
@@ -515,7 +584,7 @@ static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
515 return ret; 584 return ret;
516} 585}
517 586
518static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count, 587static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
519 enum mlx4_resource type, int extra) 588 enum mlx4_resource type, int extra)
520{ 589{
521 int i; 590 int i;
@@ -523,7 +592,7 @@ static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
523 struct mlx4_priv *priv = mlx4_priv(dev); 592 struct mlx4_priv *priv = mlx4_priv(dev);
524 struct res_common **res_arr; 593 struct res_common **res_arr;
525 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 594 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
526 struct radix_tree_root *root = &tracker->res_tree[type]; 595 struct rb_root *root = &tracker->res_tree[type];
527 596
528 res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL); 597 res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
529 if (!res_arr) 598 if (!res_arr)
@@ -546,7 +615,7 @@ static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
546 err = -EEXIST; 615 err = -EEXIST;
547 goto undo; 616 goto undo;
548 } 617 }
549 err = radix_tree_insert(root, base + i, res_arr[i]); 618 err = res_tracker_insert(root, res_arr[i]);
550 if (err) 619 if (err)
551 goto undo; 620 goto undo;
552 list_add_tail(&res_arr[i]->list, 621 list_add_tail(&res_arr[i]->list,
@@ -559,7 +628,7 @@ static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
559 628
560undo: 629undo:
561 for (--i; i >= base; --i) 630 for (--i; i >= base; --i)
562 radix_tree_delete(&tracker->res_tree[type], i); 631 rb_erase(&res_arr[i]->node, root);
563 632
564 spin_unlock_irq(mlx4_tlock(dev)); 633 spin_unlock_irq(mlx4_tlock(dev));
565 634
@@ -638,6 +707,16 @@ static int remove_xrcdn_ok(struct res_xrcdn *res)
638 return 0; 707 return 0;
639} 708}
640 709
710static int remove_fs_rule_ok(struct res_fs_rule *res)
711{
712 if (res->com.state == RES_FS_RULE_BUSY)
713 return -EBUSY;
714 else if (res->com.state != RES_FS_RULE_ALLOCATED)
715 return -EPERM;
716
717 return 0;
718}
719
641static int remove_cq_ok(struct res_cq *res) 720static int remove_cq_ok(struct res_cq *res)
642{ 721{
643 if (res->com.state == RES_CQ_BUSY) 722 if (res->com.state == RES_CQ_BUSY)
@@ -679,15 +758,17 @@ static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
679 return remove_counter_ok((struct res_counter *)res); 758 return remove_counter_ok((struct res_counter *)res);
680 case RES_XRCD: 759 case RES_XRCD:
681 return remove_xrcdn_ok((struct res_xrcdn *)res); 760 return remove_xrcdn_ok((struct res_xrcdn *)res);
761 case RES_FS_RULE:
762 return remove_fs_rule_ok((struct res_fs_rule *)res);
682 default: 763 default:
683 return -EINVAL; 764 return -EINVAL;
684 } 765 }
685} 766}
686 767
687static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count, 768static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
688 enum mlx4_resource type, int extra) 769 enum mlx4_resource type, int extra)
689{ 770{
690 int i; 771 u64 i;
691 int err; 772 int err;
692 struct mlx4_priv *priv = mlx4_priv(dev); 773 struct mlx4_priv *priv = mlx4_priv(dev);
693 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 774 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
@@ -695,7 +776,7 @@ static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
695 776
696 spin_lock_irq(mlx4_tlock(dev)); 777 spin_lock_irq(mlx4_tlock(dev));
697 for (i = base; i < base + count; ++i) { 778 for (i = base; i < base + count; ++i) {
698 r = radix_tree_lookup(&tracker->res_tree[type], i); 779 r = res_tracker_lookup(&tracker->res_tree[type], i);
699 if (!r) { 780 if (!r) {
700 err = -ENOENT; 781 err = -ENOENT;
701 goto out; 782 goto out;
@@ -710,8 +791,8 @@ static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
710 } 791 }
711 792
712 for (i = base; i < base + count; ++i) { 793 for (i = base; i < base + count; ++i) {
713 r = radix_tree_lookup(&tracker->res_tree[type], i); 794 r = res_tracker_lookup(&tracker->res_tree[type], i);
714 radix_tree_delete(&tracker->res_tree[type], i); 795 rb_erase(&r->node, &tracker->res_tree[type]);
715 list_del(&r->list); 796 list_del(&r->list);
716 kfree(r); 797 kfree(r);
717 } 798 }
@@ -733,7 +814,7 @@ static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
733 int err = 0; 814 int err = 0;
734 815
735 spin_lock_irq(mlx4_tlock(dev)); 816 spin_lock_irq(mlx4_tlock(dev));
736 r = radix_tree_lookup(&tracker->res_tree[RES_QP], qpn); 817 r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
737 if (!r) 818 if (!r)
738 err = -ENOENT; 819 err = -ENOENT;
739 else if (r->com.owner != slave) 820 else if (r->com.owner != slave)
@@ -741,7 +822,7 @@ static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
741 else { 822 else {
742 switch (state) { 823 switch (state) {
743 case RES_QP_BUSY: 824 case RES_QP_BUSY:
744 mlx4_dbg(dev, "%s: failed RES_QP, 0x%x\n", 825 mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
745 __func__, r->com.res_id); 826 __func__, r->com.res_id);
746 err = -EBUSY; 827 err = -EBUSY;
747 break; 828 break;
@@ -750,7 +831,7 @@ static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
750 if (r->com.state == RES_QP_MAPPED && !alloc) 831 if (r->com.state == RES_QP_MAPPED && !alloc)
751 break; 832 break;
752 833
753 mlx4_dbg(dev, "failed RES_QP, 0x%x\n", r->com.res_id); 834 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
754 err = -EINVAL; 835 err = -EINVAL;
755 break; 836 break;
756 837
@@ -759,7 +840,7 @@ static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
759 r->com.state == RES_QP_HW) 840 r->com.state == RES_QP_HW)
760 break; 841 break;
761 else { 842 else {
762 mlx4_dbg(dev, "failed RES_QP, 0x%x\n", 843 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
763 r->com.res_id); 844 r->com.res_id);
764 err = -EINVAL; 845 err = -EINVAL;
765 } 846 }
@@ -779,7 +860,7 @@ static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
779 r->com.to_state = state; 860 r->com.to_state = state;
780 r->com.state = RES_QP_BUSY; 861 r->com.state = RES_QP_BUSY;
781 if (qp) 862 if (qp)
782 *qp = (struct res_qp *)r; 863 *qp = r;
783 } 864 }
784 } 865 }
785 866
@@ -797,7 +878,7 @@ static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
797 int err = 0; 878 int err = 0;
798 879
799 spin_lock_irq(mlx4_tlock(dev)); 880 spin_lock_irq(mlx4_tlock(dev));
800 r = radix_tree_lookup(&tracker->res_tree[RES_MPT], index); 881 r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
801 if (!r) 882 if (!r)
802 err = -ENOENT; 883 err = -ENOENT;
803 else if (r->com.owner != slave) 884 else if (r->com.owner != slave)
@@ -832,7 +913,7 @@ static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
832 r->com.to_state = state; 913 r->com.to_state = state;
833 r->com.state = RES_MPT_BUSY; 914 r->com.state = RES_MPT_BUSY;
834 if (mpt) 915 if (mpt)
835 *mpt = (struct res_mpt *)r; 916 *mpt = r;
836 } 917 }
837 } 918 }
838 919
@@ -850,7 +931,7 @@ static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
850 int err = 0; 931 int err = 0;
851 932
852 spin_lock_irq(mlx4_tlock(dev)); 933 spin_lock_irq(mlx4_tlock(dev));
853 r = radix_tree_lookup(&tracker->res_tree[RES_EQ], index); 934 r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
854 if (!r) 935 if (!r)
855 err = -ENOENT; 936 err = -ENOENT;
856 else if (r->com.owner != slave) 937 else if (r->com.owner != slave)
@@ -898,7 +979,7 @@ static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
898 int err; 979 int err;
899 980
900 spin_lock_irq(mlx4_tlock(dev)); 981 spin_lock_irq(mlx4_tlock(dev));
901 r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn); 982 r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
902 if (!r) 983 if (!r)
903 err = -ENOENT; 984 err = -ENOENT;
904 else if (r->com.owner != slave) 985 else if (r->com.owner != slave)
@@ -952,7 +1033,7 @@ static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
952 int err = 0; 1033 int err = 0;
953 1034
954 spin_lock_irq(mlx4_tlock(dev)); 1035 spin_lock_irq(mlx4_tlock(dev));
955 r = radix_tree_lookup(&tracker->res_tree[RES_SRQ], index); 1036 r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
956 if (!r) 1037 if (!r)
957 err = -ENOENT; 1038 err = -ENOENT;
958 else if (r->com.owner != slave) 1039 else if (r->com.owner != slave)
@@ -1001,7 +1082,7 @@ static void res_abort_move(struct mlx4_dev *dev, int slave,
1001 struct res_common *r; 1082 struct res_common *r;
1002 1083
1003 spin_lock_irq(mlx4_tlock(dev)); 1084 spin_lock_irq(mlx4_tlock(dev));
1004 r = radix_tree_lookup(&tracker->res_tree[type], id); 1085 r = res_tracker_lookup(&tracker->res_tree[type], id);
1005 if (r && (r->owner == slave)) 1086 if (r && (r->owner == slave))
1006 r->state = r->from_state; 1087 r->state = r->from_state;
1007 spin_unlock_irq(mlx4_tlock(dev)); 1088 spin_unlock_irq(mlx4_tlock(dev));
@@ -1015,7 +1096,7 @@ static void res_end_move(struct mlx4_dev *dev, int slave,
1015 struct res_common *r; 1096 struct res_common *r;
1016 1097
1017 spin_lock_irq(mlx4_tlock(dev)); 1098 spin_lock_irq(mlx4_tlock(dev));
1018 r = radix_tree_lookup(&tracker->res_tree[type], id); 1099 r = res_tracker_lookup(&tracker->res_tree[type], id);
1019 if (r && (r->owner == slave)) 1100 if (r && (r->owner == slave))
1020 r->state = r->to_state; 1101 r->state = r->to_state;
1021 spin_unlock_irq(mlx4_tlock(dev)); 1102 spin_unlock_irq(mlx4_tlock(dev));
@@ -2695,6 +2776,60 @@ ex_put:
2695 return err; 2776 return err;
2696} 2777}
2697 2778
2779int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2780 struct mlx4_vhcr *vhcr,
2781 struct mlx4_cmd_mailbox *inbox,
2782 struct mlx4_cmd_mailbox *outbox,
2783 struct mlx4_cmd_info *cmd)
2784{
2785 int err;
2786
2787 if (dev->caps.steering_mode !=
2788 MLX4_STEERING_MODE_DEVICE_MANAGED)
2789 return -EOPNOTSUPP;
2790
2791 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
2792 vhcr->in_modifier, 0,
2793 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
2794 MLX4_CMD_NATIVE);
2795 if (err)
2796 return err;
2797
2798 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0);
2799 if (err) {
2800 mlx4_err(dev, "Fail to add flow steering resources.\n ");
2801 /* detach rule*/
2802 mlx4_cmd(dev, vhcr->out_param, 0, 0,
2803 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
2804 MLX4_CMD_NATIVE);
2805 }
2806 return err;
2807}
2808
2809int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
2810 struct mlx4_vhcr *vhcr,
2811 struct mlx4_cmd_mailbox *inbox,
2812 struct mlx4_cmd_mailbox *outbox,
2813 struct mlx4_cmd_info *cmd)
2814{
2815 int err;
2816
2817 if (dev->caps.steering_mode !=
2818 MLX4_STEERING_MODE_DEVICE_MANAGED)
2819 return -EOPNOTSUPP;
2820
2821 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
2822 if (err) {
2823 mlx4_err(dev, "Fail to remove flow steering resources.\n ");
2824 return err;
2825 }
2826
2827 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
2828 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
2829 MLX4_CMD_NATIVE);
2830 return err;
2831}
2832
2698enum { 2833enum {
2699 BUSY_MAX_RETRIES = 10 2834 BUSY_MAX_RETRIES = 10
2700}; 2835};
@@ -2751,7 +2886,7 @@ static int _move_all_busy(struct mlx4_dev *dev, int slave,
2751 if (r->state == RES_ANY_BUSY) { 2886 if (r->state == RES_ANY_BUSY) {
2752 if (print) 2887 if (print)
2753 mlx4_dbg(dev, 2888 mlx4_dbg(dev,
2754 "%s id 0x%x is busy\n", 2889 "%s id 0x%llx is busy\n",
2755 ResourceType(type), 2890 ResourceType(type),
2756 r->res_id); 2891 r->res_id);
2757 ++busy; 2892 ++busy;
@@ -2817,8 +2952,8 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
2817 switch (state) { 2952 switch (state) {
2818 case RES_QP_RESERVED: 2953 case RES_QP_RESERVED:
2819 spin_lock_irq(mlx4_tlock(dev)); 2954 spin_lock_irq(mlx4_tlock(dev));
2820 radix_tree_delete(&tracker->res_tree[RES_QP], 2955 rb_erase(&qp->com.node,
2821 qp->com.res_id); 2956 &tracker->res_tree[RES_QP]);
2822 list_del(&qp->com.list); 2957 list_del(&qp->com.list);
2823 spin_unlock_irq(mlx4_tlock(dev)); 2958 spin_unlock_irq(mlx4_tlock(dev));
2824 kfree(qp); 2959 kfree(qp);
@@ -2888,8 +3023,8 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
2888 case RES_SRQ_ALLOCATED: 3023 case RES_SRQ_ALLOCATED:
2889 __mlx4_srq_free_icm(dev, srqn); 3024 __mlx4_srq_free_icm(dev, srqn);
2890 spin_lock_irq(mlx4_tlock(dev)); 3025 spin_lock_irq(mlx4_tlock(dev));
2891 radix_tree_delete(&tracker->res_tree[RES_SRQ], 3026 rb_erase(&srq->com.node,
2892 srqn); 3027 &tracker->res_tree[RES_SRQ]);
2893 list_del(&srq->com.list); 3028 list_del(&srq->com.list);
2894 spin_unlock_irq(mlx4_tlock(dev)); 3029 spin_unlock_irq(mlx4_tlock(dev));
2895 kfree(srq); 3030 kfree(srq);
@@ -2954,8 +3089,8 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
2954 case RES_CQ_ALLOCATED: 3089 case RES_CQ_ALLOCATED:
2955 __mlx4_cq_free_icm(dev, cqn); 3090 __mlx4_cq_free_icm(dev, cqn);
2956 spin_lock_irq(mlx4_tlock(dev)); 3091 spin_lock_irq(mlx4_tlock(dev));
2957 radix_tree_delete(&tracker->res_tree[RES_CQ], 3092 rb_erase(&cq->com.node,
2958 cqn); 3093 &tracker->res_tree[RES_CQ]);
2959 list_del(&cq->com.list); 3094 list_del(&cq->com.list);
2960 spin_unlock_irq(mlx4_tlock(dev)); 3095 spin_unlock_irq(mlx4_tlock(dev));
2961 kfree(cq); 3096 kfree(cq);
@@ -3017,8 +3152,8 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
3017 case RES_MPT_RESERVED: 3152 case RES_MPT_RESERVED:
3018 __mlx4_mr_release(dev, mpt->key); 3153 __mlx4_mr_release(dev, mpt->key);
3019 spin_lock_irq(mlx4_tlock(dev)); 3154 spin_lock_irq(mlx4_tlock(dev));
3020 radix_tree_delete(&tracker->res_tree[RES_MPT], 3155 rb_erase(&mpt->com.node,
3021 mptn); 3156 &tracker->res_tree[RES_MPT]);
3022 list_del(&mpt->com.list); 3157 list_del(&mpt->com.list);
3023 spin_unlock_irq(mlx4_tlock(dev)); 3158 spin_unlock_irq(mlx4_tlock(dev));
3024 kfree(mpt); 3159 kfree(mpt);
@@ -3086,8 +3221,8 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
3086 __mlx4_free_mtt_range(dev, base, 3221 __mlx4_free_mtt_range(dev, base,
3087 mtt->order); 3222 mtt->order);
3088 spin_lock_irq(mlx4_tlock(dev)); 3223 spin_lock_irq(mlx4_tlock(dev));
3089 radix_tree_delete(&tracker->res_tree[RES_MTT], 3224 rb_erase(&mtt->com.node,
3090 base); 3225 &tracker->res_tree[RES_MTT]);
3091 list_del(&mtt->com.list); 3226 list_del(&mtt->com.list);
3092 spin_unlock_irq(mlx4_tlock(dev)); 3227 spin_unlock_irq(mlx4_tlock(dev));
3093 kfree(mtt); 3228 kfree(mtt);
@@ -3104,6 +3239,58 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
3104 spin_unlock_irq(mlx4_tlock(dev)); 3239 spin_unlock_irq(mlx4_tlock(dev));
3105} 3240}
3106 3241
3242static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
3243{
3244 struct mlx4_priv *priv = mlx4_priv(dev);
3245 struct mlx4_resource_tracker *tracker =
3246 &priv->mfunc.master.res_tracker;
3247 struct list_head *fs_rule_list =
3248 &tracker->slave_list[slave].res_list[RES_FS_RULE];
3249 struct res_fs_rule *fs_rule;
3250 struct res_fs_rule *tmp;
3251 int state;
3252 u64 base;
3253 int err;
3254
3255 err = move_all_busy(dev, slave, RES_FS_RULE);
3256 if (err)
3257 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all mtts to busy for slave %d\n",
3258 slave);
3259
3260 spin_lock_irq(mlx4_tlock(dev));
3261 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
3262 spin_unlock_irq(mlx4_tlock(dev));
3263 if (fs_rule->com.owner == slave) {
3264 base = fs_rule->com.res_id;
3265 state = fs_rule->com.from_state;
3266 while (state != 0) {
3267 switch (state) {
3268 case RES_FS_RULE_ALLOCATED:
3269 /* detach rule */
3270 err = mlx4_cmd(dev, base, 0, 0,
3271 MLX4_QP_FLOW_STEERING_DETACH,
3272 MLX4_CMD_TIME_CLASS_A,
3273 MLX4_CMD_NATIVE);
3274
3275 spin_lock_irq(mlx4_tlock(dev));
3276 rb_erase(&fs_rule->com.node,
3277 &tracker->res_tree[RES_FS_RULE]);
3278 list_del(&fs_rule->com.list);
3279 spin_unlock_irq(mlx4_tlock(dev));
3280 kfree(fs_rule);
3281 state = 0;
3282 break;
3283
3284 default:
3285 state = 0;
3286 }
3287 }
3288 }
3289 spin_lock_irq(mlx4_tlock(dev));
3290 }
3291 spin_unlock_irq(mlx4_tlock(dev));
3292}
3293
3107static void rem_slave_eqs(struct mlx4_dev *dev, int slave) 3294static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
3108{ 3295{
3109 struct mlx4_priv *priv = mlx4_priv(dev); 3296 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -3133,8 +3320,8 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
3133 switch (state) { 3320 switch (state) {
3134 case RES_EQ_RESERVED: 3321 case RES_EQ_RESERVED:
3135 spin_lock_irq(mlx4_tlock(dev)); 3322 spin_lock_irq(mlx4_tlock(dev));
3136 radix_tree_delete(&tracker->res_tree[RES_EQ], 3323 rb_erase(&eq->com.node,
3137 eqn); 3324 &tracker->res_tree[RES_EQ]);
3138 list_del(&eq->com.list); 3325 list_del(&eq->com.list);
3139 spin_unlock_irq(mlx4_tlock(dev)); 3326 spin_unlock_irq(mlx4_tlock(dev));
3140 kfree(eq); 3327 kfree(eq);
@@ -3191,7 +3378,8 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave)
3191 list_for_each_entry_safe(counter, tmp, counter_list, com.list) { 3378 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
3192 if (counter->com.owner == slave) { 3379 if (counter->com.owner == slave) {
3193 index = counter->com.res_id; 3380 index = counter->com.res_id;
3194 radix_tree_delete(&tracker->res_tree[RES_COUNTER], index); 3381 rb_erase(&counter->com.node,
3382 &tracker->res_tree[RES_COUNTER]);
3195 list_del(&counter->com.list); 3383 list_del(&counter->com.list);
3196 kfree(counter); 3384 kfree(counter);
3197 __mlx4_counter_free(dev, index); 3385 __mlx4_counter_free(dev, index);
@@ -3220,7 +3408,7 @@ static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
3220 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) { 3408 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
3221 if (xrcd->com.owner == slave) { 3409 if (xrcd->com.owner == slave) {
3222 xrcdn = xrcd->com.res_id; 3410 xrcdn = xrcd->com.res_id;
3223 radix_tree_delete(&tracker->res_tree[RES_XRCD], xrcdn); 3411 rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
3224 list_del(&xrcd->com.list); 3412 list_del(&xrcd->com.list);
3225 kfree(xrcd); 3413 kfree(xrcd);
3226 __mlx4_xrcd_free(dev, xrcdn); 3414 __mlx4_xrcd_free(dev, xrcdn);
@@ -3244,5 +3432,6 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3244 rem_slave_mtts(dev, slave); 3432 rem_slave_mtts(dev, slave);
3245 rem_slave_counters(dev, slave); 3433 rem_slave_counters(dev, slave);
3246 rem_slave_xrcdns(dev, slave); 3434 rem_slave_xrcdns(dev, slave);
3435 rem_slave_fs_rule(dev, slave);
3247 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); 3436 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3248} 3437}
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 5e313e9a252f..1540ebeb8669 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -422,7 +422,7 @@ static void ks8851_read_mac_addr(struct net_device *dev)
422 * 422 *
423 * Get or create the initial mac address for the device and then set that 423 * Get or create the initial mac address for the device and then set that
424 * into the station address register. If there is an EEPROM present, then 424 * into the station address register. If there is an EEPROM present, then
425 * we try that. If no valid mac address is found we use random_ether_addr() 425 * we try that. If no valid mac address is found we use eth_random_addr()
426 * to create a new one. 426 * to create a new one.
427 */ 427 */
428static void ks8851_init_mac(struct ks8851_net *ks) 428static void ks8851_init_mac(struct ks8851_net *ks)
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index 5ffde23ac8fb..38529edfe350 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -16,8 +16,7 @@
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */ 17 */
18 18
19/** 19/* Supports:
20 * Supports:
21 * KS8851 16bit MLL chip from Micrel Inc. 20 * KS8851 16bit MLL chip from Micrel Inc.
22 */ 21 */
23 22
@@ -35,7 +34,7 @@
35#include <linux/platform_device.h> 34#include <linux/platform_device.h>
36#include <linux/delay.h> 35#include <linux/delay.h>
37#include <linux/slab.h> 36#include <linux/slab.h>
38#include <asm/io.h> 37#include <linux/ks8851_mll.h>
39 38
40#define DRV_NAME "ks8851_mll" 39#define DRV_NAME "ks8851_mll"
41 40
@@ -465,8 +464,7 @@ static int msg_enable;
465#define BE1 0x2000 /* Byte Enable 1 */ 464#define BE1 0x2000 /* Byte Enable 1 */
466#define BE0 0x1000 /* Byte Enable 0 */ 465#define BE0 0x1000 /* Byte Enable 0 */
467 466
468/** 467/* register read/write calls.
469 * register read/write calls.
470 * 468 *
471 * All these calls issue transactions to access the chip's registers. They 469 * All these calls issue transactions to access the chip's registers. They
472 * all require that the necessary lock is held to prevent accesses when the 470 * all require that the necessary lock is held to prevent accesses when the
@@ -1103,7 +1101,7 @@ static void ks_set_grpaddr(struct ks_net *ks)
1103 } 1101 }
1104} /* ks_set_grpaddr */ 1102} /* ks_set_grpaddr */
1105 1103
1106/* 1104/**
1107* ks_clear_mcast - clear multicast information 1105* ks_clear_mcast - clear multicast information
1108* 1106*
1109* @ks : The chip information 1107* @ks : The chip information
@@ -1515,6 +1513,7 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
1515 struct net_device *netdev; 1513 struct net_device *netdev;
1516 struct ks_net *ks; 1514 struct ks_net *ks;
1517 u16 id, data; 1515 u16 id, data;
1516 struct ks8851_mll_platform_data *pdata;
1518 1517
1519 io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1518 io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1520 io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1519 io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -1596,17 +1595,27 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
1596 ks_disable_qmu(ks); 1595 ks_disable_qmu(ks);
1597 ks_setup(ks); 1596 ks_setup(ks);
1598 ks_setup_int(ks); 1597 ks_setup_int(ks);
1599 memcpy(netdev->dev_addr, ks->mac_addr, 6);
1600 1598
1601 data = ks_rdreg16(ks, KS_OBCR); 1599 data = ks_rdreg16(ks, KS_OBCR);
1602 ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA); 1600 ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA);
1603 1601
1604 /** 1602 /* overwriting the default MAC address */
1605 * If you want to use the default MAC addr, 1603 pdata = pdev->dev.platform_data;
1606 * comment out the 2 functions below. 1604 if (!pdata) {
1607 */ 1605 netdev_err(netdev, "No platform data\n");
1606 err = -ENODEV;
1607 goto err_pdata;
1608 }
1609 memcpy(ks->mac_addr, pdata->mac_addr, 6);
1610 if (!is_valid_ether_addr(ks->mac_addr)) {
1611 /* Use random MAC address if none passed */
1612 eth_random_addr(ks->mac_addr);
1613 netdev_info(netdev, "Using random mac address\n");
1614 }
1615 netdev_info(netdev, "Mac address is: %pM\n", ks->mac_addr);
1616
1617 memcpy(netdev->dev_addr, ks->mac_addr, 6);
1608 1618
1609 random_ether_addr(netdev->dev_addr);
1610 ks_set_mac(ks, netdev->dev_addr); 1619 ks_set_mac(ks, netdev->dev_addr);
1611 1620
1612 id = ks_rdreg16(ks, KS_CIDER); 1621 id = ks_rdreg16(ks, KS_CIDER);
@@ -1615,6 +1624,8 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
1615 (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7); 1624 (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
1616 return 0; 1625 return 0;
1617 1626
1627err_pdata:
1628 unregister_netdev(netdev);
1618err_register: 1629err_register:
1619err_get_irq: 1630err_get_irq:
1620 iounmap(ks->hw_addr_cmd); 1631 iounmap(ks->hw_addr_cmd);
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index eaf9ff0262a9..318fee91c79d 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -3913,7 +3913,7 @@ static void hw_start_rx(struct ksz_hw *hw)
3913 hw->rx_stop = 2; 3913 hw->rx_stop = 2;
3914} 3914}
3915 3915
3916/* 3916/**
3917 * hw_stop_rx - stop receiving 3917 * hw_stop_rx - stop receiving
3918 * @hw: The hardware instance. 3918 * @hw: The hardware instance.
3919 * 3919 *
@@ -4480,14 +4480,12 @@ static void ksz_init_rx_buffers(struct dev_info *adapter)
4480 dma_buf->len = adapter->mtu; 4480 dma_buf->len = adapter->mtu;
4481 if (!dma_buf->skb) 4481 if (!dma_buf->skb)
4482 dma_buf->skb = alloc_skb(dma_buf->len, GFP_ATOMIC); 4482 dma_buf->skb = alloc_skb(dma_buf->len, GFP_ATOMIC);
4483 if (dma_buf->skb && !dma_buf->dma) { 4483 if (dma_buf->skb && !dma_buf->dma)
4484 dma_buf->skb->dev = adapter->dev;
4485 dma_buf->dma = pci_map_single( 4484 dma_buf->dma = pci_map_single(
4486 adapter->pdev, 4485 adapter->pdev,
4487 skb_tail_pointer(dma_buf->skb), 4486 skb_tail_pointer(dma_buf->skb),
4488 dma_buf->len, 4487 dma_buf->len,
4489 PCI_DMA_FROMDEVICE); 4488 PCI_DMA_FROMDEVICE);
4490 }
4491 4489
4492 /* Set descriptor. */ 4490 /* Set descriptor. */
4493 set_rx_buf(desc, dma_buf->dma); 4491 set_rx_buf(desc, dma_buf->dma);
@@ -4881,8 +4879,8 @@ static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev)
4881 left = hw_alloc_pkt(hw, skb->len, num); 4879 left = hw_alloc_pkt(hw, skb->len, num);
4882 if (left) { 4880 if (left) {
4883 if (left < num || 4881 if (left < num ||
4884 ((CHECKSUM_PARTIAL == skb->ip_summed) && 4882 (CHECKSUM_PARTIAL == skb->ip_summed &&
4885 (ETH_P_IPV6 == htons(skb->protocol)))) { 4883 skb->protocol == htons(ETH_P_IPV6))) {
4886 struct sk_buff *org_skb = skb; 4884 struct sk_buff *org_skb = skb;
4887 4885
4888 skb = netdev_alloc_skb(dev, org_skb->len); 4886 skb = netdev_alloc_skb(dev, org_skb->len);
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 90153fc983cb..fa85cf1353fd 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3775,7 +3775,7 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
3775 3775
3776 mgp->num_slices = 1; 3776 mgp->num_slices = 1;
3777 msix_cap = pci_find_capability(pdev, PCI_CAP_ID_MSIX); 3777 msix_cap = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
3778 ncpus = num_online_cpus(); 3778 ncpus = netif_get_num_default_rss_queues();
3779 3779
3780 if (myri10ge_max_slices == 1 || msix_cap == 0 || 3780 if (myri10ge_max_slices == 1 || msix_cap == 0 ||
3781 (myri10ge_max_slices == -1 && ncpus < 2)) 3781 (myri10ge_max_slices == -1 && ncpus < 2))
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index bb367582c1e8..d958c2299372 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -3377,7 +3377,7 @@ static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3377 } while (cnt < 20); 3377 } while (cnt < 20);
3378 return ret; 3378 return ret;
3379} 3379}
3380/* 3380/**
3381 * check_pci_device_id - Checks if the device id is supported 3381 * check_pci_device_id - Checks if the device id is supported
3382 * @id : device id 3382 * @id : device id
3383 * Description: Function to check if the pci device id is supported by driver. 3383 * Description: Function to check if the pci device id is supported by driver.
@@ -5238,7 +5238,7 @@ static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5238} 5238}
5239 5239
5240/** 5240/**
5241 * s2io_set_mac_addr driver entry point 5241 * s2io_set_mac_addr - driver entry point
5242 */ 5242 */
5243 5243
5244static int s2io_set_mac_addr(struct net_device *dev, void *p) 5244static int s2io_set_mac_addr(struct net_device *dev, void *p)
@@ -6088,7 +6088,7 @@ static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
6088} 6088}
6089 6089
6090/** 6090/**
6091 * s2io-link_test - verifies the link state of the nic 6091 * s2io_link_test - verifies the link state of the nic
6092 * @sp ; private member of the device structure, which is a pointer to the 6092 * @sp ; private member of the device structure, which is a pointer to the
6093 * s2io_nic structure. 6093 * s2io_nic structure.
6094 * @data: variable that returns the result of each of the test conducted by 6094 * @data: variable that returns the result of each of the test conducted by
@@ -6116,9 +6116,9 @@ static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
6116 6116
6117/** 6117/**
6118 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC 6118 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6119 * @sp - private member of the device structure, which is a pointer to the 6119 * @sp: private member of the device structure, which is a pointer to the
6120 * s2io_nic structure. 6120 * s2io_nic structure.
6121 * @data - variable that returns the result of each of the test 6121 * @data: variable that returns the result of each of the test
6122 * conducted by the driver. 6122 * conducted by the driver.
6123 * Description: 6123 * Description:
6124 * This is one of the offline test that tests the read and write 6124 * This is one of the offline test that tests the read and write
@@ -6946,9 +6946,9 @@ static int rxd_owner_bit_reset(struct s2io_nic *sp)
6946 if (sp->rxd_mode == RXD_MODE_3B) 6946 if (sp->rxd_mode == RXD_MODE_3B)
6947 ba = &ring->ba[j][k]; 6947 ba = &ring->ba[j][k];
6948 if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb, 6948 if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
6949 (u64 *)&temp0_64, 6949 &temp0_64,
6950 (u64 *)&temp1_64, 6950 &temp1_64,
6951 (u64 *)&temp2_64, 6951 &temp2_64,
6952 size) == -ENOMEM) { 6952 size) == -ENOMEM) {
6953 return 0; 6953 return 0;
6954 } 6954 }
@@ -7149,7 +7149,7 @@ static int s2io_card_up(struct s2io_nic *sp)
7149 int i, ret = 0; 7149 int i, ret = 0;
7150 struct config_param *config; 7150 struct config_param *config;
7151 struct mac_info *mac_control; 7151 struct mac_info *mac_control;
7152 struct net_device *dev = (struct net_device *)sp->dev; 7152 struct net_device *dev = sp->dev;
7153 u16 interruptible; 7153 u16 interruptible;
7154 7154
7155 /* Initialize the H/W I/O registers */ 7155 /* Initialize the H/W I/O registers */
@@ -7325,7 +7325,7 @@ static void s2io_tx_watchdog(struct net_device *dev)
7325static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) 7325static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7326{ 7326{
7327 struct s2io_nic *sp = ring_data->nic; 7327 struct s2io_nic *sp = ring_data->nic;
7328 struct net_device *dev = (struct net_device *)ring_data->dev; 7328 struct net_device *dev = ring_data->dev;
7329 struct sk_buff *skb = (struct sk_buff *) 7329 struct sk_buff *skb = (struct sk_buff *)
7330 ((unsigned long)rxdp->Host_Control); 7330 ((unsigned long)rxdp->Host_Control);
7331 int ring_no = ring_data->ring_no; 7331 int ring_no = ring_data->ring_no;
@@ -7508,7 +7508,7 @@ aggregate:
7508 7508
7509static void s2io_link(struct s2io_nic *sp, int link) 7509static void s2io_link(struct s2io_nic *sp, int link)
7510{ 7510{
7511 struct net_device *dev = (struct net_device *)sp->dev; 7511 struct net_device *dev = sp->dev;
7512 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat; 7512 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7513 7513
7514 if (link != sp->last_link_state) { 7514 if (link != sp->last_link_state) {
@@ -8280,7 +8280,7 @@ static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8280 return -1; 8280 return -1;
8281 } 8281 }
8282 8282
8283 *ip = (struct iphdr *)((u8 *)buffer + ip_off); 8283 *ip = (struct iphdr *)(buffer + ip_off);
8284 ip_len = (u8)((*ip)->ihl); 8284 ip_len = (u8)((*ip)->ihl);
8285 ip_len <<= 2; 8285 ip_len <<= 2;
8286 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len); 8286 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 98e2c10ae08b..32d06824fe3e 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -2346,7 +2346,7 @@ void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
2346 2346
2347 for (i = 0; i < nreq; i++) 2347 for (i = 0; i < nreq; i++)
2348 vxge_os_dma_malloc_async( 2348 vxge_os_dma_malloc_async(
2349 ((struct __vxge_hw_device *)blockpool->hldev)->pdev, 2349 (blockpool->hldev)->pdev,
2350 blockpool->hldev, VXGE_HW_BLOCK_SIZE); 2350 blockpool->hldev, VXGE_HW_BLOCK_SIZE);
2351} 2351}
2352 2352
@@ -2428,13 +2428,13 @@ __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
2428 break; 2428 break;
2429 2429
2430 pci_unmap_single( 2430 pci_unmap_single(
2431 ((struct __vxge_hw_device *)blockpool->hldev)->pdev, 2431 (blockpool->hldev)->pdev,
2432 ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, 2432 ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
2433 ((struct __vxge_hw_blockpool_entry *)p)->length, 2433 ((struct __vxge_hw_blockpool_entry *)p)->length,
2434 PCI_DMA_BIDIRECTIONAL); 2434 PCI_DMA_BIDIRECTIONAL);
2435 2435
2436 vxge_os_dma_free( 2436 vxge_os_dma_free(
2437 ((struct __vxge_hw_device *)blockpool->hldev)->pdev, 2437 (blockpool->hldev)->pdev,
2438 ((struct __vxge_hw_blockpool_entry *)p)->memblock, 2438 ((struct __vxge_hw_blockpool_entry *)p)->memblock,
2439 &((struct __vxge_hw_blockpool_entry *)p)->acc_handle); 2439 &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
2440 2440
@@ -4059,7 +4059,7 @@ __vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
4059 enum vxge_hw_status status = VXGE_HW_OK; 4059 enum vxge_hw_status status = VXGE_HW_OK;
4060 struct __vxge_hw_virtualpath *vpath; 4060 struct __vxge_hw_virtualpath *vpath;
4061 4061
4062 vpath = (struct __vxge_hw_virtualpath *)&hldev->virtual_paths[vp_id]; 4062 vpath = &hldev->virtual_paths[vp_id];
4063 4063
4064 if (vpath->ringh) { 4064 if (vpath->ringh) {
4065 status = __vxge_hw_ring_reset(vpath->ringh); 4065 status = __vxge_hw_ring_reset(vpath->ringh);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
index 5046a64f0fe8..9e0c1eed5dc5 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
@@ -1922,7 +1922,7 @@ realloc:
1922 /* misaligned, free current one and try allocating 1922 /* misaligned, free current one and try allocating
1923 * size + VXGE_CACHE_LINE_SIZE memory 1923 * size + VXGE_CACHE_LINE_SIZE memory
1924 */ 1924 */
1925 kfree((void *) vaddr); 1925 kfree(vaddr);
1926 size += VXGE_CACHE_LINE_SIZE; 1926 size += VXGE_CACHE_LINE_SIZE;
1927 realloc_flag = 1; 1927 realloc_flag = 1;
1928 goto realloc; 1928 goto realloc;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 51387c31914b..4e20c5f02712 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -1134,7 +1134,7 @@ static void vxge_set_multicast(struct net_device *dev)
1134 "%s:%d", __func__, __LINE__); 1134 "%s:%d", __func__, __LINE__);
1135 1135
1136 vdev = netdev_priv(dev); 1136 vdev = netdev_priv(dev);
1137 hldev = (struct __vxge_hw_device *)vdev->devh; 1137 hldev = vdev->devh;
1138 1138
1139 if (unlikely(!is_vxge_card_up(vdev))) 1139 if (unlikely(!is_vxge_card_up(vdev)))
1140 return; 1140 return;
@@ -3687,7 +3687,8 @@ static int __devinit vxge_config_vpaths(
3687 return 0; 3687 return 0;
3688 3688
3689 if (!driver_config->g_no_cpus) 3689 if (!driver_config->g_no_cpus)
3690 driver_config->g_no_cpus = num_online_cpus(); 3690 driver_config->g_no_cpus =
3691 netif_get_num_default_rss_queues();
3691 3692
3692 driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1; 3693 driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1;
3693 if (!driver_config->vpath_per_dev) 3694 if (!driver_config->vpath_per_dev)
@@ -3989,16 +3990,16 @@ static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
3989 continue; 3990 continue;
3990 vxge_debug_ll_config(VXGE_TRACE, 3991 vxge_debug_ll_config(VXGE_TRACE,
3991 "%s: MTU size - %d", vdev->ndev->name, 3992 "%s: MTU size - %d", vdev->ndev->name,
3992 ((struct __vxge_hw_device *)(vdev->devh))-> 3993 ((vdev->devh))->
3993 config.vp_config[i].mtu); 3994 config.vp_config[i].mtu);
3994 vxge_debug_init(VXGE_TRACE, 3995 vxge_debug_init(VXGE_TRACE,
3995 "%s: VLAN tag stripping %s", vdev->ndev->name, 3996 "%s: VLAN tag stripping %s", vdev->ndev->name,
3996 ((struct __vxge_hw_device *)(vdev->devh))-> 3997 ((vdev->devh))->
3997 config.vp_config[i].rpa_strip_vlan_tag 3998 config.vp_config[i].rpa_strip_vlan_tag
3998 ? "Enabled" : "Disabled"); 3999 ? "Enabled" : "Disabled");
3999 vxge_debug_ll_config(VXGE_TRACE, 4000 vxge_debug_ll_config(VXGE_TRACE,
4000 "%s: Max frags : %d", vdev->ndev->name, 4001 "%s: Max frags : %d", vdev->ndev->name,
4001 ((struct __vxge_hw_device *)(vdev->devh))-> 4002 ((vdev->devh))->
4002 config.vp_config[i].fifo.max_frags); 4003 config.vp_config[i].fifo.max_frags);
4003 break; 4004 break;
4004 } 4005 }
@@ -4260,9 +4261,7 @@ static int vxge_probe_fw_update(struct vxgedev *vdev)
4260 if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) > 4261 if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) >
4261 VXGE_FW_VER(maj, min, 0)) { 4262 VXGE_FW_VER(maj, min, 0)) {
4262 vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to" 4263 vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to"
4263 " be used with this driver.\n" 4264 " be used with this driver.",
4264 "Please get the latest version from "
4265 "ftp://ftp.s2io.com/pub/X3100-Drivers/FIRMWARE",
4266 VXGE_DRIVER_NAME, maj, min, bld); 4265 VXGE_DRIVER_NAME, maj, min, bld);
4267 return -EINVAL; 4266 return -EINVAL;
4268 } 4267 }
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h
index 35f3e7552ec2..36ca40f8f249 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h
@@ -430,8 +430,7 @@ void vxge_initialize_ethtool_ops(struct net_device *ndev);
430enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev); 430enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
431int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override); 431int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
432 432
433/** 433/* #define VXGE_DEBUG_INIT: debug for initialization functions
434 * #define VXGE_DEBUG_INIT: debug for initialization functions
435 * #define VXGE_DEBUG_TX : debug transmit related functions 434 * #define VXGE_DEBUG_TX : debug transmit related functions
436 * #define VXGE_DEBUG_RX : debug recevice related functions 435 * #define VXGE_DEBUG_RX : debug recevice related functions
437 * #define VXGE_DEBUG_MEM : debug memory module 436 * #define VXGE_DEBUG_MEM : debug memory module
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
index 5954fa264da1..99749bd07d72 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
@@ -533,8 +533,7 @@ __vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
533 533
534 /* notify driver */ 534 /* notify driver */
535 if (hldev->uld_callbacks->crit_err) 535 if (hldev->uld_callbacks->crit_err)
536 hldev->uld_callbacks->crit_err( 536 hldev->uld_callbacks->crit_err(hldev,
537 (struct __vxge_hw_device *)hldev,
538 type, vp_id); 537 type, vp_id);
539out: 538out:
540 539
@@ -1322,7 +1321,7 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
1322 /* check whether it is not the end */ 1321 /* check whether it is not the end */
1323 if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) { 1322 if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {
1324 1323
1325 vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control != 1324 vxge_assert((rxdp)->host_control !=
1326 0); 1325 0);
1327 1326
1328 ++ring->cmpl_cnt; 1327 ++ring->cmpl_cnt;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 928913c4f3ff..8b7c5129c7e1 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -3218,7 +3218,7 @@ static void nv_force_linkspeed(struct net_device *dev, int speed, int duplex)
3218} 3218}
3219 3219
3220/** 3220/**
3221 * nv_update_linkspeed: Setup the MAC according to the link partner 3221 * nv_update_linkspeed - Setup the MAC according to the link partner
3222 * @dev: Network device to be configured 3222 * @dev: Network device to be configured
3223 * 3223 *
3224 * The function queries the PHY and checks if there is a link partner. 3224 * The function queries the PHY and checks if there is a link partner.
@@ -3552,8 +3552,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
3552 return IRQ_HANDLED; 3552 return IRQ_HANDLED;
3553} 3553}
3554 3554
3555/** 3555/* All _optimized functions are used to help increase performance
3556 * All _optimized functions are used to help increase performance
3557 * (reduce CPU and increase throughput). They use descripter version 3, 3556 * (reduce CPU and increase throughput). They use descripter version 3,
3558 * compiler directives, and reduce memory accesses. 3557 * compiler directives, and reduce memory accesses.
3559 */ 3558 */
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 083d6715335c..4069edab229e 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -44,7 +44,6 @@
44#include <linux/of_net.h> 44#include <linux/of_net.h>
45#include <linux/types.h> 45#include <linux/types.h>
46 46
47#include <linux/delay.h>
48#include <linux/io.h> 47#include <linux/io.h>
49#include <mach/board.h> 48#include <mach/board.h>
50#include <mach/platform.h> 49#include <mach/platform.h>
@@ -52,7 +51,6 @@
52 51
53#define MODNAME "lpc-eth" 52#define MODNAME "lpc-eth"
54#define DRV_VERSION "1.00" 53#define DRV_VERSION "1.00"
55#define PHYDEF_ADDR 0x00
56 54
57#define ENET_MAXF_SIZE 1536 55#define ENET_MAXF_SIZE 1536
58#define ENET_RX_DESC 48 56#define ENET_RX_DESC 48
@@ -416,9 +414,6 @@ static bool use_iram_for_net(struct device *dev)
416#define TXDESC_CONTROL_LAST (1 << 30) 414#define TXDESC_CONTROL_LAST (1 << 30)
417#define TXDESC_CONTROL_INT (1 << 31) 415#define TXDESC_CONTROL_INT (1 << 31)
418 416
419static int lpc_eth_hard_start_xmit(struct sk_buff *skb,
420 struct net_device *ndev);
421
422/* 417/*
423 * Structure of a TX/RX descriptors and RX status 418 * Structure of a TX/RX descriptors and RX status
424 */ 419 */
@@ -440,7 +435,7 @@ struct netdata_local {
440 spinlock_t lock; 435 spinlock_t lock;
441 void __iomem *net_base; 436 void __iomem *net_base;
442 u32 msg_enable; 437 u32 msg_enable;
443 struct sk_buff *skb[ENET_TX_DESC]; 438 unsigned int skblen[ENET_TX_DESC];
444 unsigned int last_tx_idx; 439 unsigned int last_tx_idx;
445 unsigned int num_used_tx_buffs; 440 unsigned int num_used_tx_buffs;
446 struct mii_bus *mii_bus; 441 struct mii_bus *mii_bus;
@@ -903,12 +898,11 @@ err_out:
903static void __lpc_handle_xmit(struct net_device *ndev) 898static void __lpc_handle_xmit(struct net_device *ndev)
904{ 899{
905 struct netdata_local *pldat = netdev_priv(ndev); 900 struct netdata_local *pldat = netdev_priv(ndev);
906 struct sk_buff *skb;
907 u32 txcidx, *ptxstat, txstat; 901 u32 txcidx, *ptxstat, txstat;
908 902
909 txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base)); 903 txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
910 while (pldat->last_tx_idx != txcidx) { 904 while (pldat->last_tx_idx != txcidx) {
911 skb = pldat->skb[pldat->last_tx_idx]; 905 unsigned int skblen = pldat->skblen[pldat->last_tx_idx];
912 906
913 /* A buffer is available, get buffer status */ 907 /* A buffer is available, get buffer status */
914 ptxstat = &pldat->tx_stat_v[pldat->last_tx_idx]; 908 ptxstat = &pldat->tx_stat_v[pldat->last_tx_idx];
@@ -945,9 +939,8 @@ static void __lpc_handle_xmit(struct net_device *ndev)
945 } else { 939 } else {
946 /* Update stats */ 940 /* Update stats */
947 ndev->stats.tx_packets++; 941 ndev->stats.tx_packets++;
948 ndev->stats.tx_bytes += skb->len; 942 ndev->stats.tx_bytes += skblen;
949 } 943 }
950 dev_kfree_skb_irq(skb);
951 944
952 txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base)); 945 txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
953 } 946 }
@@ -1132,7 +1125,7 @@ static int lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1132 memcpy(pldat->tx_buff_v + txidx * ENET_MAXF_SIZE, skb->data, len); 1125 memcpy(pldat->tx_buff_v + txidx * ENET_MAXF_SIZE, skb->data, len);
1133 1126
1134 /* Save the buffer and increment the buffer counter */ 1127 /* Save the buffer and increment the buffer counter */
1135 pldat->skb[txidx] = skb; 1128 pldat->skblen[txidx] = len;
1136 pldat->num_used_tx_buffs++; 1129 pldat->num_used_tx_buffs++;
1137 1130
1138 /* Start transmit */ 1131 /* Start transmit */
@@ -1147,6 +1140,7 @@ static int lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1147 1140
1148 spin_unlock_irq(&pldat->lock); 1141 spin_unlock_irq(&pldat->lock);
1149 1142
1143 dev_kfree_skb(skb);
1150 return NETDEV_TX_OK; 1144 return NETDEV_TX_OK;
1151} 1145}
1152 1146
@@ -1442,7 +1436,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
1442 res->start); 1436 res->start);
1443 netdev_dbg(ndev, "IO address size :%d\n", 1437 netdev_dbg(ndev, "IO address size :%d\n",
1444 res->end - res->start + 1); 1438 res->end - res->start + 1);
1445 netdev_err(ndev, "IO address (mapped) :0x%p\n", 1439 netdev_dbg(ndev, "IO address (mapped) :0x%p\n",
1446 pldat->net_base); 1440 pldat->net_base);
1447 netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq); 1441 netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq);
1448 netdev_dbg(ndev, "DMA buffer size :%d\n", pldat->dma_buff_size); 1442 netdev_dbg(ndev, "DMA buffer size :%d\n", pldat->dma_buff_size);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
index e48f084ad226..5ae03e815ee9 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
@@ -60,7 +60,7 @@ static void pch_gbe_plat_get_bus_info(struct pch_gbe_hw *hw)
60/** 60/**
61 * pch_gbe_plat_init_hw - Initialize hardware 61 * pch_gbe_plat_init_hw - Initialize hardware
62 * @hw: Pointer to the HW structure 62 * @hw: Pointer to the HW structure
63 * Returns 63 * Returns:
64 * 0: Successfully 64 * 0: Successfully
65 * Negative value: Failed-EBUSY 65 * Negative value: Failed-EBUSY
66 */ 66 */
@@ -108,7 +108,7 @@ static void pch_gbe_plat_init_function_pointers(struct pch_gbe_hw *hw)
108/** 108/**
109 * pch_gbe_hal_setup_init_funcs - Initializes function pointers 109 * pch_gbe_hal_setup_init_funcs - Initializes function pointers
110 * @hw: Pointer to the HW structure 110 * @hw: Pointer to the HW structure
111 * Returns 111 * Returns:
112 * 0: Successfully 112 * 0: Successfully
113 * ENOSYS: Function is not registered 113 * ENOSYS: Function is not registered
114 */ 114 */
@@ -137,7 +137,7 @@ inline void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw)
137/** 137/**
138 * pch_gbe_hal_init_hw - Initialize hardware 138 * pch_gbe_hal_init_hw - Initialize hardware
139 * @hw: Pointer to the HW structure 139 * @hw: Pointer to the HW structure
140 * Returns 140 * Returns:
141 * 0: Successfully 141 * 0: Successfully
142 * ENOSYS: Function is not registered 142 * ENOSYS: Function is not registered
143 */ 143 */
@@ -155,7 +155,7 @@ inline s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw)
155 * @hw: Pointer to the HW structure 155 * @hw: Pointer to the HW structure
156 * @offset: The register to read 156 * @offset: The register to read
157 * @data: The buffer to store the 16-bit read. 157 * @data: The buffer to store the 16-bit read.
158 * Returns 158 * Returns:
159 * 0: Successfully 159 * 0: Successfully
160 * Negative value: Failed 160 * Negative value: Failed
161 */ 161 */
@@ -172,7 +172,7 @@ inline s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset,
172 * @hw: Pointer to the HW structure 172 * @hw: Pointer to the HW structure
173 * @offset: The register to read 173 * @offset: The register to read
174 * @data: The value to write. 174 * @data: The value to write.
175 * Returns 175 * Returns:
176 * 0: Successfully 176 * 0: Successfully
177 * Negative value: Failed 177 * Negative value: Failed
178 */ 178 */
@@ -211,7 +211,7 @@ inline void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw)
211/** 211/**
212 * pch_gbe_hal_read_mac_addr - Reads MAC address 212 * pch_gbe_hal_read_mac_addr - Reads MAC address
213 * @hw: Pointer to the HW structure 213 * @hw: Pointer to the HW structure
214 * Returns 214 * Returns:
215 * 0: Successfully 215 * 0: Successfully
216 * ENOSYS: Function is not registered 216 * ENOSYS: Function is not registered
217 */ 217 */
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index ac4e72d529e5..9dbf38c10a68 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -77,7 +77,7 @@ static const struct pch_gbe_stats pch_gbe_gstrings_stats[] = {
77 * pch_gbe_get_settings - Get device-specific settings 77 * pch_gbe_get_settings - Get device-specific settings
78 * @netdev: Network interface device structure 78 * @netdev: Network interface device structure
79 * @ecmd: Ethtool command 79 * @ecmd: Ethtool command
80 * Returns 80 * Returns:
81 * 0: Successful. 81 * 0: Successful.
82 * Negative value: Failed. 82 * Negative value: Failed.
83 */ 83 */
@@ -100,7 +100,7 @@ static int pch_gbe_get_settings(struct net_device *netdev,
100 * pch_gbe_set_settings - Set device-specific settings 100 * pch_gbe_set_settings - Set device-specific settings
101 * @netdev: Network interface device structure 101 * @netdev: Network interface device structure
102 * @ecmd: Ethtool command 102 * @ecmd: Ethtool command
103 * Returns 103 * Returns:
104 * 0: Successful. 104 * 0: Successful.
105 * Negative value: Failed. 105 * Negative value: Failed.
106 */ 106 */
@@ -220,7 +220,7 @@ static void pch_gbe_get_wol(struct net_device *netdev,
220 * pch_gbe_set_wol - Turn Wake-on-Lan on or off 220 * pch_gbe_set_wol - Turn Wake-on-Lan on or off
221 * @netdev: Network interface device structure 221 * @netdev: Network interface device structure
222 * @wol: Pointer of wake-on-Lan information straucture 222 * @wol: Pointer of wake-on-Lan information straucture
223 * Returns 223 * Returns:
224 * 0: Successful. 224 * 0: Successful.
225 * Negative value: Failed. 225 * Negative value: Failed.
226 */ 226 */
@@ -248,7 +248,7 @@ static int pch_gbe_set_wol(struct net_device *netdev,
248/** 248/**
249 * pch_gbe_nway_reset - Restart autonegotiation 249 * pch_gbe_nway_reset - Restart autonegotiation
250 * @netdev: Network interface device structure 250 * @netdev: Network interface device structure
251 * Returns 251 * Returns:
252 * 0: Successful. 252 * 0: Successful.
253 * Negative value: Failed. 253 * Negative value: Failed.
254 */ 254 */
@@ -398,7 +398,7 @@ static void pch_gbe_get_pauseparam(struct net_device *netdev,
398 * pch_gbe_set_pauseparam - Set pause paramters 398 * pch_gbe_set_pauseparam - Set pause paramters
399 * @netdev: Network interface device structure 399 * @netdev: Network interface device structure
400 * @pause: Pause parameters structure 400 * @pause: Pause parameters structure
401 * Returns 401 * Returns:
402 * 0: Successful. 402 * 0: Successful.
403 * Negative value: Failed. 403 * Negative value: Failed.
404 */ 404 */
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 3787c64ee71c..b1006563f736 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -301,7 +301,7 @@ inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
301/** 301/**
302 * pch_gbe_mac_read_mac_addr - Read MAC address 302 * pch_gbe_mac_read_mac_addr - Read MAC address
303 * @hw: Pointer to the HW structure 303 * @hw: Pointer to the HW structure
304 * Returns 304 * Returns:
305 * 0: Successful. 305 * 0: Successful.
306 */ 306 */
307s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw) 307s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
@@ -483,7 +483,7 @@ static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
483/** 483/**
484 * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings 484 * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
485 * @hw: Pointer to the HW structure 485 * @hw: Pointer to the HW structure
486 * Returns 486 * Returns:
487 * 0: Successful. 487 * 0: Successful.
488 * Negative value: Failed. 488 * Negative value: Failed.
489 */ 489 */
@@ -639,7 +639,7 @@ static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
639/** 639/**
640 * pch_gbe_alloc_queues - Allocate memory for all rings 640 * pch_gbe_alloc_queues - Allocate memory for all rings
641 * @adapter: Board private structure to initialize 641 * @adapter: Board private structure to initialize
642 * Returns 642 * Returns:
643 * 0: Successfully 643 * 0: Successfully
644 * Negative value: Failed 644 * Negative value: Failed
645 */ 645 */
@@ -670,7 +670,7 @@ static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter)
670/** 670/**
671 * pch_gbe_init_phy - Initialize PHY 671 * pch_gbe_init_phy - Initialize PHY
672 * @adapter: Board private structure to initialize 672 * @adapter: Board private structure to initialize
673 * Returns 673 * Returns:
674 * 0: Successfully 674 * 0: Successfully
675 * Negative value: Failed 675 * Negative value: Failed
676 */ 676 */
@@ -720,7 +720,7 @@ static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
720 * @netdev: Network interface device structure 720 * @netdev: Network interface device structure
721 * @addr: Phy ID 721 * @addr: Phy ID
722 * @reg: Access location 722 * @reg: Access location
723 * Returns 723 * Returns:
724 * 0: Successfully 724 * 0: Successfully
725 * Negative value: Failed 725 * Negative value: Failed
726 */ 726 */
@@ -1364,7 +1364,7 @@ static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
1364 * pch_gbe_intr - Interrupt Handler 1364 * pch_gbe_intr - Interrupt Handler
1365 * @irq: Interrupt number 1365 * @irq: Interrupt number
1366 * @data: Pointer to a network interface device structure 1366 * @data: Pointer to a network interface device structure
1367 * Returns 1367 * Returns:
1368 * - IRQ_HANDLED: Our interrupt 1368 * - IRQ_HANDLED: Our interrupt
1369 * - IRQ_NONE: Not our interrupt 1369 * - IRQ_NONE: Not our interrupt
1370 */ 1370 */
@@ -1566,7 +1566,7 @@ static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter,
1566 * pch_gbe_clean_tx - Reclaim resources after transmit completes 1566 * pch_gbe_clean_tx - Reclaim resources after transmit completes
1567 * @adapter: Board private structure 1567 * @adapter: Board private structure
1568 * @tx_ring: Tx descriptor ring 1568 * @tx_ring: Tx descriptor ring
1569 * Returns 1569 * Returns:
1570 * true: Cleaned the descriptor 1570 * true: Cleaned the descriptor
1571 * false: Not cleaned the descriptor 1571 * false: Not cleaned the descriptor
1572 */ 1572 */
@@ -1660,7 +1660,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
1660 * @rx_ring: Rx descriptor ring 1660 * @rx_ring: Rx descriptor ring
1661 * @work_done: Completed count 1661 * @work_done: Completed count
1662 * @work_to_do: Request count 1662 * @work_to_do: Request count
1663 * Returns 1663 * Returns:
1664 * true: Cleaned the descriptor 1664 * true: Cleaned the descriptor
1665 * false: Not cleaned the descriptor 1665 * false: Not cleaned the descriptor
1666 */ 1666 */
@@ -1775,7 +1775,7 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1775 * pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors) 1775 * pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors)
1776 * @adapter: Board private structure 1776 * @adapter: Board private structure
1777 * @tx_ring: Tx descriptor ring (for a specific queue) to setup 1777 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
1778 * Returns 1778 * Returns:
1779 * 0: Successfully 1779 * 0: Successfully
1780 * Negative value: Failed 1780 * Negative value: Failed
1781 */ 1781 */
@@ -1822,7 +1822,7 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
1822 * pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors) 1822 * pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors)
1823 * @adapter: Board private structure 1823 * @adapter: Board private structure
1824 * @rx_ring: Rx descriptor ring (for a specific queue) to setup 1824 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1825 * Returns 1825 * Returns:
1826 * 0: Successfully 1826 * 0: Successfully
1827 * Negative value: Failed 1827 * Negative value: Failed
1828 */ 1828 */
@@ -1899,7 +1899,7 @@ void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
1899/** 1899/**
1900 * pch_gbe_request_irq - Allocate an interrupt line 1900 * pch_gbe_request_irq - Allocate an interrupt line
1901 * @adapter: Board private structure 1901 * @adapter: Board private structure
1902 * Returns 1902 * Returns:
1903 * 0: Successfully 1903 * 0: Successfully
1904 * Negative value: Failed 1904 * Negative value: Failed
1905 */ 1905 */
@@ -1932,7 +1932,7 @@ static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
1932/** 1932/**
1933 * pch_gbe_up - Up GbE network device 1933 * pch_gbe_up - Up GbE network device
1934 * @adapter: Board private structure 1934 * @adapter: Board private structure
1935 * Returns 1935 * Returns:
1936 * 0: Successfully 1936 * 0: Successfully
1937 * Negative value: Failed 1937 * Negative value: Failed
1938 */ 1938 */
@@ -2018,7 +2018,7 @@ void pch_gbe_down(struct pch_gbe_adapter *adapter)
2018/** 2018/**
2019 * pch_gbe_sw_init - Initialize general software structures (struct pch_gbe_adapter) 2019 * pch_gbe_sw_init - Initialize general software structures (struct pch_gbe_adapter)
2020 * @adapter: Board private structure to initialize 2020 * @adapter: Board private structure to initialize
2021 * Returns 2021 * Returns:
2022 * 0: Successfully 2022 * 0: Successfully
2023 * Negative value: Failed 2023 * Negative value: Failed
2024 */ 2024 */
@@ -2057,7 +2057,7 @@ static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
2057/** 2057/**
2058 * pch_gbe_open - Called when a network interface is made active 2058 * pch_gbe_open - Called when a network interface is made active
2059 * @netdev: Network interface device structure 2059 * @netdev: Network interface device structure
2060 * Returns 2060 * Returns:
2061 * 0: Successfully 2061 * 0: Successfully
2062 * Negative value: Failed 2062 * Negative value: Failed
2063 */ 2063 */
@@ -2097,7 +2097,7 @@ err_setup_tx:
2097/** 2097/**
2098 * pch_gbe_stop - Disables a network interface 2098 * pch_gbe_stop - Disables a network interface
2099 * @netdev: Network interface device structure 2099 * @netdev: Network interface device structure
2100 * Returns 2100 * Returns:
2101 * 0: Successfully 2101 * 0: Successfully
2102 */ 2102 */
2103static int pch_gbe_stop(struct net_device *netdev) 2103static int pch_gbe_stop(struct net_device *netdev)
@@ -2117,7 +2117,7 @@ static int pch_gbe_stop(struct net_device *netdev)
2117 * pch_gbe_xmit_frame - Packet transmitting start 2117 * pch_gbe_xmit_frame - Packet transmitting start
2118 * @skb: Socket buffer structure 2118 * @skb: Socket buffer structure
2119 * @netdev: Network interface device structure 2119 * @netdev: Network interface device structure
2120 * Returns 2120 * Returns:
2121 * - NETDEV_TX_OK: Normal end 2121 * - NETDEV_TX_OK: Normal end
2122 * - NETDEV_TX_BUSY: Error end 2122 * - NETDEV_TX_BUSY: Error end
2123 */ 2123 */
@@ -2225,7 +2225,7 @@ static void pch_gbe_set_multi(struct net_device *netdev)
2225 * pch_gbe_set_mac - Change the Ethernet Address of the NIC 2225 * pch_gbe_set_mac - Change the Ethernet Address of the NIC
2226 * @netdev: Network interface device structure 2226 * @netdev: Network interface device structure
2227 * @addr: Pointer to an address structure 2227 * @addr: Pointer to an address structure
2228 * Returns 2228 * Returns:
2229 * 0: Successfully 2229 * 0: Successfully
2230 * -EADDRNOTAVAIL: Failed 2230 * -EADDRNOTAVAIL: Failed
2231 */ 2231 */
@@ -2256,7 +2256,7 @@ static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
2256 * pch_gbe_change_mtu - Change the Maximum Transfer Unit 2256 * pch_gbe_change_mtu - Change the Maximum Transfer Unit
2257 * @netdev: Network interface device structure 2257 * @netdev: Network interface device structure
2258 * @new_mtu: New value for maximum frame size 2258 * @new_mtu: New value for maximum frame size
2259 * Returns 2259 * Returns:
2260 * 0: Successfully 2260 * 0: Successfully
2261 * -EINVAL: Failed 2261 * -EINVAL: Failed
2262 */ 2262 */
@@ -2309,7 +2309,7 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
2309 * pch_gbe_set_features - Reset device after features changed 2309 * pch_gbe_set_features - Reset device after features changed
2310 * @netdev: Network interface device structure 2310 * @netdev: Network interface device structure
2311 * @features: New features 2311 * @features: New features
2312 * Returns 2312 * Returns:
2313 * 0: HW state updated successfully 2313 * 0: HW state updated successfully
2314 */ 2314 */
2315static int pch_gbe_set_features(struct net_device *netdev, 2315static int pch_gbe_set_features(struct net_device *netdev,
@@ -2334,7 +2334,7 @@ static int pch_gbe_set_features(struct net_device *netdev,
2334 * @netdev: Network interface device structure 2334 * @netdev: Network interface device structure
2335 * @ifr: Pointer to ifr structure 2335 * @ifr: Pointer to ifr structure
2336 * @cmd: Control command 2336 * @cmd: Control command
2337 * Returns 2337 * Returns:
2338 * 0: Successfully 2338 * 0: Successfully
2339 * Negative value: Failed 2339 * Negative value: Failed
2340 */ 2340 */
@@ -2369,7 +2369,7 @@ static void pch_gbe_tx_timeout(struct net_device *netdev)
2369 * pch_gbe_napi_poll - NAPI receive and transfer polling callback 2369 * pch_gbe_napi_poll - NAPI receive and transfer polling callback
2370 * @napi: Pointer of polling device struct 2370 * @napi: Pointer of polling device struct
2371 * @budget: The maximum number of a packet 2371 * @budget: The maximum number of a packet
2372 * Returns 2372 * Returns:
2373 * false: Exit the polling mode 2373 * false: Exit the polling mode
2374 * true: Continue the polling mode 2374 * true: Continue the polling mode
2375 */ 2375 */
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
index 29e23bec809c..8653c3b81f84 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
@@ -139,7 +139,7 @@ MODULE_PARM_DESC(XsumTX, "Disable or enable Transmit Checksum offload");
139/** 139/**
140 * pch_gbe_option - Force the MAC's flow control settings 140 * pch_gbe_option - Force the MAC's flow control settings
141 * @hw: Pointer to the HW structure 141 * @hw: Pointer to the HW structure
142 * Returns 142 * Returns:
143 * 0: Successful. 143 * 0: Successful.
144 * Negative value: Failed. 144 * Negative value: Failed.
145 */ 145 */
@@ -220,7 +220,7 @@ static const struct pch_gbe_opt_list fc_list[] = {
220 * @value: value 220 * @value: value
221 * @opt: option 221 * @opt: option
222 * @adapter: Board private structure 222 * @adapter: Board private structure
223 * Returns 223 * Returns:
224 * 0: Successful. 224 * 0: Successful.
225 * Negative value: Failed. 225 * Negative value: Failed.
226 */ 226 */
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 37ccbe54e62d..eb3dfdbb642b 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
53 53
54#define _NETXEN_NIC_LINUX_MAJOR 4 54#define _NETXEN_NIC_LINUX_MAJOR 4
55#define _NETXEN_NIC_LINUX_MINOR 0 55#define _NETXEN_NIC_LINUX_MINOR 0
56#define _NETXEN_NIC_LINUX_SUBVERSION 79 56#define _NETXEN_NIC_LINUX_SUBVERSION 80
57#define NETXEN_NIC_LINUX_VERSIONID "4.0.79" 57#define NETXEN_NIC_LINUX_VERSIONID "4.0.80"
58 58
59#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) 59#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
60#define _major(v) (((v) >> 24) & 0xff) 60#define _major(v) (((v) >> 24) & 0xff)
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 39730403782f..10468e7932dd 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -489,7 +489,7 @@ netxen_nic_get_pauseparam(struct net_device *dev,
489 int port = adapter->physical_port; 489 int port = adapter->physical_port;
490 490
491 if (adapter->ahw.port_type == NETXEN_NIC_GBE) { 491 if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
492 if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS)) 492 if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS))
493 return; 493 return;
494 /* get flow control settings */ 494 /* get flow control settings */
495 val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port)); 495 val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port));
@@ -511,7 +511,7 @@ netxen_nic_get_pauseparam(struct net_device *dev,
511 break; 511 break;
512 } 512 }
513 } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { 513 } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
514 if ((port < 0) || (port > NETXEN_NIU_MAX_XG_PORTS)) 514 if ((port < 0) || (port >= NETXEN_NIU_MAX_XG_PORTS))
515 return; 515 return;
516 pause->rx_pause = 1; 516 pause->rx_pause = 1;
517 val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL); 517 val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL);
@@ -534,7 +534,7 @@ netxen_nic_set_pauseparam(struct net_device *dev,
534 int port = adapter->physical_port; 534 int port = adapter->physical_port;
535 /* read mode */ 535 /* read mode */
536 if (adapter->ahw.port_type == NETXEN_NIC_GBE) { 536 if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
537 if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS)) 537 if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS))
538 return -EIO; 538 return -EIO;
539 /* set flow control */ 539 /* set flow control */
540 val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port)); 540 val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port));
@@ -577,7 +577,7 @@ netxen_nic_set_pauseparam(struct net_device *dev,
577 } 577 }
578 NXWR32(adapter, NETXEN_NIU_GB_PAUSE_CTL, val); 578 NXWR32(adapter, NETXEN_NIU_GB_PAUSE_CTL, val);
579 } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { 579 } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
580 if ((port < 0) || (port > NETXEN_NIU_MAX_XG_PORTS)) 580 if ((port < 0) || (port >= NETXEN_NIU_MAX_XG_PORTS))
581 return -EIO; 581 return -EIO;
582 val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL); 582 val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL);
583 if (port == 0) { 583 if (port == 0) {
@@ -826,7 +826,12 @@ netxen_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
826 dump->len = mdump->md_dump_size; 826 dump->len = mdump->md_dump_size;
827 else 827 else
828 dump->len = 0; 828 dump->len = 0;
829 dump->flag = mdump->md_capture_mask; 829
830 if (!mdump->md_enabled)
831 dump->flag = ETH_FW_DUMP_DISABLE;
832 else
833 dump->flag = mdump->md_capture_mask;
834
830 dump->version = adapter->fw_version; 835 dump->version = adapter->fw_version;
831 return 0; 836 return 0;
832} 837}
@@ -840,8 +845,10 @@ netxen_set_dump(struct net_device *netdev, struct ethtool_dump *val)
840 845
841 switch (val->flag) { 846 switch (val->flag) {
842 case NX_FORCE_FW_DUMP_KEY: 847 case NX_FORCE_FW_DUMP_KEY:
843 if (!mdump->md_enabled) 848 if (!mdump->md_enabled) {
844 mdump->md_enabled = 1; 849 netdev_info(netdev, "FW dump not enabled\n");
850 return 0;
851 }
845 if (adapter->fw_mdump_rdy) { 852 if (adapter->fw_mdump_rdy) {
846 netdev_info(netdev, "Previous dump not cleared, not forcing dump\n"); 853 netdev_info(netdev, "Previous dump not cleared, not forcing dump\n");
847 return 0; 854 return 0;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index de96a948bb7f..946160fa5843 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -365,7 +365,7 @@ static int netxen_niu_disable_xg_port(struct netxen_adapter *adapter)
365 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 365 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
366 return 0; 366 return 0;
367 367
368 if (port > NETXEN_NIU_MAX_XG_PORTS) 368 if (port >= NETXEN_NIU_MAX_XG_PORTS)
369 return -EINVAL; 369 return -EINVAL;
370 370
371 mac_cfg = 0; 371 mac_cfg = 0;
@@ -392,7 +392,7 @@ static int netxen_p2_nic_set_promisc(struct netxen_adapter *adapter, u32 mode)
392 u32 port = adapter->physical_port; 392 u32 port = adapter->physical_port;
393 u16 board_type = adapter->ahw.board_type; 393 u16 board_type = adapter->ahw.board_type;
394 394
395 if (port > NETXEN_NIU_MAX_XG_PORTS) 395 if (port >= NETXEN_NIU_MAX_XG_PORTS)
396 return -EINVAL; 396 return -EINVAL;
397 397
398 mac_cfg = NXRD32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port)); 398 mac_cfg = NXRD32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port));
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 8694124ef77d..bc165f4d0f65 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -1437,8 +1437,6 @@ netxen_handle_linkevent(struct netxen_adapter *adapter, nx_fw_msg_t *msg)
1437 netdev->name, cable_len); 1437 netdev->name, cable_len);
1438 } 1438 }
1439 1439
1440 netxen_advert_link_change(adapter, link_status);
1441
1442 /* update link parameters */ 1440 /* update link parameters */
1443 if (duplex == LINKEVENT_FULL_DUPLEX) 1441 if (duplex == LINKEVENT_FULL_DUPLEX)
1444 adapter->link_duplex = DUPLEX_FULL; 1442 adapter->link_duplex = DUPLEX_FULL;
@@ -1447,6 +1445,8 @@ netxen_handle_linkevent(struct netxen_adapter *adapter, nx_fw_msg_t *msg)
1447 adapter->module_type = module; 1445 adapter->module_type = module;
1448 adapter->link_autoneg = autoneg; 1446 adapter->link_autoneg = autoneg;
1449 adapter->link_speed = link_speed; 1447 adapter->link_speed = link_speed;
1448
1449 netxen_advert_link_change(adapter, link_status);
1450} 1450}
1451 1451
1452static void 1452static void
@@ -1532,8 +1532,6 @@ static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
1532 } else 1532 } else
1533 skb->ip_summed = CHECKSUM_NONE; 1533 skb->ip_summed = CHECKSUM_NONE;
1534 1534
1535 skb->dev = adapter->netdev;
1536
1537 buffer->skb = NULL; 1535 buffer->skb = NULL;
1538no_skb: 1536no_skb:
1539 buffer->state = NETXEN_BUFFER_FREE; 1537 buffer->state = NETXEN_BUFFER_FREE;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 8680a5dae4a2..eaa1db9fec32 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -36,8 +36,8 @@
36 36
37#define _QLCNIC_LINUX_MAJOR 5 37#define _QLCNIC_LINUX_MAJOR 5
38#define _QLCNIC_LINUX_MINOR 0 38#define _QLCNIC_LINUX_MINOR 0
39#define _QLCNIC_LINUX_SUBVERSION 28 39#define _QLCNIC_LINUX_SUBVERSION 29
40#define QLCNIC_LINUX_VERSIONID "5.0.28" 40#define QLCNIC_LINUX_VERSIONID "5.0.29"
41#define QLCNIC_DRV_IDC_VER 0x01 41#define QLCNIC_DRV_IDC_VER 0x01
42#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 42#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
43 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) 43 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -258,6 +258,8 @@ struct rcv_desc {
258 (((sts_data) >> 52) & 0x1) 258 (((sts_data) >> 52) & 0x1)
259#define qlcnic_get_lro_sts_seq_number(sts_data) \ 259#define qlcnic_get_lro_sts_seq_number(sts_data) \
260 ((sts_data) & 0x0FFFFFFFF) 260 ((sts_data) & 0x0FFFFFFFF)
261#define qlcnic_get_lro_sts_mss(sts_data1) \
262 ((sts_data1 >> 32) & 0x0FFFF)
261 263
262 264
263struct status_desc { 265struct status_desc {
@@ -610,7 +612,11 @@ struct qlcnic_recv_context {
610#define QLCNIC_CDRP_CMD_GET_MAC_STATS 0x00000037 612#define QLCNIC_CDRP_CMD_GET_MAC_STATS 0x00000037
611 613
612#define QLCNIC_RCODE_SUCCESS 0 614#define QLCNIC_RCODE_SUCCESS 0
615#define QLCNIC_RCODE_INVALID_ARGS 6
613#define QLCNIC_RCODE_NOT_SUPPORTED 9 616#define QLCNIC_RCODE_NOT_SUPPORTED 9
617#define QLCNIC_RCODE_NOT_PERMITTED 10
618#define QLCNIC_RCODE_NOT_IMPL 15
619#define QLCNIC_RCODE_INVALID 16
614#define QLCNIC_RCODE_TIMEOUT 17 620#define QLCNIC_RCODE_TIMEOUT 17
615#define QLCNIC_DESTROY_CTX_RESET 0 621#define QLCNIC_DESTROY_CTX_RESET 0
616 622
@@ -623,6 +629,7 @@ struct qlcnic_recv_context {
623#define QLCNIC_CAP0_JUMBO_CONTIGUOUS (1 << 7) 629#define QLCNIC_CAP0_JUMBO_CONTIGUOUS (1 << 7)
624#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8) 630#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8)
625#define QLCNIC_CAP0_VALIDOFF (1 << 11) 631#define QLCNIC_CAP0_VALIDOFF (1 << 11)
632#define QLCNIC_CAP0_LRO_MSS (1 << 21)
626 633
627/* 634/*
628 * Context state 635 * Context state
@@ -829,6 +836,9 @@ struct qlcnic_mac_list_s {
829#define QLCNIC_FW_CAPABILITY_FVLANTX BIT_9 836#define QLCNIC_FW_CAPABILITY_FVLANTX BIT_9
830#define QLCNIC_FW_CAPABILITY_HW_LRO BIT_10 837#define QLCNIC_FW_CAPABILITY_HW_LRO BIT_10
831#define QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK BIT_27 838#define QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK BIT_27
839#define QLCNIC_FW_CAPABILITY_MORE_CAPS BIT_31
840
841#define QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG BIT_2
832 842
833/* module types */ 843/* module types */
834#define LINKEVENT_MODULE_NOT_PRESENT 1 844#define LINKEVENT_MODULE_NOT_PRESENT 1
@@ -918,6 +928,7 @@ struct qlcnic_ipaddr {
918#define QLCNIC_NEED_FLR 0x1000 928#define QLCNIC_NEED_FLR 0x1000
919#define QLCNIC_FW_RESET_OWNER 0x2000 929#define QLCNIC_FW_RESET_OWNER 0x2000
920#define QLCNIC_FW_HANG 0x4000 930#define QLCNIC_FW_HANG 0x4000
931#define QLCNIC_FW_LRO_MSS_CAP 0x8000
921#define QLCNIC_IS_MSI_FAMILY(adapter) \ 932#define QLCNIC_IS_MSI_FAMILY(adapter) \
922 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED)) 933 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
923 934
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 8db85244e8ad..b8ead696141e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -53,12 +53,39 @@ qlcnic_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd)
53 rsp = qlcnic_poll_rsp(adapter); 53 rsp = qlcnic_poll_rsp(adapter);
54 54
55 if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) { 55 if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
56 dev_err(&pdev->dev, "card response timeout.\n"); 56 dev_err(&pdev->dev, "CDRP response timeout.\n");
57 cmd->rsp.cmd = QLCNIC_RCODE_TIMEOUT; 57 cmd->rsp.cmd = QLCNIC_RCODE_TIMEOUT;
58 } else if (rsp == QLCNIC_CDRP_RSP_FAIL) { 58 } else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
59 cmd->rsp.cmd = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET); 59 cmd->rsp.cmd = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
60 dev_err(&pdev->dev, "failed card response code:0x%x\n", 60 switch (cmd->rsp.cmd) {
61 case QLCNIC_RCODE_INVALID_ARGS:
62 dev_err(&pdev->dev, "CDRP invalid args: 0x%x.\n",
61 cmd->rsp.cmd); 63 cmd->rsp.cmd);
64 break;
65 case QLCNIC_RCODE_NOT_SUPPORTED:
66 case QLCNIC_RCODE_NOT_IMPL:
67 dev_err(&pdev->dev,
68 "CDRP command not supported: 0x%x.\n",
69 cmd->rsp.cmd);
70 break;
71 case QLCNIC_RCODE_NOT_PERMITTED:
72 dev_err(&pdev->dev,
73 "CDRP requested action not permitted: 0x%x.\n",
74 cmd->rsp.cmd);
75 break;
76 case QLCNIC_RCODE_INVALID:
77 dev_err(&pdev->dev,
78 "CDRP invalid or unknown cmd received: 0x%x.\n",
79 cmd->rsp.cmd);
80 break;
81 case QLCNIC_RCODE_TIMEOUT:
82 dev_err(&pdev->dev, "CDRP command timeout: 0x%x.\n",
83 cmd->rsp.cmd);
84 break;
85 default:
86 dev_err(&pdev->dev, "CDRP command failed: 0x%x.\n",
87 cmd->rsp.cmd);
88 }
62 } else if (rsp == QLCNIC_CDRP_RSP_OK) { 89 } else if (rsp == QLCNIC_CDRP_RSP_OK) {
63 cmd->rsp.cmd = QLCNIC_RCODE_SUCCESS; 90 cmd->rsp.cmd = QLCNIC_RCODE_SUCCESS;
64 if (cmd->rsp.arg2) 91 if (cmd->rsp.arg2)
@@ -237,6 +264,9 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
237 | QLCNIC_CAP0_VALIDOFF); 264 | QLCNIC_CAP0_VALIDOFF);
238 cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS); 265 cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
239 266
267 if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
268 cap |= QLCNIC_CAP0_LRO_MSS;
269
240 prq->valid_field_offset = offsetof(struct qlcnic_hostrq_rx_ctx, 270 prq->valid_field_offset = offsetof(struct qlcnic_hostrq_rx_ctx,
241 msix_handler); 271 msix_handler);
242 prq->txrx_sds_binding = nsds_rings - 1; 272 prq->txrx_sds_binding = nsds_rings - 1;
@@ -954,9 +984,6 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
954 mac_stats->mac_rx_jabber = le64_to_cpu(stats->mac_rx_jabber); 984 mac_stats->mac_rx_jabber = le64_to_cpu(stats->mac_rx_jabber);
955 mac_stats->mac_rx_dropped = le64_to_cpu(stats->mac_rx_dropped); 985 mac_stats->mac_rx_dropped = le64_to_cpu(stats->mac_rx_dropped);
956 mac_stats->mac_rx_crc_error = le64_to_cpu(stats->mac_rx_crc_error); 986 mac_stats->mac_rx_crc_error = le64_to_cpu(stats->mac_rx_crc_error);
957 } else {
958 dev_info(&adapter->pdev->dev,
959 "%s: Get mac stats failed =%d.\n", __func__, err);
960 } 987 }
961 988
962 dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr, 989 dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
index 6ced3195aad3..28a6b28192e3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
@@ -588,6 +588,7 @@ enum {
588#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0)) 588#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0))
589 589
590#define CRB_FW_CAPABILITIES_1 (QLCNIC_CAM_RAM(0x128)) 590#define CRB_FW_CAPABILITIES_1 (QLCNIC_CAM_RAM(0x128))
591#define CRB_FW_CAPABILITIES_2 (QLCNIC_CAM_RAM(0x12c))
591#define CRB_MAC_BLOCK_START (QLCNIC_CAM_RAM(0x1c0)) 592#define CRB_MAC_BLOCK_START (QLCNIC_CAM_RAM(0x1c0))
592 593
593/* 594/*
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index 799fd40ed03a..0bcda9c51e9b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -1488,8 +1488,6 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
1488 skb_checksum_none_assert(skb); 1488 skb_checksum_none_assert(skb);
1489 } 1489 }
1490 1490
1491 skb->dev = adapter->netdev;
1492
1493 buffer->skb = NULL; 1491 buffer->skb = NULL;
1494 1492
1495 return skb; 1493 return skb;
@@ -1653,6 +1651,9 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
1653 1651
1654 length = skb->len; 1652 length = skb->len;
1655 1653
1654 if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
1655 skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
1656
1656 if (vid != 0xffff) 1657 if (vid != 0xffff)
1657 __vlan_hwaccel_put_tag(skb, vid); 1658 __vlan_hwaccel_put_tag(skb, vid);
1658 netif_receive_skb(skb); 1659 netif_receive_skb(skb);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index ad98f4d7919d..212c12193275 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1136,6 +1136,8 @@ static int
1136__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev) 1136__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1137{ 1137{
1138 int ring; 1138 int ring;
1139 u32 capab2;
1140
1139 struct qlcnic_host_rds_ring *rds_ring; 1141 struct qlcnic_host_rds_ring *rds_ring;
1140 1142
1141 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) 1143 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
@@ -1146,6 +1148,12 @@ __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1146 if (qlcnic_set_eswitch_port_config(adapter)) 1148 if (qlcnic_set_eswitch_port_config(adapter))
1147 return -EIO; 1149 return -EIO;
1148 1150
1151 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
1152 capab2 = QLCRD32(adapter, CRB_FW_CAPABILITIES_2);
1153 if (capab2 & QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG)
1154 adapter->flags |= QLCNIC_FW_LRO_MSS_CAP;
1155 }
1156
1149 if (qlcnic_fw_create_ctx(adapter)) 1157 if (qlcnic_fw_create_ctx(adapter))
1150 return -EIO; 1158 return -EIO;
1151 1159
@@ -1215,6 +1223,7 @@ __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1215 qlcnic_napi_disable(adapter); 1223 qlcnic_napi_disable(adapter);
1216 1224
1217 qlcnic_fw_destroy_ctx(adapter); 1225 qlcnic_fw_destroy_ctx(adapter);
1226 adapter->flags &= ~QLCNIC_FW_LRO_MSS_CAP;
1218 1227
1219 qlcnic_reset_rx_buffers_list(adapter); 1228 qlcnic_reset_rx_buffers_list(adapter);
1220 qlcnic_release_tx_buffers(adapter); 1229 qlcnic_release_tx_buffers(adapter);
@@ -2024,6 +2033,7 @@ qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
2024 vh = (struct vlan_ethhdr *)skb->data; 2033 vh = (struct vlan_ethhdr *)skb->data;
2025 flags = FLAGS_VLAN_TAGGED; 2034 flags = FLAGS_VLAN_TAGGED;
2026 vlan_tci = vh->h_vlan_TCI; 2035 vlan_tci = vh->h_vlan_TCI;
2036 protocol = ntohs(vh->h_vlan_encapsulated_proto);
2027 } else if (vlan_tx_tag_present(skb)) { 2037 } else if (vlan_tx_tag_present(skb)) {
2028 flags = FLAGS_VLAN_OOB; 2038 flags = FLAGS_VLAN_OOB;
2029 vlan_tci = vlan_tx_tag_get(skb); 2039 vlan_tci = vlan_tx_tag_get(skb);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 5a639df33f18..a131d7b5d2fe 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -18,13 +18,15 @@
18 */ 18 */
19#define DRV_NAME "qlge" 19#define DRV_NAME "qlge"
20#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " 20#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
21#define DRV_VERSION "v1.00.00.30.00.00-01" 21#define DRV_VERSION "v1.00.00.31"
22 22
23#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ 23#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
24 24
25#define QLGE_VENDOR_ID 0x1077 25#define QLGE_VENDOR_ID 0x1077
26#define QLGE_DEVICE_ID_8012 0x8012 26#define QLGE_DEVICE_ID_8012 0x8012
27#define QLGE_DEVICE_ID_8000 0x8000 27#define QLGE_DEVICE_ID_8000 0x8000
28#define QLGE_MEZZ_SSYS_ID_068 0x0068
29#define QLGE_MEZZ_SSYS_ID_180 0x0180
28#define MAX_CPUS 8 30#define MAX_CPUS 8
29#define MAX_TX_RINGS MAX_CPUS 31#define MAX_TX_RINGS MAX_CPUS
30#define MAX_RX_RINGS ((MAX_CPUS * 2) + 1) 32#define MAX_RX_RINGS ((MAX_CPUS * 2) + 1)
@@ -1397,7 +1399,6 @@ struct tx_ring {
1397 struct tx_ring_desc *q; /* descriptor list for the queue */ 1399 struct tx_ring_desc *q; /* descriptor list for the queue */
1398 spinlock_t lock; 1400 spinlock_t lock;
1399 atomic_t tx_count; /* counts down for every outstanding IO */ 1401 atomic_t tx_count; /* counts down for every outstanding IO */
1400 atomic_t queue_stopped; /* Turns queue off when full. */
1401 struct delayed_work tx_work; 1402 struct delayed_work tx_work;
1402 struct ql_adapter *qdev; 1403 struct ql_adapter *qdev;
1403 u64 tx_packets; 1404 u64 tx_packets;
@@ -1535,6 +1536,14 @@ struct nic_stats {
1535 u64 rx_1024_to_1518_pkts; 1536 u64 rx_1024_to_1518_pkts;
1536 u64 rx_1519_to_max_pkts; 1537 u64 rx_1519_to_max_pkts;
1537 u64 rx_len_err_pkts; 1538 u64 rx_len_err_pkts;
1539 /* Receive Mac Err stats */
1540 u64 rx_code_err;
1541 u64 rx_oversize_err;
1542 u64 rx_undersize_err;
1543 u64 rx_preamble_err;
1544 u64 rx_frame_len_err;
1545 u64 rx_crc_err;
1546 u64 rx_err_count;
1538 /* 1547 /*
1539 * These stats come from offset 500h to 5C8h 1548 * These stats come from offset 500h to 5C8h
1540 * in the XGMAC register. 1549 * in the XGMAC register.
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
index 8e2c2a74f3a5..6f316ab23257 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
@@ -35,10 +35,152 @@
35 35
36#include "qlge.h" 36#include "qlge.h"
37 37
38struct ql_stats {
39 char stat_string[ETH_GSTRING_LEN];
40 int sizeof_stat;
41 int stat_offset;
42};
43
44#define QL_SIZEOF(m) FIELD_SIZEOF(struct ql_adapter, m)
45#define QL_OFF(m) offsetof(struct ql_adapter, m)
46
47static const struct ql_stats ql_gstrings_stats[] = {
48 {"tx_pkts", QL_SIZEOF(nic_stats.tx_pkts), QL_OFF(nic_stats.tx_pkts)},
49 {"tx_bytes", QL_SIZEOF(nic_stats.tx_bytes), QL_OFF(nic_stats.tx_bytes)},
50 {"tx_mcast_pkts", QL_SIZEOF(nic_stats.tx_mcast_pkts),
51 QL_OFF(nic_stats.tx_mcast_pkts)},
52 {"tx_bcast_pkts", QL_SIZEOF(nic_stats.tx_bcast_pkts),
53 QL_OFF(nic_stats.tx_bcast_pkts)},
54 {"tx_ucast_pkts", QL_SIZEOF(nic_stats.tx_ucast_pkts),
55 QL_OFF(nic_stats.tx_ucast_pkts)},
56 {"tx_ctl_pkts", QL_SIZEOF(nic_stats.tx_ctl_pkts),
57 QL_OFF(nic_stats.tx_ctl_pkts)},
58 {"tx_pause_pkts", QL_SIZEOF(nic_stats.tx_pause_pkts),
59 QL_OFF(nic_stats.tx_pause_pkts)},
60 {"tx_64_pkts", QL_SIZEOF(nic_stats.tx_64_pkt),
61 QL_OFF(nic_stats.tx_64_pkt)},
62 {"tx_65_to_127_pkts", QL_SIZEOF(nic_stats.tx_65_to_127_pkt),
63 QL_OFF(nic_stats.tx_65_to_127_pkt)},
64 {"tx_128_to_255_pkts", QL_SIZEOF(nic_stats.tx_128_to_255_pkt),
65 QL_OFF(nic_stats.tx_128_to_255_pkt)},
66 {"tx_256_511_pkts", QL_SIZEOF(nic_stats.tx_256_511_pkt),
67 QL_OFF(nic_stats.tx_256_511_pkt)},
68 {"tx_512_to_1023_pkts", QL_SIZEOF(nic_stats.tx_512_to_1023_pkt),
69 QL_OFF(nic_stats.tx_512_to_1023_pkt)},
70 {"tx_1024_to_1518_pkts", QL_SIZEOF(nic_stats.tx_1024_to_1518_pkt),
71 QL_OFF(nic_stats.tx_1024_to_1518_pkt)},
72 {"tx_1519_to_max_pkts", QL_SIZEOF(nic_stats.tx_1519_to_max_pkt),
73 QL_OFF(nic_stats.tx_1519_to_max_pkt)},
74 {"tx_undersize_pkts", QL_SIZEOF(nic_stats.tx_undersize_pkt),
75 QL_OFF(nic_stats.tx_undersize_pkt)},
76 {"tx_oversize_pkts", QL_SIZEOF(nic_stats.tx_oversize_pkt),
77 QL_OFF(nic_stats.tx_oversize_pkt)},
78 {"rx_bytes", QL_SIZEOF(nic_stats.rx_bytes), QL_OFF(nic_stats.rx_bytes)},
79 {"rx_bytes_ok", QL_SIZEOF(nic_stats.rx_bytes_ok),
80 QL_OFF(nic_stats.rx_bytes_ok)},
81 {"rx_pkts", QL_SIZEOF(nic_stats.rx_pkts), QL_OFF(nic_stats.rx_pkts)},
82 {"rx_pkts_ok", QL_SIZEOF(nic_stats.rx_pkts_ok),
83 QL_OFF(nic_stats.rx_pkts_ok)},
84 {"rx_bcast_pkts", QL_SIZEOF(nic_stats.rx_bcast_pkts),
85 QL_OFF(nic_stats.rx_bcast_pkts)},
86 {"rx_mcast_pkts", QL_SIZEOF(nic_stats.rx_mcast_pkts),
87 QL_OFF(nic_stats.rx_mcast_pkts)},
88 {"rx_ucast_pkts", QL_SIZEOF(nic_stats.rx_ucast_pkts),
89 QL_OFF(nic_stats.rx_ucast_pkts)},
90 {"rx_undersize_pkts", QL_SIZEOF(nic_stats.rx_undersize_pkts),
91 QL_OFF(nic_stats.rx_undersize_pkts)},
92 {"rx_oversize_pkts", QL_SIZEOF(nic_stats.rx_oversize_pkts),
93 QL_OFF(nic_stats.rx_oversize_pkts)},
94 {"rx_jabber_pkts", QL_SIZEOF(nic_stats.rx_jabber_pkts),
95 QL_OFF(nic_stats.rx_jabber_pkts)},
96 {"rx_undersize_fcerr_pkts",
97 QL_SIZEOF(nic_stats.rx_undersize_fcerr_pkts),
98 QL_OFF(nic_stats.rx_undersize_fcerr_pkts)},
99 {"rx_drop_events", QL_SIZEOF(nic_stats.rx_drop_events),
100 QL_OFF(nic_stats.rx_drop_events)},
101 {"rx_fcerr_pkts", QL_SIZEOF(nic_stats.rx_fcerr_pkts),
102 QL_OFF(nic_stats.rx_fcerr_pkts)},
103 {"rx_align_err", QL_SIZEOF(nic_stats.rx_align_err),
104 QL_OFF(nic_stats.rx_align_err)},
105 {"rx_symbol_err", QL_SIZEOF(nic_stats.rx_symbol_err),
106 QL_OFF(nic_stats.rx_symbol_err)},
107 {"rx_mac_err", QL_SIZEOF(nic_stats.rx_mac_err),
108 QL_OFF(nic_stats.rx_mac_err)},
109 {"rx_ctl_pkts", QL_SIZEOF(nic_stats.rx_ctl_pkts),
110 QL_OFF(nic_stats.rx_ctl_pkts)},
111 {"rx_pause_pkts", QL_SIZEOF(nic_stats.rx_pause_pkts),
112 QL_OFF(nic_stats.rx_pause_pkts)},
113 {"rx_64_pkts", QL_SIZEOF(nic_stats.rx_64_pkts),
114 QL_OFF(nic_stats.rx_64_pkts)},
115 {"rx_65_to_127_pkts", QL_SIZEOF(nic_stats.rx_65_to_127_pkts),
116 QL_OFF(nic_stats.rx_65_to_127_pkts)},
117 {"rx_128_255_pkts", QL_SIZEOF(nic_stats.rx_128_255_pkts),
118 QL_OFF(nic_stats.rx_128_255_pkts)},
119 {"rx_256_511_pkts", QL_SIZEOF(nic_stats.rx_256_511_pkts),
120 QL_OFF(nic_stats.rx_256_511_pkts)},
121 {"rx_512_to_1023_pkts", QL_SIZEOF(nic_stats.rx_512_to_1023_pkts),
122 QL_OFF(nic_stats.rx_512_to_1023_pkts)},
123 {"rx_1024_to_1518_pkts", QL_SIZEOF(nic_stats.rx_1024_to_1518_pkts),
124 QL_OFF(nic_stats.rx_1024_to_1518_pkts)},
125 {"rx_1519_to_max_pkts", QL_SIZEOF(nic_stats.rx_1519_to_max_pkts),
126 QL_OFF(nic_stats.rx_1519_to_max_pkts)},
127 {"rx_len_err_pkts", QL_SIZEOF(nic_stats.rx_len_err_pkts),
128 QL_OFF(nic_stats.rx_len_err_pkts)},
129 {"rx_code_err", QL_SIZEOF(nic_stats.rx_code_err),
130 QL_OFF(nic_stats.rx_code_err)},
131 {"rx_oversize_err", QL_SIZEOF(nic_stats.rx_oversize_err),
132 QL_OFF(nic_stats.rx_oversize_err)},
133 {"rx_undersize_err", QL_SIZEOF(nic_stats.rx_undersize_err),
134 QL_OFF(nic_stats.rx_undersize_err)},
135 {"rx_preamble_err", QL_SIZEOF(nic_stats.rx_preamble_err),
136 QL_OFF(nic_stats.rx_preamble_err)},
137 {"rx_frame_len_err", QL_SIZEOF(nic_stats.rx_frame_len_err),
138 QL_OFF(nic_stats.rx_frame_len_err)},
139 {"rx_crc_err", QL_SIZEOF(nic_stats.rx_crc_err),
140 QL_OFF(nic_stats.rx_crc_err)},
141 {"rx_err_count", QL_SIZEOF(nic_stats.rx_err_count),
142 QL_OFF(nic_stats.rx_err_count)},
143 {"tx_cbfc_pause_frames0", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames0),
144 QL_OFF(nic_stats.tx_cbfc_pause_frames0)},
145 {"tx_cbfc_pause_frames1", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames1),
146 QL_OFF(nic_stats.tx_cbfc_pause_frames1)},
147 {"tx_cbfc_pause_frames2", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames2),
148 QL_OFF(nic_stats.tx_cbfc_pause_frames2)},
149 {"tx_cbfc_pause_frames3", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames3),
150 QL_OFF(nic_stats.tx_cbfc_pause_frames3)},
151 {"tx_cbfc_pause_frames4", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames4),
152 QL_OFF(nic_stats.tx_cbfc_pause_frames4)},
153 {"tx_cbfc_pause_frames5", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames5),
154 QL_OFF(nic_stats.tx_cbfc_pause_frames5)},
155 {"tx_cbfc_pause_frames6", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames6),
156 QL_OFF(nic_stats.tx_cbfc_pause_frames6)},
157 {"tx_cbfc_pause_frames7", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames7),
158 QL_OFF(nic_stats.tx_cbfc_pause_frames7)},
159 {"rx_cbfc_pause_frames0", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames0),
160 QL_OFF(nic_stats.rx_cbfc_pause_frames0)},
161 {"rx_cbfc_pause_frames1", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames1),
162 QL_OFF(nic_stats.rx_cbfc_pause_frames1)},
163 {"rx_cbfc_pause_frames2", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames2),
164 QL_OFF(nic_stats.rx_cbfc_pause_frames2)},
165 {"rx_cbfc_pause_frames3", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames3),
166 QL_OFF(nic_stats.rx_cbfc_pause_frames3)},
167 {"rx_cbfc_pause_frames4", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames4),
168 QL_OFF(nic_stats.rx_cbfc_pause_frames4)},
169 {"rx_cbfc_pause_frames5", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames5),
170 QL_OFF(nic_stats.rx_cbfc_pause_frames5)},
171 {"rx_cbfc_pause_frames6", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames6),
172 QL_OFF(nic_stats.rx_cbfc_pause_frames6)},
173 {"rx_cbfc_pause_frames7", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames7),
174 QL_OFF(nic_stats.rx_cbfc_pause_frames7)},
175 {"rx_nic_fifo_drop", QL_SIZEOF(nic_stats.rx_nic_fifo_drop),
176 QL_OFF(nic_stats.rx_nic_fifo_drop)},
177};
178
38static const char ql_gstrings_test[][ETH_GSTRING_LEN] = { 179static const char ql_gstrings_test[][ETH_GSTRING_LEN] = {
39 "Loopback test (offline)" 180 "Loopback test (offline)"
40}; 181};
41#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN) 182#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN)
183#define QLGE_STATS_LEN ARRAY_SIZE(ql_gstrings_stats)
42 184
43static int ql_update_ring_coalescing(struct ql_adapter *qdev) 185static int ql_update_ring_coalescing(struct ql_adapter *qdev)
44{ 186{
@@ -183,73 +325,19 @@ quit:
183 QL_DUMP_STAT(qdev); 325 QL_DUMP_STAT(qdev);
184} 326}
185 327
186static char ql_stats_str_arr[][ETH_GSTRING_LEN] = {
187 {"tx_pkts"},
188 {"tx_bytes"},
189 {"tx_mcast_pkts"},
190 {"tx_bcast_pkts"},
191 {"tx_ucast_pkts"},
192 {"tx_ctl_pkts"},
193 {"tx_pause_pkts"},
194 {"tx_64_pkts"},
195 {"tx_65_to_127_pkts"},
196 {"tx_128_to_255_pkts"},
197 {"tx_256_511_pkts"},
198 {"tx_512_to_1023_pkts"},
199 {"tx_1024_to_1518_pkts"},
200 {"tx_1519_to_max_pkts"},
201 {"tx_undersize_pkts"},
202 {"tx_oversize_pkts"},
203 {"rx_bytes"},
204 {"rx_bytes_ok"},
205 {"rx_pkts"},
206 {"rx_pkts_ok"},
207 {"rx_bcast_pkts"},
208 {"rx_mcast_pkts"},
209 {"rx_ucast_pkts"},
210 {"rx_undersize_pkts"},
211 {"rx_oversize_pkts"},
212 {"rx_jabber_pkts"},
213 {"rx_undersize_fcerr_pkts"},
214 {"rx_drop_events"},
215 {"rx_fcerr_pkts"},
216 {"rx_align_err"},
217 {"rx_symbol_err"},
218 {"rx_mac_err"},
219 {"rx_ctl_pkts"},
220 {"rx_pause_pkts"},
221 {"rx_64_pkts"},
222 {"rx_65_to_127_pkts"},
223 {"rx_128_255_pkts"},
224 {"rx_256_511_pkts"},
225 {"rx_512_to_1023_pkts"},
226 {"rx_1024_to_1518_pkts"},
227 {"rx_1519_to_max_pkts"},
228 {"rx_len_err_pkts"},
229 {"tx_cbfc_pause_frames0"},
230 {"tx_cbfc_pause_frames1"},
231 {"tx_cbfc_pause_frames2"},
232 {"tx_cbfc_pause_frames3"},
233 {"tx_cbfc_pause_frames4"},
234 {"tx_cbfc_pause_frames5"},
235 {"tx_cbfc_pause_frames6"},
236 {"tx_cbfc_pause_frames7"},
237 {"rx_cbfc_pause_frames0"},
238 {"rx_cbfc_pause_frames1"},
239 {"rx_cbfc_pause_frames2"},
240 {"rx_cbfc_pause_frames3"},
241 {"rx_cbfc_pause_frames4"},
242 {"rx_cbfc_pause_frames5"},
243 {"rx_cbfc_pause_frames6"},
244 {"rx_cbfc_pause_frames7"},
245 {"rx_nic_fifo_drop"},
246};
247
248static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 328static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
249{ 329{
330 int index;
250 switch (stringset) { 331 switch (stringset) {
332 case ETH_SS_TEST:
333 memcpy(buf, *ql_gstrings_test, QLGE_TEST_LEN * ETH_GSTRING_LEN);
334 break;
251 case ETH_SS_STATS: 335 case ETH_SS_STATS:
252 memcpy(buf, ql_stats_str_arr, sizeof(ql_stats_str_arr)); 336 for (index = 0; index < QLGE_STATS_LEN; index++) {
337 memcpy(buf + index * ETH_GSTRING_LEN,
338 ql_gstrings_stats[index].stat_string,
339 ETH_GSTRING_LEN);
340 }
253 break; 341 break;
254 } 342 }
255} 343}
@@ -260,7 +348,7 @@ static int ql_get_sset_count(struct net_device *dev, int sset)
260 case ETH_SS_TEST: 348 case ETH_SS_TEST:
261 return QLGE_TEST_LEN; 349 return QLGE_TEST_LEN;
262 case ETH_SS_STATS: 350 case ETH_SS_STATS:
263 return ARRAY_SIZE(ql_stats_str_arr); 351 return QLGE_STATS_LEN;
264 default: 352 default:
265 return -EOPNOTSUPP; 353 return -EOPNOTSUPP;
266 } 354 }
@@ -271,69 +359,17 @@ ql_get_ethtool_stats(struct net_device *ndev,
271 struct ethtool_stats *stats, u64 *data) 359 struct ethtool_stats *stats, u64 *data)
272{ 360{
273 struct ql_adapter *qdev = netdev_priv(ndev); 361 struct ql_adapter *qdev = netdev_priv(ndev);
274 struct nic_stats *s = &qdev->nic_stats; 362 int index, length;
275 363
364 length = QLGE_STATS_LEN;
276 ql_update_stats(qdev); 365 ql_update_stats(qdev);
277 366
278 *data++ = s->tx_pkts; 367 for (index = 0; index < length; index++) {
279 *data++ = s->tx_bytes; 368 char *p = (char *)qdev +
280 *data++ = s->tx_mcast_pkts; 369 ql_gstrings_stats[index].stat_offset;
281 *data++ = s->tx_bcast_pkts; 370 *data++ = (ql_gstrings_stats[index].sizeof_stat ==
282 *data++ = s->tx_ucast_pkts; 371 sizeof(u64)) ? *(u64 *)p : (*(u32 *)p);
283 *data++ = s->tx_ctl_pkts; 372 }
284 *data++ = s->tx_pause_pkts;
285 *data++ = s->tx_64_pkt;
286 *data++ = s->tx_65_to_127_pkt;
287 *data++ = s->tx_128_to_255_pkt;
288 *data++ = s->tx_256_511_pkt;
289 *data++ = s->tx_512_to_1023_pkt;
290 *data++ = s->tx_1024_to_1518_pkt;
291 *data++ = s->tx_1519_to_max_pkt;
292 *data++ = s->tx_undersize_pkt;
293 *data++ = s->tx_oversize_pkt;
294 *data++ = s->rx_bytes;
295 *data++ = s->rx_bytes_ok;
296 *data++ = s->rx_pkts;
297 *data++ = s->rx_pkts_ok;
298 *data++ = s->rx_bcast_pkts;
299 *data++ = s->rx_mcast_pkts;
300 *data++ = s->rx_ucast_pkts;
301 *data++ = s->rx_undersize_pkts;
302 *data++ = s->rx_oversize_pkts;
303 *data++ = s->rx_jabber_pkts;
304 *data++ = s->rx_undersize_fcerr_pkts;
305 *data++ = s->rx_drop_events;
306 *data++ = s->rx_fcerr_pkts;
307 *data++ = s->rx_align_err;
308 *data++ = s->rx_symbol_err;
309 *data++ = s->rx_mac_err;
310 *data++ = s->rx_ctl_pkts;
311 *data++ = s->rx_pause_pkts;
312 *data++ = s->rx_64_pkts;
313 *data++ = s->rx_65_to_127_pkts;
314 *data++ = s->rx_128_255_pkts;
315 *data++ = s->rx_256_511_pkts;
316 *data++ = s->rx_512_to_1023_pkts;
317 *data++ = s->rx_1024_to_1518_pkts;
318 *data++ = s->rx_1519_to_max_pkts;
319 *data++ = s->rx_len_err_pkts;
320 *data++ = s->tx_cbfc_pause_frames0;
321 *data++ = s->tx_cbfc_pause_frames1;
322 *data++ = s->tx_cbfc_pause_frames2;
323 *data++ = s->tx_cbfc_pause_frames3;
324 *data++ = s->tx_cbfc_pause_frames4;
325 *data++ = s->tx_cbfc_pause_frames5;
326 *data++ = s->tx_cbfc_pause_frames6;
327 *data++ = s->tx_cbfc_pause_frames7;
328 *data++ = s->rx_cbfc_pause_frames0;
329 *data++ = s->rx_cbfc_pause_frames1;
330 *data++ = s->rx_cbfc_pause_frames2;
331 *data++ = s->rx_cbfc_pause_frames3;
332 *data++ = s->rx_cbfc_pause_frames4;
333 *data++ = s->rx_cbfc_pause_frames5;
334 *data++ = s->rx_cbfc_pause_frames6;
335 *data++ = s->rx_cbfc_pause_frames7;
336 *data++ = s->rx_nic_fifo_drop;
337} 373}
338 374
339static int ql_get_settings(struct net_device *ndev, 375static int ql_get_settings(struct net_device *ndev,
@@ -388,30 +424,33 @@ static void ql_get_drvinfo(struct net_device *ndev,
388static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 424static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
389{ 425{
390 struct ql_adapter *qdev = netdev_priv(ndev); 426 struct ql_adapter *qdev = netdev_priv(ndev);
391 /* What we support. */ 427 unsigned short ssys_dev = qdev->pdev->subsystem_device;
392 wol->supported = WAKE_MAGIC; 428
393 /* What we've currently got set. */ 429 /* WOL is only supported for mezz card. */
394 wol->wolopts = qdev->wol; 430 if (ssys_dev == QLGE_MEZZ_SSYS_ID_068 ||
431 ssys_dev == QLGE_MEZZ_SSYS_ID_180) {
432 wol->supported = WAKE_MAGIC;
433 wol->wolopts = qdev->wol;
434 }
395} 435}
396 436
397static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 437static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
398{ 438{
399 struct ql_adapter *qdev = netdev_priv(ndev); 439 struct ql_adapter *qdev = netdev_priv(ndev);
400 int status; 440 unsigned short ssys_dev = qdev->pdev->subsystem_device;
401 441
442 /* WOL is only supported for mezz card. */
443 if (ssys_dev != QLGE_MEZZ_SSYS_ID_068 &&
444 ssys_dev != QLGE_MEZZ_SSYS_ID_180) {
445 netif_info(qdev, drv, qdev->ndev,
446 "WOL is only supported for mezz card\n");
447 return -EOPNOTSUPP;
448 }
402 if (wol->wolopts & ~WAKE_MAGIC) 449 if (wol->wolopts & ~WAKE_MAGIC)
403 return -EINVAL; 450 return -EINVAL;
404 qdev->wol = wol->wolopts; 451 qdev->wol = wol->wolopts;
405 452
406 netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol); 453 netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol);
407 if (!qdev->wol) {
408 u32 wol = 0;
409 status = ql_mb_wol_mode(qdev, wol);
410 netif_err(qdev, drv, qdev->ndev, "WOL %s (wol code 0x%x)\n",
411 status == 0 ? "cleared successfully" : "clear failed",
412 wol);
413 }
414
415 return 0; 454 return 0;
416} 455}
417 456
@@ -528,6 +567,8 @@ static void ql_self_test(struct net_device *ndev,
528{ 567{
529 struct ql_adapter *qdev = netdev_priv(ndev); 568 struct ql_adapter *qdev = netdev_priv(ndev);
530 569
570 memset(data, 0, sizeof(u64) * QLGE_TEST_LEN);
571
531 if (netif_running(ndev)) { 572 if (netif_running(ndev)) {
532 set_bit(QL_SELFTEST, &qdev->flags); 573 set_bit(QL_SELFTEST, &qdev->flags);
533 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { 574 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 09d8d33171df..3769f5711cc3 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -1433,6 +1433,36 @@ map_error:
1433 return NETDEV_TX_BUSY; 1433 return NETDEV_TX_BUSY;
1434} 1434}
1435 1435
1436/* Categorizing receive firmware frame errors */
1437static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err)
1438{
1439 struct nic_stats *stats = &qdev->nic_stats;
1440
1441 stats->rx_err_count++;
1442
1443 switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1444 case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1445 stats->rx_code_err++;
1446 break;
1447 case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1448 stats->rx_oversize_err++;
1449 break;
1450 case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1451 stats->rx_undersize_err++;
1452 break;
1453 case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1454 stats->rx_preamble_err++;
1455 break;
1456 case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1457 stats->rx_frame_len_err++;
1458 break;
1459 case IB_MAC_IOCB_RSP_ERR_CRC:
1460 stats->rx_crc_err++;
1461 default:
1462 break;
1463 }
1464}
1465
1436/* Process an inbound completion from an rx ring. */ 1466/* Process an inbound completion from an rx ring. */
1437static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev, 1467static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1438 struct rx_ring *rx_ring, 1468 struct rx_ring *rx_ring,
@@ -1499,15 +1529,6 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1499 addr = lbq_desc->p.pg_chunk.va; 1529 addr = lbq_desc->p.pg_chunk.va;
1500 prefetch(addr); 1530 prefetch(addr);
1501 1531
1502
1503 /* Frame error, so drop the packet. */
1504 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1505 netif_info(qdev, drv, qdev->ndev,
1506 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1507 rx_ring->rx_errors++;
1508 goto err_out;
1509 }
1510
1511 /* The max framesize filter on this chip is set higher than 1532 /* The max framesize filter on this chip is set higher than
1512 * MTU since FCoE uses 2k frames. 1533 * MTU since FCoE uses 2k frames.
1513 */ 1534 */
@@ -1546,7 +1567,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1546 struct iphdr *iph = 1567 struct iphdr *iph =
1547 (struct iphdr *) ((u8 *)addr + ETH_HLEN); 1568 (struct iphdr *) ((u8 *)addr + ETH_HLEN);
1548 if (!(iph->frag_off & 1569 if (!(iph->frag_off &
1549 cpu_to_be16(IP_MF|IP_OFFSET))) { 1570 htons(IP_MF|IP_OFFSET))) {
1550 skb->ip_summed = CHECKSUM_UNNECESSARY; 1571 skb->ip_summed = CHECKSUM_UNNECESSARY;
1551 netif_printk(qdev, rx_status, KERN_DEBUG, 1572 netif_printk(qdev, rx_status, KERN_DEBUG,
1552 qdev->ndev, 1573 qdev->ndev,
@@ -1593,15 +1614,6 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1593 memcpy(skb_put(new_skb, length), skb->data, length); 1614 memcpy(skb_put(new_skb, length), skb->data, length);
1594 skb = new_skb; 1615 skb = new_skb;
1595 1616
1596 /* Frame error, so drop the packet. */
1597 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1598 netif_info(qdev, drv, qdev->ndev,
1599 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1600 dev_kfree_skb_any(skb);
1601 rx_ring->rx_errors++;
1602 return;
1603 }
1604
1605 /* loopback self test for ethtool */ 1617 /* loopback self test for ethtool */
1606 if (test_bit(QL_SELFTEST, &qdev->flags)) { 1618 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1607 ql_check_lb_frame(qdev, skb); 1619 ql_check_lb_frame(qdev, skb);
@@ -1619,7 +1631,6 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1619 } 1631 }
1620 1632
1621 prefetch(skb->data); 1633 prefetch(skb->data);
1622 skb->dev = ndev;
1623 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) { 1634 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1624 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, 1635 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1625 "%s Multicast.\n", 1636 "%s Multicast.\n",
@@ -1654,7 +1665,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1654 /* Unfragmented ipv4 UDP frame. */ 1665 /* Unfragmented ipv4 UDP frame. */
1655 struct iphdr *iph = (struct iphdr *) skb->data; 1666 struct iphdr *iph = (struct iphdr *) skb->data;
1656 if (!(iph->frag_off & 1667 if (!(iph->frag_off &
1657 ntohs(IP_MF|IP_OFFSET))) { 1668 htons(IP_MF|IP_OFFSET))) {
1658 skb->ip_summed = CHECKSUM_UNNECESSARY; 1669 skb->ip_summed = CHECKSUM_UNNECESSARY;
1659 netif_printk(qdev, rx_status, KERN_DEBUG, 1670 netif_printk(qdev, rx_status, KERN_DEBUG,
1660 qdev->ndev, 1671 qdev->ndev,
@@ -1908,15 +1919,6 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1908 return; 1919 return;
1909 } 1920 }
1910 1921
1911 /* Frame error, so drop the packet. */
1912 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1913 netif_info(qdev, drv, qdev->ndev,
1914 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1915 dev_kfree_skb_any(skb);
1916 rx_ring->rx_errors++;
1917 return;
1918 }
1919
1920 /* The max framesize filter on this chip is set higher than 1922 /* The max framesize filter on this chip is set higher than
1921 * MTU since FCoE uses 2k frames. 1923 * MTU since FCoE uses 2k frames.
1922 */ 1924 */
@@ -1934,7 +1936,6 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1934 } 1936 }
1935 1937
1936 prefetch(skb->data); 1938 prefetch(skb->data);
1937 skb->dev = ndev;
1938 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) { 1939 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1939 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n", 1940 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1940 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == 1941 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
@@ -1968,7 +1969,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1968 /* Unfragmented ipv4 UDP frame. */ 1969 /* Unfragmented ipv4 UDP frame. */
1969 struct iphdr *iph = (struct iphdr *) skb->data; 1970 struct iphdr *iph = (struct iphdr *) skb->data;
1970 if (!(iph->frag_off & 1971 if (!(iph->frag_off &
1971 ntohs(IP_MF|IP_OFFSET))) { 1972 htons(IP_MF|IP_OFFSET))) {
1972 skb->ip_summed = CHECKSUM_UNNECESSARY; 1973 skb->ip_summed = CHECKSUM_UNNECESSARY;
1973 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, 1974 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1974 "TCP checksum done!\n"); 1975 "TCP checksum done!\n");
@@ -1999,6 +2000,12 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
1999 2000
2000 QL_DUMP_IB_MAC_RSP(ib_mac_rsp); 2001 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2001 2002
2003 /* Frame error, so drop the packet. */
2004 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
2005 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2);
2006 return (unsigned long)length;
2007 }
2008
2002 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) { 2009 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2003 /* The data and headers are split into 2010 /* The data and headers are split into
2004 * separate buffers. 2011 * separate buffers.
@@ -2173,8 +2180,7 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2173 ql_write_cq_idx(rx_ring); 2180 ql_write_cq_idx(rx_ring);
2174 tx_ring = &qdev->tx_ring[net_rsp->txq_idx]; 2181 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2175 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) { 2182 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2176 if (atomic_read(&tx_ring->queue_stopped) && 2183 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2177 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2178 /* 2184 /*
2179 * The queue got stopped because the tx_ring was full. 2185 * The queue got stopped because the tx_ring was full.
2180 * Wake it up, because it's now at least 25% empty. 2186 * Wake it up, because it's now at least 25% empty.
@@ -2558,10 +2564,9 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2558 2564
2559 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) { 2565 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2560 netif_info(qdev, tx_queued, qdev->ndev, 2566 netif_info(qdev, tx_queued, qdev->ndev,
2561 "%s: shutting down tx queue %d du to lack of resources.\n", 2567 "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2562 __func__, tx_ring_idx); 2568 __func__, tx_ring_idx);
2563 netif_stop_subqueue(ndev, tx_ring->wq_id); 2569 netif_stop_subqueue(ndev, tx_ring->wq_id);
2564 atomic_inc(&tx_ring->queue_stopped);
2565 tx_ring->tx_errors++; 2570 tx_ring->tx_errors++;
2566 return NETDEV_TX_BUSY; 2571 return NETDEV_TX_BUSY;
2567 } 2572 }
@@ -2612,6 +2617,16 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2612 tx_ring->prod_idx, skb->len); 2617 tx_ring->prod_idx, skb->len);
2613 2618
2614 atomic_dec(&tx_ring->tx_count); 2619 atomic_dec(&tx_ring->tx_count);
2620
2621 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2622 netif_stop_subqueue(ndev, tx_ring->wq_id);
2623 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2624 /*
2625 * The queue got stopped because the tx_ring was full.
2626 * Wake it up, because it's now at least 25% empty.
2627 */
2628 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2629 }
2615 return NETDEV_TX_OK; 2630 return NETDEV_TX_OK;
2616} 2631}
2617 2632
@@ -2680,7 +2695,6 @@ static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2680 tx_ring_desc++; 2695 tx_ring_desc++;
2681 } 2696 }
2682 atomic_set(&tx_ring->tx_count, tx_ring->wq_len); 2697 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2683 atomic_set(&tx_ring->queue_stopped, 0);
2684} 2698}
2685 2699
2686static void ql_free_tx_resources(struct ql_adapter *qdev, 2700static void ql_free_tx_resources(struct ql_adapter *qdev,
@@ -2703,10 +2717,9 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2703 &tx_ring->wq_base_dma); 2717 &tx_ring->wq_base_dma);
2704 2718
2705 if ((tx_ring->wq_base == NULL) || 2719 if ((tx_ring->wq_base == NULL) ||
2706 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) { 2720 tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2707 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n"); 2721 goto pci_alloc_err;
2708 return -ENOMEM; 2722
2709 }
2710 tx_ring->q = 2723 tx_ring->q =
2711 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL); 2724 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2712 if (tx_ring->q == NULL) 2725 if (tx_ring->q == NULL)
@@ -2716,6 +2729,9 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2716err: 2729err:
2717 pci_free_consistent(qdev->pdev, tx_ring->wq_size, 2730 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2718 tx_ring->wq_base, tx_ring->wq_base_dma); 2731 tx_ring->wq_base, tx_ring->wq_base_dma);
2732 tx_ring->wq_base = NULL;
2733pci_alloc_err:
2734 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2719 return -ENOMEM; 2735 return -ENOMEM;
2720} 2736}
2721 2737
@@ -4649,7 +4665,7 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
4649 int err = 0; 4665 int err = 0;
4650 4666
4651 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter), 4667 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4652 min(MAX_CPUS, (int)num_online_cpus())); 4668 min(MAX_CPUS, netif_get_num_default_rss_queues()));
4653 if (!ndev) 4669 if (!ndev)
4654 return -ENOMEM; 4670 return -ENOMEM;
4655 4671
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index d1827e887f4e..557a26545d75 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -1256,7 +1256,6 @@ static void __devexit r6040_remove_one(struct pci_dev *pdev)
1256 kfree(lp->mii_bus->irq); 1256 kfree(lp->mii_bus->irq);
1257 mdiobus_free(lp->mii_bus); 1257 mdiobus_free(lp->mii_bus);
1258 netif_napi_del(&lp->napi); 1258 netif_napi_del(&lp->napi);
1259 pci_set_drvdata(pdev, NULL);
1260 pci_iounmap(pdev, lp->base); 1259 pci_iounmap(pdev, lp->base);
1261 pci_release_regions(pdev); 1260 pci_release_regions(pdev);
1262 free_netdev(dev); 1261 free_netdev(dev);
@@ -1278,17 +1277,4 @@ static struct pci_driver r6040_driver = {
1278 .remove = __devexit_p(r6040_remove_one), 1277 .remove = __devexit_p(r6040_remove_one),
1279}; 1278};
1280 1279
1281 1280module_pci_driver(r6040_driver);
1282static int __init r6040_init(void)
1283{
1284 return pci_register_driver(&r6040_driver);
1285}
1286
1287
1288static void __exit r6040_cleanup(void)
1289{
1290 pci_unregister_driver(&r6040_driver);
1291}
1292
1293module_init(r6040_init);
1294module_exit(r6040_cleanup);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index d7a04e091101..be4e00f3f485 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -46,6 +46,8 @@
46#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw" 46#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
47#define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw" 47#define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw"
48#define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw" 48#define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw"
49#define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw"
50#define FIRMWARE_8168G_1 "rtl_nic/rtl8168g-1.fw"
49 51
50#ifdef RTL8169_DEBUG 52#ifdef RTL8169_DEBUG
51#define assert(expr) \ 53#define assert(expr) \
@@ -141,6 +143,9 @@ enum mac_version {
141 RTL_GIGA_MAC_VER_36, 143 RTL_GIGA_MAC_VER_36,
142 RTL_GIGA_MAC_VER_37, 144 RTL_GIGA_MAC_VER_37,
143 RTL_GIGA_MAC_VER_38, 145 RTL_GIGA_MAC_VER_38,
146 RTL_GIGA_MAC_VER_39,
147 RTL_GIGA_MAC_VER_40,
148 RTL_GIGA_MAC_VER_41,
144 RTL_GIGA_MAC_NONE = 0xff, 149 RTL_GIGA_MAC_NONE = 0xff,
145}; 150};
146 151
@@ -259,6 +264,14 @@ static const struct {
259 [RTL_GIGA_MAC_VER_38] = 264 [RTL_GIGA_MAC_VER_38] =
260 _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1, 265 _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1,
261 JUMBO_9K, false), 266 JUMBO_9K, false),
267 [RTL_GIGA_MAC_VER_39] =
268 _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1,
269 JUMBO_1K, true),
270 [RTL_GIGA_MAC_VER_40] =
271 _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_1,
272 JUMBO_9K, false),
273 [RTL_GIGA_MAC_VER_41] =
274 _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K, false),
262}; 275};
263#undef _R 276#undef _R
264 277
@@ -389,8 +402,12 @@ enum rtl8168_8101_registers {
389 TWSI = 0xd2, 402 TWSI = 0xd2,
390 MCU = 0xd3, 403 MCU = 0xd3,
391#define NOW_IS_OOB (1 << 7) 404#define NOW_IS_OOB (1 << 7)
405#define TX_EMPTY (1 << 5)
406#define RX_EMPTY (1 << 4)
407#define RXTX_EMPTY (TX_EMPTY | RX_EMPTY)
392#define EN_NDP (1 << 3) 408#define EN_NDP (1 << 3)
393#define EN_OOB_RESET (1 << 2) 409#define EN_OOB_RESET (1 << 2)
410#define LINK_LIST_RDY (1 << 1)
394 EFUSEAR = 0xdc, 411 EFUSEAR = 0xdc,
395#define EFUSEAR_FLAG 0x80000000 412#define EFUSEAR_FLAG 0x80000000
396#define EFUSEAR_WRITE_CMD 0x80000000 413#define EFUSEAR_WRITE_CMD 0x80000000
@@ -416,6 +433,7 @@ enum rtl8168_registers {
416#define ERIAR_MASK_SHIFT 12 433#define ERIAR_MASK_SHIFT 12
417#define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT) 434#define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT)
418#define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT) 435#define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT)
436#define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT)
419#define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT) 437#define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT)
420 EPHY_RXER_NUM = 0x7c, 438 EPHY_RXER_NUM = 0x7c,
421 OCPDR = 0xb0, /* OCP GPHY access */ 439 OCPDR = 0xb0, /* OCP GPHY access */
@@ -428,10 +446,14 @@ enum rtl8168_registers {
428#define OCPAR_FLAG 0x80000000 446#define OCPAR_FLAG 0x80000000
429#define OCPAR_GPHY_WRITE_CMD 0x8000f060 447#define OCPAR_GPHY_WRITE_CMD 0x8000f060
430#define OCPAR_GPHY_READ_CMD 0x0000f060 448#define OCPAR_GPHY_READ_CMD 0x0000f060
449 GPHY_OCP = 0xb8,
431 RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */ 450 RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */
432 MISC = 0xf0, /* 8168e only. */ 451 MISC = 0xf0, /* 8168e only. */
433#define TXPLA_RST (1 << 29) 452#define TXPLA_RST (1 << 29)
453#define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */
434#define PWM_EN (1 << 22) 454#define PWM_EN (1 << 22)
455#define RXDV_GATED_EN (1 << 19)
456#define EARLY_TALLY_EN (1 << 16)
435}; 457};
436 458
437enum rtl_register_content { 459enum rtl_register_content {
@@ -721,8 +743,8 @@ struct rtl8169_private {
721 u16 event_slow; 743 u16 event_slow;
722 744
723 struct mdio_ops { 745 struct mdio_ops {
724 void (*write)(void __iomem *, int, int); 746 void (*write)(struct rtl8169_private *, int, int);
725 int (*read)(void __iomem *, int); 747 int (*read)(struct rtl8169_private *, int);
726 } mdio_ops; 748 } mdio_ops;
727 749
728 struct pll_power_ops { 750 struct pll_power_ops {
@@ -736,8 +758,8 @@ struct rtl8169_private {
736 } jumbo_ops; 758 } jumbo_ops;
737 759
738 struct csi_ops { 760 struct csi_ops {
739 void (*write)(void __iomem *, int, int); 761 void (*write)(struct rtl8169_private *, int, int);
740 u32 (*read)(void __iomem *, int); 762 u32 (*read)(struct rtl8169_private *, int);
741 } csi_ops; 763 } csi_ops;
742 764
743 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv); 765 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
@@ -774,6 +796,8 @@ struct rtl8169_private {
774 } phy_action; 796 } phy_action;
775 } *rtl_fw; 797 } *rtl_fw;
776#define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN) 798#define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN)
799
800 u32 ocp_base;
777}; 801};
778 802
779MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); 803MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -794,6 +818,8 @@ MODULE_FIRMWARE(FIRMWARE_8168F_1);
794MODULE_FIRMWARE(FIRMWARE_8168F_2); 818MODULE_FIRMWARE(FIRMWARE_8168F_2);
795MODULE_FIRMWARE(FIRMWARE_8402_1); 819MODULE_FIRMWARE(FIRMWARE_8402_1);
796MODULE_FIRMWARE(FIRMWARE_8411_1); 820MODULE_FIRMWARE(FIRMWARE_8411_1);
821MODULE_FIRMWARE(FIRMWARE_8106E_1);
822MODULE_FIRMWARE(FIRMWARE_8168G_1);
797 823
798static void rtl_lock_work(struct rtl8169_private *tp) 824static void rtl_lock_work(struct rtl8169_private *tp)
799{ 825{
@@ -818,47 +844,114 @@ static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
818 } 844 }
819} 845}
820 846
847struct rtl_cond {
848 bool (*check)(struct rtl8169_private *);
849 const char *msg;
850};
851
852static void rtl_udelay(unsigned int d)
853{
854 udelay(d);
855}
856
857static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
858 void (*delay)(unsigned int), unsigned int d, int n,
859 bool high)
860{
861 int i;
862
863 for (i = 0; i < n; i++) {
864 delay(d);
865 if (c->check(tp) == high)
866 return true;
867 }
868 netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n",
869 c->msg, !high, n, d);
870 return false;
871}
872
873static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp,
874 const struct rtl_cond *c,
875 unsigned int d, int n)
876{
877 return rtl_loop_wait(tp, c, rtl_udelay, d, n, true);
878}
879
880static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp,
881 const struct rtl_cond *c,
882 unsigned int d, int n)
883{
884 return rtl_loop_wait(tp, c, rtl_udelay, d, n, false);
885}
886
887static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp,
888 const struct rtl_cond *c,
889 unsigned int d, int n)
890{
891 return rtl_loop_wait(tp, c, msleep, d, n, true);
892}
893
894static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp,
895 const struct rtl_cond *c,
896 unsigned int d, int n)
897{
898 return rtl_loop_wait(tp, c, msleep, d, n, false);
899}
900
901#define DECLARE_RTL_COND(name) \
902static bool name ## _check(struct rtl8169_private *); \
903 \
904static const struct rtl_cond name = { \
905 .check = name ## _check, \
906 .msg = #name \
907}; \
908 \
909static bool name ## _check(struct rtl8169_private *tp)
910
911DECLARE_RTL_COND(rtl_ocpar_cond)
912{
913 void __iomem *ioaddr = tp->mmio_addr;
914
915 return RTL_R32(OCPAR) & OCPAR_FLAG;
916}
917
821static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) 918static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
822{ 919{
823 void __iomem *ioaddr = tp->mmio_addr; 920 void __iomem *ioaddr = tp->mmio_addr;
824 int i;
825 921
826 RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); 922 RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
827 for (i = 0; i < 20; i++) { 923
828 udelay(100); 924 return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
829 if (RTL_R32(OCPAR) & OCPAR_FLAG) 925 RTL_R32(OCPDR) : ~0;
830 break;
831 }
832 return RTL_R32(OCPDR);
833} 926}
834 927
835static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data) 928static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
836{ 929{
837 void __iomem *ioaddr = tp->mmio_addr; 930 void __iomem *ioaddr = tp->mmio_addr;
838 int i;
839 931
840 RTL_W32(OCPDR, data); 932 RTL_W32(OCPDR, data);
841 RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); 933 RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
842 for (i = 0; i < 20; i++) { 934
843 udelay(100); 935 rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
844 if ((RTL_R32(OCPAR) & OCPAR_FLAG) == 0) 936}
845 break; 937
846 } 938DECLARE_RTL_COND(rtl_eriar_cond)
939{
940 void __iomem *ioaddr = tp->mmio_addr;
941
942 return RTL_R32(ERIAR) & ERIAR_FLAG;
847} 943}
848 944
849static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd) 945static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
850{ 946{
851 void __iomem *ioaddr = tp->mmio_addr; 947 void __iomem *ioaddr = tp->mmio_addr;
852 int i;
853 948
854 RTL_W8(ERIDR, cmd); 949 RTL_W8(ERIDR, cmd);
855 RTL_W32(ERIAR, 0x800010e8); 950 RTL_W32(ERIAR, 0x800010e8);
856 msleep(2); 951 msleep(2);
857 for (i = 0; i < 5; i++) { 952
858 udelay(100); 953 if (!rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 5))
859 if (!(RTL_R32(ERIAR) & ERIAR_FLAG)) 954 return;
860 break;
861 }
862 955
863 ocp_write(tp, 0x1, 0x30, 0x00000001); 956 ocp_write(tp, 0x1, 0x30, 0x00000001);
864} 957}
@@ -872,36 +965,27 @@ static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
872 return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10; 965 return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
873} 966}
874 967
875static void rtl8168_driver_start(struct rtl8169_private *tp) 968DECLARE_RTL_COND(rtl_ocp_read_cond)
876{ 969{
877 u16 reg; 970 u16 reg;
878 int i;
879
880 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
881 971
882 reg = rtl8168_get_ocp_reg(tp); 972 reg = rtl8168_get_ocp_reg(tp);
883 973
884 for (i = 0; i < 10; i++) { 974 return ocp_read(tp, 0x0f, reg) & 0x00000800;
885 msleep(10);
886 if (ocp_read(tp, 0x0f, reg) & 0x00000800)
887 break;
888 }
889} 975}
890 976
891static void rtl8168_driver_stop(struct rtl8169_private *tp) 977static void rtl8168_driver_start(struct rtl8169_private *tp)
892{ 978{
893 u16 reg; 979 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
894 int i;
895 980
896 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP); 981 rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10);
982}
897 983
898 reg = rtl8168_get_ocp_reg(tp); 984static void rtl8168_driver_stop(struct rtl8169_private *tp)
985{
986 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
899 987
900 for (i = 0; i < 10; i++) { 988 rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10);
901 msleep(10);
902 if ((ocp_read(tp, 0x0f, reg) & 0x00000800) == 0)
903 break;
904 }
905} 989}
906 990
907static int r8168dp_check_dash(struct rtl8169_private *tp) 991static int r8168dp_check_dash(struct rtl8169_private *tp)
@@ -911,21 +995,114 @@ static int r8168dp_check_dash(struct rtl8169_private *tp)
911 return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0; 995 return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
912} 996}
913 997
914static void r8169_mdio_write(void __iomem *ioaddr, int reg_addr, int value) 998static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
915{ 999{
916 int i; 1000 if (reg & 0xffff0001) {
1001 netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg);
1002 return true;
1003 }
1004 return false;
1005}
1006
1007DECLARE_RTL_COND(rtl_ocp_gphy_cond)
1008{
1009 void __iomem *ioaddr = tp->mmio_addr;
917 1010
918 RTL_W32(PHYAR, 0x80000000 | (reg_addr & 0x1f) << 16 | (value & 0xffff)); 1011 return RTL_R32(GPHY_OCP) & OCPAR_FLAG;
1012}
919 1013
920 for (i = 20; i > 0; i--) { 1014static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
921 /* 1015{
922 * Check if the RTL8169 has completed writing to the specified 1016 void __iomem *ioaddr = tp->mmio_addr;
923 * MII register. 1017
924 */ 1018 if (rtl_ocp_reg_failure(tp, reg))
925 if (!(RTL_R32(PHYAR) & 0x80000000)) 1019 return;
926 break; 1020
927 udelay(25); 1021 RTL_W32(GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);
1022
1023 rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
1024}
1025
1026static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
1027{
1028 void __iomem *ioaddr = tp->mmio_addr;
1029
1030 if (rtl_ocp_reg_failure(tp, reg))
1031 return 0;
1032
1033 RTL_W32(GPHY_OCP, reg << 15);
1034
1035 return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
1036 (RTL_R32(GPHY_OCP) & 0xffff) : ~0;
1037}
1038
1039static void rtl_w1w0_phy_ocp(struct rtl8169_private *tp, int reg, int p, int m)
1040{
1041 int val;
1042
1043 val = r8168_phy_ocp_read(tp, reg);
1044 r8168_phy_ocp_write(tp, reg, (val | p) & ~m);
1045}
1046
1047static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
1048{
1049 void __iomem *ioaddr = tp->mmio_addr;
1050
1051 if (rtl_ocp_reg_failure(tp, reg))
1052 return;
1053
1054 RTL_W32(OCPDR, OCPAR_FLAG | (reg << 15) | data);
1055}
1056
1057static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
1058{
1059 void __iomem *ioaddr = tp->mmio_addr;
1060
1061 if (rtl_ocp_reg_failure(tp, reg))
1062 return 0;
1063
1064 RTL_W32(OCPDR, reg << 15);
1065
1066 return RTL_R32(OCPDR);
1067}
1068
1069#define OCP_STD_PHY_BASE 0xa400
1070
1071static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
1072{
1073 if (reg == 0x1f) {
1074 tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE;
1075 return;
928 } 1076 }
1077
1078 if (tp->ocp_base != OCP_STD_PHY_BASE)
1079 reg -= 0x10;
1080
1081 r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
1082}
1083
1084static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
1085{
1086 if (tp->ocp_base != OCP_STD_PHY_BASE)
1087 reg -= 0x10;
1088
1089 return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
1090}
1091
1092DECLARE_RTL_COND(rtl_phyar_cond)
1093{
1094 void __iomem *ioaddr = tp->mmio_addr;
1095
1096 return RTL_R32(PHYAR) & 0x80000000;
1097}
1098
1099static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
1100{
1101 void __iomem *ioaddr = tp->mmio_addr;
1102
1103 RTL_W32(PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));
1104
1105 rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
929 /* 1106 /*
930 * According to hardware specs a 20us delay is required after write 1107 * According to hardware specs a 20us delay is required after write
931 * complete indication, but before sending next command. 1108 * complete indication, but before sending next command.
@@ -933,23 +1110,16 @@ static void r8169_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
933 udelay(20); 1110 udelay(20);
934} 1111}
935 1112
936static int r8169_mdio_read(void __iomem *ioaddr, int reg_addr) 1113static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
937{ 1114{
938 int i, value = -1; 1115 void __iomem *ioaddr = tp->mmio_addr;
1116 int value;
939 1117
940 RTL_W32(PHYAR, 0x0 | (reg_addr & 0x1f) << 16); 1118 RTL_W32(PHYAR, 0x0 | (reg & 0x1f) << 16);
1119
1120 value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
1121 RTL_R32(PHYAR) & 0xffff : ~0;
941 1122
942 for (i = 20; i > 0; i--) {
943 /*
944 * Check if the RTL8169 has completed retrieving data from
945 * the specified MII register.
946 */
947 if (RTL_R32(PHYAR) & 0x80000000) {
948 value = RTL_R32(PHYAR) & 0xffff;
949 break;
950 }
951 udelay(25);
952 }
953 /* 1123 /*
954 * According to hardware specs a 20us delay is required after read 1124 * According to hardware specs a 20us delay is required after read
955 * complete indication, but before sending next command. 1125 * complete indication, but before sending next command.
@@ -959,45 +1129,35 @@ static int r8169_mdio_read(void __iomem *ioaddr, int reg_addr)
959 return value; 1129 return value;
960} 1130}
961 1131
962static void r8168dp_1_mdio_access(void __iomem *ioaddr, int reg_addr, u32 data) 1132static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
963{ 1133{
964 int i; 1134 void __iomem *ioaddr = tp->mmio_addr;
965 1135
966 RTL_W32(OCPDR, data | 1136 RTL_W32(OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
967 ((reg_addr & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
968 RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD); 1137 RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
969 RTL_W32(EPHY_RXER_NUM, 0); 1138 RTL_W32(EPHY_RXER_NUM, 0);
970 1139
971 for (i = 0; i < 100; i++) { 1140 rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
972 mdelay(1);
973 if (!(RTL_R32(OCPAR) & OCPAR_FLAG))
974 break;
975 }
976} 1141}
977 1142
978static void r8168dp_1_mdio_write(void __iomem *ioaddr, int reg_addr, int value) 1143static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
979{ 1144{
980 r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_WRITE_CMD | 1145 r8168dp_1_mdio_access(tp, reg,
981 (value & OCPDR_DATA_MASK)); 1146 OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK));
982} 1147}
983 1148
984static int r8168dp_1_mdio_read(void __iomem *ioaddr, int reg_addr) 1149static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
985{ 1150{
986 int i; 1151 void __iomem *ioaddr = tp->mmio_addr;
987 1152
988 r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_READ_CMD); 1153 r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);
989 1154
990 mdelay(1); 1155 mdelay(1);
991 RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD); 1156 RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
992 RTL_W32(EPHY_RXER_NUM, 0); 1157 RTL_W32(EPHY_RXER_NUM, 0);
993 1158
994 for (i = 0; i < 100; i++) { 1159 return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
995 mdelay(1); 1160 RTL_R32(OCPDR) & OCPDR_DATA_MASK : ~0;
996 if (RTL_R32(OCPAR) & OCPAR_FLAG)
997 break;
998 }
999
1000 return RTL_R32(OCPDR) & OCPDR_DATA_MASK;
1001} 1161}
1002 1162
1003#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000 1163#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
@@ -1012,22 +1172,25 @@ static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
1012 RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT); 1172 RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
1013} 1173}
1014 1174
1015static void r8168dp_2_mdio_write(void __iomem *ioaddr, int reg_addr, int value) 1175static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value)
1016{ 1176{
1177 void __iomem *ioaddr = tp->mmio_addr;
1178
1017 r8168dp_2_mdio_start(ioaddr); 1179 r8168dp_2_mdio_start(ioaddr);
1018 1180
1019 r8169_mdio_write(ioaddr, reg_addr, value); 1181 r8169_mdio_write(tp, reg, value);
1020 1182
1021 r8168dp_2_mdio_stop(ioaddr); 1183 r8168dp_2_mdio_stop(ioaddr);
1022} 1184}
1023 1185
1024static int r8168dp_2_mdio_read(void __iomem *ioaddr, int reg_addr) 1186static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
1025{ 1187{
1188 void __iomem *ioaddr = tp->mmio_addr;
1026 int value; 1189 int value;
1027 1190
1028 r8168dp_2_mdio_start(ioaddr); 1191 r8168dp_2_mdio_start(ioaddr);
1029 1192
1030 value = r8169_mdio_read(ioaddr, reg_addr); 1193 value = r8169_mdio_read(tp, reg);
1031 1194
1032 r8168dp_2_mdio_stop(ioaddr); 1195 r8168dp_2_mdio_stop(ioaddr);
1033 1196
@@ -1036,12 +1199,12 @@ static int r8168dp_2_mdio_read(void __iomem *ioaddr, int reg_addr)
1036 1199
1037static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val) 1200static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
1038{ 1201{
1039 tp->mdio_ops.write(tp->mmio_addr, location, val); 1202 tp->mdio_ops.write(tp, location, val);
1040} 1203}
1041 1204
1042static int rtl_readphy(struct rtl8169_private *tp, int location) 1205static int rtl_readphy(struct rtl8169_private *tp, int location)
1043{ 1206{
1044 return tp->mdio_ops.read(tp->mmio_addr, location); 1207 return tp->mdio_ops.read(tp, location);
1045} 1208}
1046 1209
1047static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value) 1210static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
@@ -1072,79 +1235,64 @@ static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
1072 return rtl_readphy(tp, location); 1235 return rtl_readphy(tp, location);
1073} 1236}
1074 1237
1075static void rtl_ephy_write(void __iomem *ioaddr, int reg_addr, int value) 1238DECLARE_RTL_COND(rtl_ephyar_cond)
1076{ 1239{
1077 unsigned int i; 1240 void __iomem *ioaddr = tp->mmio_addr;
1241
1242 return RTL_R32(EPHYAR) & EPHYAR_FLAG;
1243}
1244
1245static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
1246{
1247 void __iomem *ioaddr = tp->mmio_addr;
1078 1248
1079 RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) | 1249 RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
1080 (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); 1250 (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1081 1251
1082 for (i = 0; i < 100; i++) { 1252 rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);
1083 if (!(RTL_R32(EPHYAR) & EPHYAR_FLAG)) 1253
1084 break; 1254 udelay(10);
1085 udelay(10);
1086 }
1087} 1255}
1088 1256
1089static u16 rtl_ephy_read(void __iomem *ioaddr, int reg_addr) 1257static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
1090{ 1258{
1091 u16 value = 0xffff; 1259 void __iomem *ioaddr = tp->mmio_addr;
1092 unsigned int i;
1093 1260
1094 RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); 1261 RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1095 1262
1096 for (i = 0; i < 100; i++) { 1263 return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
1097 if (RTL_R32(EPHYAR) & EPHYAR_FLAG) { 1264 RTL_R32(EPHYAR) & EPHYAR_DATA_MASK : ~0;
1098 value = RTL_R32(EPHYAR) & EPHYAR_DATA_MASK;
1099 break;
1100 }
1101 udelay(10);
1102 }
1103
1104 return value;
1105} 1265}
1106 1266
1107static 1267static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
1108void rtl_eri_write(void __iomem *ioaddr, int addr, u32 mask, u32 val, int type) 1268 u32 val, int type)
1109{ 1269{
1110 unsigned int i; 1270 void __iomem *ioaddr = tp->mmio_addr;
1111 1271
1112 BUG_ON((addr & 3) || (mask == 0)); 1272 BUG_ON((addr & 3) || (mask == 0));
1113 RTL_W32(ERIDR, val); 1273 RTL_W32(ERIDR, val);
1114 RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr); 1274 RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);
1115 1275
1116 for (i = 0; i < 100; i++) { 1276 rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
1117 if (!(RTL_R32(ERIAR) & ERIAR_FLAG))
1118 break;
1119 udelay(100);
1120 }
1121} 1277}
1122 1278
1123static u32 rtl_eri_read(void __iomem *ioaddr, int addr, int type) 1279static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
1124{ 1280{
1125 u32 value = ~0x00; 1281 void __iomem *ioaddr = tp->mmio_addr;
1126 unsigned int i;
1127 1282
1128 RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr); 1283 RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
1129 1284
1130 for (i = 0; i < 100; i++) { 1285 return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
1131 if (RTL_R32(ERIAR) & ERIAR_FLAG) { 1286 RTL_R32(ERIDR) : ~0;
1132 value = RTL_R32(ERIDR);
1133 break;
1134 }
1135 udelay(100);
1136 }
1137
1138 return value;
1139} 1287}
1140 1288
1141static void 1289static void rtl_w1w0_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
1142rtl_w1w0_eri(void __iomem *ioaddr, int addr, u32 mask, u32 p, u32 m, int type) 1290 u32 m, int type)
1143{ 1291{
1144 u32 val; 1292 u32 val;
1145 1293
1146 val = rtl_eri_read(ioaddr, addr, type); 1294 val = rtl_eri_read(tp, addr, type);
1147 rtl_eri_write(ioaddr, addr, mask, (val & ~m) | p, type); 1295 rtl_eri_write(tp, addr, mask, (val & ~m) | p, type);
1148} 1296}
1149 1297
1150struct exgmac_reg { 1298struct exgmac_reg {
@@ -1153,31 +1301,30 @@ struct exgmac_reg {
1153 u32 val; 1301 u32 val;
1154}; 1302};
1155 1303
1156static void rtl_write_exgmac_batch(void __iomem *ioaddr, 1304static void rtl_write_exgmac_batch(struct rtl8169_private *tp,
1157 const struct exgmac_reg *r, int len) 1305 const struct exgmac_reg *r, int len)
1158{ 1306{
1159 while (len-- > 0) { 1307 while (len-- > 0) {
1160 rtl_eri_write(ioaddr, r->addr, r->mask, r->val, ERIAR_EXGMAC); 1308 rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC);
1161 r++; 1309 r++;
1162 } 1310 }
1163} 1311}
1164 1312
1165static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr) 1313DECLARE_RTL_COND(rtl_efusear_cond)
1166{ 1314{
1167 u8 value = 0xff; 1315 void __iomem *ioaddr = tp->mmio_addr;
1168 unsigned int i;
1169 1316
1170 RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT); 1317 return RTL_R32(EFUSEAR) & EFUSEAR_FLAG;
1318}
1171 1319
1172 for (i = 0; i < 300; i++) { 1320static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
1173 if (RTL_R32(EFUSEAR) & EFUSEAR_FLAG) { 1321{
1174 value = RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK; 1322 void __iomem *ioaddr = tp->mmio_addr;
1175 break;
1176 }
1177 udelay(100);
1178 }
1179 1323
1180 return value; 1324 RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
1325
1326 return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
1327 RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
1181} 1328}
1182 1329
1183static u16 rtl_get_events(struct rtl8169_private *tp) 1330static u16 rtl_get_events(struct rtl8169_private *tp)
@@ -1276,48 +1423,48 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
1276 if (tp->mac_version == RTL_GIGA_MAC_VER_34 || 1423 if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
1277 tp->mac_version == RTL_GIGA_MAC_VER_38) { 1424 tp->mac_version == RTL_GIGA_MAC_VER_38) {
1278 if (RTL_R8(PHYstatus) & _1000bpsF) { 1425 if (RTL_R8(PHYstatus) & _1000bpsF) {
1279 rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111, 1426 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
1280 0x00000011, ERIAR_EXGMAC); 1427 ERIAR_EXGMAC);
1281 rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111, 1428 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1282 0x00000005, ERIAR_EXGMAC); 1429 ERIAR_EXGMAC);
1283 } else if (RTL_R8(PHYstatus) & _100bps) { 1430 } else if (RTL_R8(PHYstatus) & _100bps) {
1284 rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111, 1431 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1285 0x0000001f, ERIAR_EXGMAC); 1432 ERIAR_EXGMAC);
1286 rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111, 1433 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1287 0x00000005, ERIAR_EXGMAC); 1434 ERIAR_EXGMAC);
1288 } else { 1435 } else {
1289 rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111, 1436 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1290 0x0000001f, ERIAR_EXGMAC); 1437 ERIAR_EXGMAC);
1291 rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111, 1438 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
1292 0x0000003f, ERIAR_EXGMAC); 1439 ERIAR_EXGMAC);
1293 } 1440 }
1294 /* Reset packet filter */ 1441 /* Reset packet filter */
1295 rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, 1442 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
1296 ERIAR_EXGMAC); 1443 ERIAR_EXGMAC);
1297 rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, 1444 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
1298 ERIAR_EXGMAC); 1445 ERIAR_EXGMAC);
1299 } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 || 1446 } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
1300 tp->mac_version == RTL_GIGA_MAC_VER_36) { 1447 tp->mac_version == RTL_GIGA_MAC_VER_36) {
1301 if (RTL_R8(PHYstatus) & _1000bpsF) { 1448 if (RTL_R8(PHYstatus) & _1000bpsF) {
1302 rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111, 1449 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
1303 0x00000011, ERIAR_EXGMAC); 1450 ERIAR_EXGMAC);
1304 rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111, 1451 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1305 0x00000005, ERIAR_EXGMAC); 1452 ERIAR_EXGMAC);
1306 } else { 1453 } else {
1307 rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111, 1454 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1308 0x0000001f, ERIAR_EXGMAC); 1455 ERIAR_EXGMAC);
1309 rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111, 1456 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
1310 0x0000003f, ERIAR_EXGMAC); 1457 ERIAR_EXGMAC);
1311 } 1458 }
1312 } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) { 1459 } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
1313 if (RTL_R8(PHYstatus) & _10bps) { 1460 if (RTL_R8(PHYstatus) & _10bps) {
1314 rtl_eri_write(ioaddr, 0x1d0, ERIAR_MASK_0011, 1461 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02,
1315 0x4d02, ERIAR_EXGMAC); 1462 ERIAR_EXGMAC);
1316 rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_0011, 1463 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060,
1317 0x0060, ERIAR_EXGMAC); 1464 ERIAR_EXGMAC);
1318 } else { 1465 } else {
1319 rtl_eri_write(ioaddr, 0x1d0, ERIAR_MASK_0011, 1466 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000,
1320 0x0000, ERIAR_EXGMAC); 1467 ERIAR_EXGMAC);
1321 } 1468 }
1322 } 1469 }
1323} 1470}
@@ -1784,6 +1931,13 @@ static int rtl8169_get_sset_count(struct net_device *dev, int sset)
1784 } 1931 }
1785} 1932}
1786 1933
1934DECLARE_RTL_COND(rtl_counters_cond)
1935{
1936 void __iomem *ioaddr = tp->mmio_addr;
1937
1938 return RTL_R32(CounterAddrLow) & CounterDump;
1939}
1940
1787static void rtl8169_update_counters(struct net_device *dev) 1941static void rtl8169_update_counters(struct net_device *dev)
1788{ 1942{
1789 struct rtl8169_private *tp = netdev_priv(dev); 1943 struct rtl8169_private *tp = netdev_priv(dev);
@@ -1792,7 +1946,6 @@ static void rtl8169_update_counters(struct net_device *dev)
1792 struct rtl8169_counters *counters; 1946 struct rtl8169_counters *counters;
1793 dma_addr_t paddr; 1947 dma_addr_t paddr;
1794 u32 cmd; 1948 u32 cmd;
1795 int wait = 1000;
1796 1949
1797 /* 1950 /*
1798 * Some chips are unable to dump tally counters when the receiver 1951 * Some chips are unable to dump tally counters when the receiver
@@ -1810,13 +1963,8 @@ static void rtl8169_update_counters(struct net_device *dev)
1810 RTL_W32(CounterAddrLow, cmd); 1963 RTL_W32(CounterAddrLow, cmd);
1811 RTL_W32(CounterAddrLow, cmd | CounterDump); 1964 RTL_W32(CounterAddrLow, cmd | CounterDump);
1812 1965
1813 while (wait--) { 1966 if (rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000))
1814 if ((RTL_R32(CounterAddrLow) & CounterDump) == 0) { 1967 memcpy(&tp->counters, counters, sizeof(*counters));
1815 memcpy(&tp->counters, counters, sizeof(*counters));
1816 break;
1817 }
1818 udelay(10);
1819 }
1820 1968
1821 RTL_W32(CounterAddrLow, 0); 1969 RTL_W32(CounterAddrLow, 0);
1822 RTL_W32(CounterAddrHigh, 0); 1970 RTL_W32(CounterAddrHigh, 0);
@@ -1894,6 +2042,10 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1894 u32 val; 2042 u32 val;
1895 int mac_version; 2043 int mac_version;
1896 } mac_info[] = { 2044 } mac_info[] = {
2045 /* 8168G family. */
2046 { 0x7cf00000, 0x4c100000, RTL_GIGA_MAC_VER_41 },
2047 { 0x7cf00000, 0x4c000000, RTL_GIGA_MAC_VER_40 },
2048
1897 /* 8168F family. */ 2049 /* 8168F family. */
1898 { 0x7c800000, 0x48800000, RTL_GIGA_MAC_VER_38 }, 2050 { 0x7c800000, 0x48800000, RTL_GIGA_MAC_VER_38 },
1899 { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 }, 2051 { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 },
@@ -1933,6 +2085,8 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1933 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 }, 2085 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
1934 2086
1935 /* 8101 family. */ 2087 /* 8101 family. */
2088 { 0x7cf00000, 0x44900000, RTL_GIGA_MAC_VER_39 },
2089 { 0x7c800000, 0x44800000, RTL_GIGA_MAC_VER_39 },
1936 { 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 }, 2090 { 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 },
1937 { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 }, 2091 { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 },
1938 { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 }, 2092 { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 },
@@ -2186,7 +2340,7 @@ static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2186 index -= regno; 2340 index -= regno;
2187 break; 2341 break;
2188 case PHY_READ_EFUSE: 2342 case PHY_READ_EFUSE:
2189 predata = rtl8168d_efuse_read(tp->mmio_addr, regno); 2343 predata = rtl8168d_efuse_read(tp, regno);
2190 index++; 2344 index++;
2191 break; 2345 break;
2192 case PHY_CLEAR_READCOUNT: 2346 case PHY_CLEAR_READCOUNT:
@@ -2626,7 +2780,6 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
2626 { 0x1f, 0x0000 }, 2780 { 0x1f, 0x0000 },
2627 { 0x0d, 0xf880 } 2781 { 0x0d, 0xf880 }
2628 }; 2782 };
2629 void __iomem *ioaddr = tp->mmio_addr;
2630 2783
2631 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); 2784 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
2632 2785
@@ -2638,7 +2791,7 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
2638 rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef); 2791 rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
2639 rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00); 2792 rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);
2640 2793
2641 if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) { 2794 if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
2642 static const struct phy_reg phy_reg_init[] = { 2795 static const struct phy_reg phy_reg_init[] = {
2643 { 0x1f, 0x0002 }, 2796 { 0x1f, 0x0002 },
2644 { 0x05, 0x669a }, 2797 { 0x05, 0x669a },
@@ -2738,11 +2891,10 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
2738 { 0x1f, 0x0000 }, 2891 { 0x1f, 0x0000 },
2739 { 0x0d, 0xf880 } 2892 { 0x0d, 0xf880 }
2740 }; 2893 };
2741 void __iomem *ioaddr = tp->mmio_addr;
2742 2894
2743 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); 2895 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
2744 2896
2745 if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) { 2897 if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
2746 static const struct phy_reg phy_reg_init[] = { 2898 static const struct phy_reg phy_reg_init[] = {
2747 { 0x1f, 0x0002 }, 2899 { 0x1f, 0x0002 },
2748 { 0x05, 0x669a }, 2900 { 0x05, 0x669a },
@@ -3010,8 +3162,7 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
3010 rtl_writephy(tp, 0x1f, 0x0000); 3162 rtl_writephy(tp, 0x1f, 0x0000);
3011 3163
3012 /* EEE setting */ 3164 /* EEE setting */
3013 rtl_w1w0_eri(tp->mmio_addr, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, 3165 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC);
3014 ERIAR_EXGMAC);
3015 rtl_writephy(tp, 0x1f, 0x0005); 3166 rtl_writephy(tp, 0x1f, 0x0005);
3016 rtl_writephy(tp, 0x05, 0x8b85); 3167 rtl_writephy(tp, 0x05, 0x8b85);
3017 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000); 3168 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
@@ -3115,7 +3266,6 @@ static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
3115 3266
3116static void rtl8411_hw_phy_config(struct rtl8169_private *tp) 3267static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
3117{ 3268{
3118 void __iomem *ioaddr = tp->mmio_addr;
3119 static const struct phy_reg phy_reg_init[] = { 3269 static const struct phy_reg phy_reg_init[] = {
3120 /* Channel estimation fine tune */ 3270 /* Channel estimation fine tune */
3121 { 0x1f, 0x0003 }, 3271 { 0x1f, 0x0003 },
@@ -3189,7 +3339,7 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
3189 rtl_writephy(tp, 0x1f, 0x0000); 3339 rtl_writephy(tp, 0x1f, 0x0000);
3190 3340
3191 /* eee setting */ 3341 /* eee setting */
3192 rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC); 3342 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
3193 rtl_writephy(tp, 0x1f, 0x0005); 3343 rtl_writephy(tp, 0x1f, 0x0005);
3194 rtl_writephy(tp, 0x05, 0x8b85); 3344 rtl_writephy(tp, 0x05, 0x8b85);
3195 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000); 3345 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
@@ -3211,6 +3361,55 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
3211 rtl_writephy(tp, 0x1f, 0x0000); 3361 rtl_writephy(tp, 0x1f, 0x0000);
3212} 3362}
3213 3363
3364static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
3365{
3366 static const u16 mac_ocp_patch[] = {
3367 0xe008, 0xe01b, 0xe01d, 0xe01f,
3368 0xe021, 0xe023, 0xe025, 0xe027,
3369 0x49d2, 0xf10d, 0x766c, 0x49e2,
3370 0xf00a, 0x1ec0, 0x8ee1, 0xc60a,
3371
3372 0x77c0, 0x4870, 0x9fc0, 0x1ea0,
3373 0xc707, 0x8ee1, 0x9d6c, 0xc603,
3374 0xbe00, 0xb416, 0x0076, 0xe86c,
3375 0xc602, 0xbe00, 0x0000, 0xc602,
3376
3377 0xbe00, 0x0000, 0xc602, 0xbe00,
3378 0x0000, 0xc602, 0xbe00, 0x0000,
3379 0xc602, 0xbe00, 0x0000, 0xc602,
3380 0xbe00, 0x0000, 0xc602, 0xbe00,
3381
3382 0x0000, 0x0000, 0x0000, 0x0000
3383 };
3384 u32 i;
3385
3386 /* Patch code for GPHY reset */
3387 for (i = 0; i < ARRAY_SIZE(mac_ocp_patch); i++)
3388 r8168_mac_ocp_write(tp, 0xf800 + 2*i, mac_ocp_patch[i]);
3389 r8168_mac_ocp_write(tp, 0xfc26, 0x8000);
3390 r8168_mac_ocp_write(tp, 0xfc28, 0x0075);
3391
3392 rtl_apply_firmware(tp);
3393
3394 if (r8168_phy_ocp_read(tp, 0xa460) & 0x0100)
3395 rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x8000);
3396 else
3397 rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x8000, 0x0000);
3398
3399 if (r8168_phy_ocp_read(tp, 0xa466) & 0x0100)
3400 rtl_w1w0_phy_ocp(tp, 0xc41a, 0x0002, 0x0000);
3401 else
3402 rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x0002);
3403
3404 rtl_w1w0_phy_ocp(tp, 0xa442, 0x000c, 0x0000);
3405 rtl_w1w0_phy_ocp(tp, 0xa4b2, 0x0004, 0x0000);
3406
3407 r8168_phy_ocp_write(tp, 0xa436, 0x8012);
3408 rtl_w1w0_phy_ocp(tp, 0xa438, 0x8000, 0x0000);
3409
3410 rtl_w1w0_phy_ocp(tp, 0xc422, 0x4000, 0x2000);
3411}
3412
3214static void rtl8102e_hw_phy_config(struct rtl8169_private *tp) 3413static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
3215{ 3414{
3216 static const struct phy_reg phy_reg_init[] = { 3415 static const struct phy_reg phy_reg_init[] = {
@@ -3256,8 +3455,6 @@ static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
3256 3455
3257static void rtl8402_hw_phy_config(struct rtl8169_private *tp) 3456static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
3258{ 3457{
3259 void __iomem *ioaddr = tp->mmio_addr;
3260
3261 /* Disable ALDPS before setting firmware */ 3458 /* Disable ALDPS before setting firmware */
3262 rtl_writephy(tp, 0x1f, 0x0000); 3459 rtl_writephy(tp, 0x1f, 0x0000);
3263 rtl_writephy(tp, 0x18, 0x0310); 3460 rtl_writephy(tp, 0x18, 0x0310);
@@ -3266,13 +3463,35 @@ static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
3266 rtl_apply_firmware(tp); 3463 rtl_apply_firmware(tp);
3267 3464
3268 /* EEE setting */ 3465 /* EEE setting */
3269 rtl_eri_write(ioaddr, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 3466 rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
3270 rtl_writephy(tp, 0x1f, 0x0004); 3467 rtl_writephy(tp, 0x1f, 0x0004);
3271 rtl_writephy(tp, 0x10, 0x401f); 3468 rtl_writephy(tp, 0x10, 0x401f);
3272 rtl_writephy(tp, 0x19, 0x7030); 3469 rtl_writephy(tp, 0x19, 0x7030);
3273 rtl_writephy(tp, 0x1f, 0x0000); 3470 rtl_writephy(tp, 0x1f, 0x0000);
3274} 3471}
3275 3472
3473static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
3474{
3475 static const struct phy_reg phy_reg_init[] = {
3476 { 0x1f, 0x0004 },
3477 { 0x10, 0xc07f },
3478 { 0x19, 0x7030 },
3479 { 0x1f, 0x0000 }
3480 };
3481
3482 /* Disable ALDPS before ram code */
3483 rtl_writephy(tp, 0x1f, 0x0000);
3484 rtl_writephy(tp, 0x18, 0x0310);
3485 msleep(100);
3486
3487 rtl_apply_firmware(tp);
3488
3489 rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
3490 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3491
3492 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
3493}
3494
3276static void rtl_hw_phy_config(struct net_device *dev) 3495static void rtl_hw_phy_config(struct net_device *dev)
3277{ 3496{
3278 struct rtl8169_private *tp = netdev_priv(dev); 3497 struct rtl8169_private *tp = netdev_priv(dev);
@@ -3369,6 +3588,15 @@ static void rtl_hw_phy_config(struct net_device *dev)
3369 rtl8411_hw_phy_config(tp); 3588 rtl8411_hw_phy_config(tp);
3370 break; 3589 break;
3371 3590
3591 case RTL_GIGA_MAC_VER_39:
3592 rtl8106e_hw_phy_config(tp);
3593 break;
3594
3595 case RTL_GIGA_MAC_VER_40:
3596 rtl8168g_1_hw_phy_config(tp);
3597 break;
3598
3599 case RTL_GIGA_MAC_VER_41:
3372 default: 3600 default:
3373 break; 3601 break;
3374 } 3602 }
@@ -3426,18 +3654,16 @@ static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
3426 free_netdev(dev); 3654 free_netdev(dev);
3427} 3655}
3428 3656
3657DECLARE_RTL_COND(rtl_phy_reset_cond)
3658{
3659 return tp->phy_reset_pending(tp);
3660}
3661
3429static void rtl8169_phy_reset(struct net_device *dev, 3662static void rtl8169_phy_reset(struct net_device *dev,
3430 struct rtl8169_private *tp) 3663 struct rtl8169_private *tp)
3431{ 3664{
3432 unsigned int i;
3433
3434 tp->phy_reset_enable(tp); 3665 tp->phy_reset_enable(tp);
3435 for (i = 0; i < 100; i++) { 3666 rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100);
3436 if (!tp->phy_reset_pending(tp))
3437 return;
3438 msleep(1);
3439 }
3440 netif_err(tp, link, dev, "PHY reset failed\n");
3441} 3667}
3442 3668
3443static bool rtl_tbi_enabled(struct rtl8169_private *tp) 3669static bool rtl_tbi_enabled(struct rtl8169_private *tp)
@@ -3512,7 +3738,7 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
3512 low >> 16 }, 3738 low >> 16 },
3513 }; 3739 };
3514 3740
3515 rtl_write_exgmac_batch(ioaddr, e, ARRAY_SIZE(e)); 3741 rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e));
3516 } 3742 }
3517 3743
3518 RTL_W8(Cfg9346, Cfg9346_Lock); 3744 RTL_W8(Cfg9346, Cfg9346_Lock);
@@ -3589,6 +3815,11 @@ static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
3589 ops->write = r8168dp_2_mdio_write; 3815 ops->write = r8168dp_2_mdio_write;
3590 ops->read = r8168dp_2_mdio_read; 3816 ops->read = r8168dp_2_mdio_read;
3591 break; 3817 break;
3818 case RTL_GIGA_MAC_VER_40:
3819 case RTL_GIGA_MAC_VER_41:
3820 ops->write = r8168g_mdio_write;
3821 ops->read = r8168g_mdio_read;
3822 break;
3592 default: 3823 default:
3593 ops->write = r8169_mdio_write; 3824 ops->write = r8169_mdio_write;
3594 ops->read = r8169_mdio_read; 3825 ops->read = r8169_mdio_read;
@@ -3608,6 +3839,9 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
3608 case RTL_GIGA_MAC_VER_34: 3839 case RTL_GIGA_MAC_VER_34:
3609 case RTL_GIGA_MAC_VER_37: 3840 case RTL_GIGA_MAC_VER_37:
3610 case RTL_GIGA_MAC_VER_38: 3841 case RTL_GIGA_MAC_VER_38:
3842 case RTL_GIGA_MAC_VER_39:
3843 case RTL_GIGA_MAC_VER_40:
3844 case RTL_GIGA_MAC_VER_41:
3611 RTL_W32(RxConfig, RTL_R32(RxConfig) | 3845 RTL_W32(RxConfig, RTL_R32(RxConfig) |
3612 AcceptBroadcast | AcceptMulticast | AcceptMyPhys); 3846 AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
3613 break; 3847 break;
@@ -3761,7 +3995,7 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
3761 3995
3762 if (tp->mac_version == RTL_GIGA_MAC_VER_32 || 3996 if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
3763 tp->mac_version == RTL_GIGA_MAC_VER_33) 3997 tp->mac_version == RTL_GIGA_MAC_VER_33)
3764 rtl_ephy_write(ioaddr, 0x19, 0xff64); 3998 rtl_ephy_write(tp, 0x19, 0xff64);
3765 3999
3766 if (rtl_wol_pll_power_down(tp)) 4000 if (rtl_wol_pll_power_down(tp))
3767 return; 4001 return;
@@ -3830,6 +4064,7 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
3830 case RTL_GIGA_MAC_VER_29: 4064 case RTL_GIGA_MAC_VER_29:
3831 case RTL_GIGA_MAC_VER_30: 4065 case RTL_GIGA_MAC_VER_30:
3832 case RTL_GIGA_MAC_VER_37: 4066 case RTL_GIGA_MAC_VER_37:
4067 case RTL_GIGA_MAC_VER_39:
3833 ops->down = r810x_pll_power_down; 4068 ops->down = r810x_pll_power_down;
3834 ops->up = r810x_pll_power_up; 4069 ops->up = r810x_pll_power_up;
3835 break; 4070 break;
@@ -3855,6 +4090,8 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
3855 case RTL_GIGA_MAC_VER_35: 4090 case RTL_GIGA_MAC_VER_35:
3856 case RTL_GIGA_MAC_VER_36: 4091 case RTL_GIGA_MAC_VER_36:
3857 case RTL_GIGA_MAC_VER_38: 4092 case RTL_GIGA_MAC_VER_38:
4093 case RTL_GIGA_MAC_VER_40:
4094 case RTL_GIGA_MAC_VER_41:
3858 ops->down = r8168_pll_power_down; 4095 ops->down = r8168_pll_power_down;
3859 ops->up = r8168_pll_power_up; 4096 ops->up = r8168_pll_power_up;
3860 break; 4097 break;
@@ -4051,6 +4288,8 @@ static void __devinit rtl_init_jumbo_ops(struct rtl8169_private *tp)
4051 * No action needed for jumbo frames with 8169. 4288 * No action needed for jumbo frames with 8169.
4052 * No jumbo for 810x at all. 4289 * No jumbo for 810x at all.
4053 */ 4290 */
4291 case RTL_GIGA_MAC_VER_40:
4292 case RTL_GIGA_MAC_VER_41:
4054 default: 4293 default:
4055 ops->disable = NULL; 4294 ops->disable = NULL;
4056 ops->enable = NULL; 4295 ops->enable = NULL;
@@ -4058,20 +4297,20 @@ static void __devinit rtl_init_jumbo_ops(struct rtl8169_private *tp)
4058 } 4297 }
4059} 4298}
4060 4299
4300DECLARE_RTL_COND(rtl_chipcmd_cond)
4301{
4302 void __iomem *ioaddr = tp->mmio_addr;
4303
4304 return RTL_R8(ChipCmd) & CmdReset;
4305}
4306
4061static void rtl_hw_reset(struct rtl8169_private *tp) 4307static void rtl_hw_reset(struct rtl8169_private *tp)
4062{ 4308{
4063 void __iomem *ioaddr = tp->mmio_addr; 4309 void __iomem *ioaddr = tp->mmio_addr;
4064 int i;
4065 4310
4066 /* Soft reset the chip. */
4067 RTL_W8(ChipCmd, CmdReset); 4311 RTL_W8(ChipCmd, CmdReset);
4068 4312
4069 /* Check that the chip has finished the reset. */ 4313 rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
4070 for (i = 0; i < 100; i++) {
4071 if ((RTL_R8(ChipCmd) & CmdReset) == 0)
4072 break;
4073 udelay(100);
4074 }
4075} 4314}
4076 4315
4077static void rtl_request_uncached_firmware(struct rtl8169_private *tp) 4316static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
@@ -4125,6 +4364,20 @@ static void rtl_rx_close(struct rtl8169_private *tp)
4125 RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK); 4364 RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
4126} 4365}
4127 4366
4367DECLARE_RTL_COND(rtl_npq_cond)
4368{
4369 void __iomem *ioaddr = tp->mmio_addr;
4370
4371 return RTL_R8(TxPoll) & NPQ;
4372}
4373
4374DECLARE_RTL_COND(rtl_txcfg_empty_cond)
4375{
4376 void __iomem *ioaddr = tp->mmio_addr;
4377
4378 return RTL_R32(TxConfig) & TXCFG_EMPTY;
4379}
4380
4128static void rtl8169_hw_reset(struct rtl8169_private *tp) 4381static void rtl8169_hw_reset(struct rtl8169_private *tp)
4129{ 4382{
4130 void __iomem *ioaddr = tp->mmio_addr; 4383 void __iomem *ioaddr = tp->mmio_addr;
@@ -4137,16 +4390,16 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
4137 if (tp->mac_version == RTL_GIGA_MAC_VER_27 || 4390 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
4138 tp->mac_version == RTL_GIGA_MAC_VER_28 || 4391 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
4139 tp->mac_version == RTL_GIGA_MAC_VER_31) { 4392 tp->mac_version == RTL_GIGA_MAC_VER_31) {
4140 while (RTL_R8(TxPoll) & NPQ) 4393 rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
4141 udelay(20);
4142 } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 || 4394 } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
4143 tp->mac_version == RTL_GIGA_MAC_VER_35 || 4395 tp->mac_version == RTL_GIGA_MAC_VER_35 ||
4144 tp->mac_version == RTL_GIGA_MAC_VER_36 || 4396 tp->mac_version == RTL_GIGA_MAC_VER_36 ||
4145 tp->mac_version == RTL_GIGA_MAC_VER_37 || 4397 tp->mac_version == RTL_GIGA_MAC_VER_37 ||
4398 tp->mac_version == RTL_GIGA_MAC_VER_40 ||
4399 tp->mac_version == RTL_GIGA_MAC_VER_41 ||
4146 tp->mac_version == RTL_GIGA_MAC_VER_38) { 4400 tp->mac_version == RTL_GIGA_MAC_VER_38) {
4147 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq); 4401 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
4148 while (!(RTL_R32(TxConfig) & TXCFG_EMPTY)) 4402 rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
4149 udelay(100);
4150 } else { 4403 } else {
4151 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq); 4404 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
4152 udelay(100); 4405 udelay(100);
@@ -4352,15 +4605,12 @@ static void rtl_hw_start_8169(struct net_device *dev)
4352static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value) 4605static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
4353{ 4606{
4354 if (tp->csi_ops.write) 4607 if (tp->csi_ops.write)
4355 tp->csi_ops.write(tp->mmio_addr, addr, value); 4608 tp->csi_ops.write(tp, addr, value);
4356} 4609}
4357 4610
4358static u32 rtl_csi_read(struct rtl8169_private *tp, int addr) 4611static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
4359{ 4612{
4360 if (tp->csi_ops.read) 4613 return tp->csi_ops.read ? tp->csi_ops.read(tp, addr) : ~0;
4361 return tp->csi_ops.read(tp->mmio_addr, addr);
4362 else
4363 return ~0;
4364} 4614}
4365 4615
4366static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits) 4616static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
@@ -4381,73 +4631,56 @@ static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
4381 rtl_csi_access_enable(tp, 0x27000000); 4631 rtl_csi_access_enable(tp, 0x27000000);
4382} 4632}
4383 4633
4384static void r8169_csi_write(void __iomem *ioaddr, int addr, int value) 4634DECLARE_RTL_COND(rtl_csiar_cond)
4385{ 4635{
4386 unsigned int i; 4636 void __iomem *ioaddr = tp->mmio_addr;
4637
4638 return RTL_R32(CSIAR) & CSIAR_FLAG;
4639}
4640
4641static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value)
4642{
4643 void __iomem *ioaddr = tp->mmio_addr;
4387 4644
4388 RTL_W32(CSIDR, value); 4645 RTL_W32(CSIDR, value);
4389 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | 4646 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
4390 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); 4647 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4391 4648
4392 for (i = 0; i < 100; i++) { 4649 rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
4393 if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
4394 break;
4395 udelay(10);
4396 }
4397} 4650}
4398 4651
4399static u32 r8169_csi_read(void __iomem *ioaddr, int addr) 4652static u32 r8169_csi_read(struct rtl8169_private *tp, int addr)
4400{ 4653{
4401 u32 value = ~0x00; 4654 void __iomem *ioaddr = tp->mmio_addr;
4402 unsigned int i;
4403 4655
4404 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | 4656 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
4405 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); 4657 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4406 4658
4407 for (i = 0; i < 100; i++) { 4659 return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
4408 if (RTL_R32(CSIAR) & CSIAR_FLAG) { 4660 RTL_R32(CSIDR) : ~0;
4409 value = RTL_R32(CSIDR);
4410 break;
4411 }
4412 udelay(10);
4413 }
4414
4415 return value;
4416} 4661}
4417 4662
4418static void r8402_csi_write(void __iomem *ioaddr, int addr, int value) 4663static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value)
4419{ 4664{
4420 unsigned int i; 4665 void __iomem *ioaddr = tp->mmio_addr;
4421 4666
4422 RTL_W32(CSIDR, value); 4667 RTL_W32(CSIDR, value);
4423 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | 4668 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
4424 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT | 4669 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
4425 CSIAR_FUNC_NIC); 4670 CSIAR_FUNC_NIC);
4426 4671
4427 for (i = 0; i < 100; i++) { 4672 rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
4428 if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
4429 break;
4430 udelay(10);
4431 }
4432} 4673}
4433 4674
4434static u32 r8402_csi_read(void __iomem *ioaddr, int addr) 4675static u32 r8402_csi_read(struct rtl8169_private *tp, int addr)
4435{ 4676{
4436 u32 value = ~0x00; 4677 void __iomem *ioaddr = tp->mmio_addr;
4437 unsigned int i;
4438 4678
4439 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC | 4679 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
4440 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); 4680 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4441 4681
4442 for (i = 0; i < 100; i++) { 4682 return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
4443 if (RTL_R32(CSIAR) & CSIAR_FLAG) { 4683 RTL_R32(CSIDR) : ~0;
4444 value = RTL_R32(CSIDR);
4445 break;
4446 }
4447 udelay(10);
4448 }
4449
4450 return value;
4451} 4684}
4452 4685
4453static void __devinit rtl_init_csi_ops(struct rtl8169_private *tp) 4686static void __devinit rtl_init_csi_ops(struct rtl8169_private *tp)
@@ -4492,13 +4725,14 @@ struct ephy_info {
4492 u16 bits; 4725 u16 bits;
4493}; 4726};
4494 4727
4495static void rtl_ephy_init(void __iomem *ioaddr, const struct ephy_info *e, int len) 4728static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e,
4729 int len)
4496{ 4730{
4497 u16 w; 4731 u16 w;
4498 4732
4499 while (len-- > 0) { 4733 while (len-- > 0) {
4500 w = (rtl_ephy_read(ioaddr, e->offset) & ~e->mask) | e->bits; 4734 w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits;
4501 rtl_ephy_write(ioaddr, e->offset, w); 4735 rtl_ephy_write(tp, e->offset, w);
4502 e++; 4736 e++;
4503 } 4737 }
4504} 4738}
@@ -4582,7 +4816,6 @@ static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
4582 4816
4583static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp) 4817static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
4584{ 4818{
4585 void __iomem *ioaddr = tp->mmio_addr;
4586 static const struct ephy_info e_info_8168cp[] = { 4819 static const struct ephy_info e_info_8168cp[] = {
4587 { 0x01, 0, 0x0001 }, 4820 { 0x01, 0, 0x0001 },
4588 { 0x02, 0x0800, 0x1000 }, 4821 { 0x02, 0x0800, 0x1000 },
@@ -4593,7 +4826,7 @@ static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
4593 4826
4594 rtl_csi_access_enable_2(tp); 4827 rtl_csi_access_enable_2(tp);
4595 4828
4596 rtl_ephy_init(ioaddr, e_info_8168cp, ARRAY_SIZE(e_info_8168cp)); 4829 rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
4597 4830
4598 __rtl_hw_start_8168cp(tp); 4831 __rtl_hw_start_8168cp(tp);
4599} 4832}
@@ -4644,14 +4877,13 @@ static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
4644 4877
4645 RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2); 4878 RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
4646 4879
4647 rtl_ephy_init(ioaddr, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1)); 4880 rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
4648 4881
4649 __rtl_hw_start_8168cp(tp); 4882 __rtl_hw_start_8168cp(tp);
4650} 4883}
4651 4884
4652static void rtl_hw_start_8168c_2(struct rtl8169_private *tp) 4885static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
4653{ 4886{
4654 void __iomem *ioaddr = tp->mmio_addr;
4655 static const struct ephy_info e_info_8168c_2[] = { 4887 static const struct ephy_info e_info_8168c_2[] = {
4656 { 0x01, 0, 0x0001 }, 4888 { 0x01, 0, 0x0001 },
4657 { 0x03, 0x0400, 0x0220 } 4889 { 0x03, 0x0400, 0x0220 }
@@ -4659,7 +4891,7 @@ static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
4659 4891
4660 rtl_csi_access_enable_2(tp); 4892 rtl_csi_access_enable_2(tp);
4661 4893
4662 rtl_ephy_init(ioaddr, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2)); 4894 rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
4663 4895
4664 __rtl_hw_start_8168cp(tp); 4896 __rtl_hw_start_8168cp(tp);
4665} 4897}
@@ -4727,8 +4959,8 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
4727 const struct ephy_info *e = e_info_8168d_4 + i; 4959 const struct ephy_info *e = e_info_8168d_4 + i;
4728 u16 w; 4960 u16 w;
4729 4961
4730 w = rtl_ephy_read(ioaddr, e->offset); 4962 w = rtl_ephy_read(tp, e->offset);
4731 rtl_ephy_write(ioaddr, 0x03, (w & e->mask) | e->bits); 4963 rtl_ephy_write(tp, 0x03, (w & e->mask) | e->bits);
4732 } 4964 }
4733 4965
4734 rtl_enable_clock_request(pdev); 4966 rtl_enable_clock_request(pdev);
@@ -4756,7 +4988,7 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
4756 4988
4757 rtl_csi_access_enable_2(tp); 4989 rtl_csi_access_enable_2(tp);
4758 4990
4759 rtl_ephy_init(ioaddr, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1)); 4991 rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
4760 4992
4761 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 4993 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4762 4994
@@ -4782,19 +5014,18 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
4782 5014
4783 rtl_csi_access_enable_1(tp); 5015 rtl_csi_access_enable_1(tp);
4784 5016
4785 rtl_ephy_init(ioaddr, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2)); 5017 rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
4786 5018
4787 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 5019 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4788 5020
4789 rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 5021 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
4790 rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 5022 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
4791 rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC); 5023 rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
4792 rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); 5024 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
4793 rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); 5025 rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
4794 rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC); 5026 rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
4795 rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); 5027 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
4796 rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, 5028 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
4797 ERIAR_EXGMAC);
4798 5029
4799 RTL_W8(MaxTxPacketSize, EarlySize); 5030 RTL_W8(MaxTxPacketSize, EarlySize);
4800 5031
@@ -4820,16 +5051,16 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
4820 5051
4821 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 5052 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4822 5053
4823 rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 5054 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
4824 rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 5055 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
4825 rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC); 5056 rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
4826 rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); 5057 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
4827 rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); 5058 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
4828 rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); 5059 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
4829 rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); 5060 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
4830 rtl_w1w0_eri(ioaddr, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); 5061 rtl_w1w0_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
4831 rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); 5062 rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
4832 rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC); 5063 rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
4833 5064
4834 RTL_W8(MaxTxPacketSize, EarlySize); 5065 RTL_W8(MaxTxPacketSize, EarlySize);
4835 5066
@@ -4854,10 +5085,9 @@ static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
4854 5085
4855 rtl_hw_start_8168f(tp); 5086 rtl_hw_start_8168f(tp);
4856 5087
4857 rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); 5088 rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
4858 5089
4859 rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, 5090 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
4860 ERIAR_EXGMAC);
4861 5091
4862 /* Adjust EEE LED frequency */ 5092 /* Adjust EEE LED frequency */
4863 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); 5093 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
@@ -4865,7 +5095,6 @@ static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
4865 5095
4866static void rtl_hw_start_8411(struct rtl8169_private *tp) 5096static void rtl_hw_start_8411(struct rtl8169_private *tp)
4867{ 5097{
4868 void __iomem *ioaddr = tp->mmio_addr;
4869 static const struct ephy_info e_info_8168f_1[] = { 5098 static const struct ephy_info e_info_8168f_1[] = {
4870 { 0x06, 0x00c0, 0x0020 }, 5099 { 0x06, 0x00c0, 0x0020 },
4871 { 0x0f, 0xffff, 0x5200 }, 5100 { 0x0f, 0xffff, 0x5200 },
@@ -4875,10 +5104,39 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
4875 5104
4876 rtl_hw_start_8168f(tp); 5105 rtl_hw_start_8168f(tp);
4877 5106
4878 rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); 5107 rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
4879 5108
4880 rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, 5109 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC);
4881 ERIAR_EXGMAC); 5110}
5111
5112static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
5113{
5114 void __iomem *ioaddr = tp->mmio_addr;
5115 struct pci_dev *pdev = tp->pci_dev;
5116
5117 rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
5118 rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
5119 rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
5120 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
5121
5122 rtl_csi_access_enable_1(tp);
5123
5124 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5125
5126 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5127 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5128
5129 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5130 RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
5131 RTL_W8(MaxTxPacketSize, EarlySize);
5132
5133 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5134 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5135
5136 /* Adjust EEE LED frequency */
5137 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
5138
5139 rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x02, ERIAR_EXGMAC);
4882} 5140}
4883 5141
4884static void rtl_hw_start_8168(struct net_device *dev) 5142static void rtl_hw_start_8168(struct net_device *dev)
@@ -4982,6 +5240,11 @@ static void rtl_hw_start_8168(struct net_device *dev)
4982 rtl_hw_start_8411(tp); 5240 rtl_hw_start_8411(tp);
4983 break; 5241 break;
4984 5242
5243 case RTL_GIGA_MAC_VER_40:
5244 case RTL_GIGA_MAC_VER_41:
5245 rtl_hw_start_8168g_1(tp);
5246 break;
5247
4985 default: 5248 default:
4986 printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n", 5249 printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
4987 dev->name, tp->mac_version); 5250 dev->name, tp->mac_version);
@@ -5036,7 +5299,7 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
5036 if ((cfg1 & LEDS0) && (cfg1 & LEDS1)) 5299 if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
5037 RTL_W8(Config1, cfg1 & ~LEDS0); 5300 RTL_W8(Config1, cfg1 & ~LEDS0);
5038 5301
5039 rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1)); 5302 rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
5040} 5303}
5041 5304
5042static void rtl_hw_start_8102e_2(struct rtl8169_private *tp) 5305static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
@@ -5056,7 +5319,7 @@ static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
5056{ 5319{
5057 rtl_hw_start_8102e_2(tp); 5320 rtl_hw_start_8102e_2(tp);
5058 5321
5059 rtl_ephy_write(tp->mmio_addr, 0x03, 0xc2f9); 5322 rtl_ephy_write(tp, 0x03, 0xc2f9);
5060} 5323}
5061 5324
5062static void rtl_hw_start_8105e_1(struct rtl8169_private *tp) 5325static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
@@ -5082,15 +5345,13 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
5082 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET); 5345 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
5083 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); 5346 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5084 5347
5085 rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1)); 5348 rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
5086} 5349}
5087 5350
5088static void rtl_hw_start_8105e_2(struct rtl8169_private *tp) 5351static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
5089{ 5352{
5090 void __iomem *ioaddr = tp->mmio_addr;
5091
5092 rtl_hw_start_8105e_1(tp); 5353 rtl_hw_start_8105e_1(tp);
5093 rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000); 5354 rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000);
5094} 5355}
5095 5356
5096static void rtl_hw_start_8402(struct rtl8169_private *tp) 5357static void rtl_hw_start_8402(struct rtl8169_private *tp)
@@ -5109,18 +5370,29 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
5109 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); 5370 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5110 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); 5371 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5111 5372
5112 rtl_ephy_init(ioaddr, e_info_8402, ARRAY_SIZE(e_info_8402)); 5373 rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
5113 5374
5114 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT); 5375 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
5115 5376
5116 rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC); 5377 rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
5117 rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC); 5378 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
5118 rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); 5379 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5119 rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); 5380 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5120 rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 5381 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5121 rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 5382 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5122 rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, 5383 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
5123 ERIAR_EXGMAC); 5384}
5385
5386static void rtl_hw_start_8106(struct rtl8169_private *tp)
5387{
5388 void __iomem *ioaddr = tp->mmio_addr;
5389
5390 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5391 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
5392
5393 RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
5394 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
5395 RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
5124} 5396}
5125 5397
5126static void rtl_hw_start_8101(struct net_device *dev) 5398static void rtl_hw_start_8101(struct net_device *dev)
@@ -5167,6 +5439,10 @@ static void rtl_hw_start_8101(struct net_device *dev)
5167 case RTL_GIGA_MAC_VER_37: 5439 case RTL_GIGA_MAC_VER_37:
5168 rtl_hw_start_8402(tp); 5440 rtl_hw_start_8402(tp);
5169 break; 5441 break;
5442
5443 case RTL_GIGA_MAC_VER_39:
5444 rtl_hw_start_8106(tp);
5445 break;
5170 } 5446 }
5171 5447
5172 RTL_W8(Cfg9346, Cfg9346_Lock); 5448 RTL_W8(Cfg9346, Cfg9346_Lock);
@@ -6435,6 +6711,67 @@ static unsigned rtl_try_msi(struct rtl8169_private *tp,
6435 return msi; 6711 return msi;
6436} 6712}
6437 6713
6714DECLARE_RTL_COND(rtl_link_list_ready_cond)
6715{
6716 void __iomem *ioaddr = tp->mmio_addr;
6717
6718 return RTL_R8(MCU) & LINK_LIST_RDY;
6719}
6720
6721DECLARE_RTL_COND(rtl_rxtx_empty_cond)
6722{
6723 void __iomem *ioaddr = tp->mmio_addr;
6724
6725 return (RTL_R8(MCU) & RXTX_EMPTY) == RXTX_EMPTY;
6726}
6727
6728static void __devinit rtl_hw_init_8168g(struct rtl8169_private *tp)
6729{
6730 void __iomem *ioaddr = tp->mmio_addr;
6731 u32 data;
6732
6733 tp->ocp_base = OCP_STD_PHY_BASE;
6734
6735 RTL_W32(MISC, RTL_R32(MISC) | RXDV_GATED_EN);
6736
6737 if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42))
6738 return;
6739
6740 if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
6741 return;
6742
6743 RTL_W8(ChipCmd, RTL_R8(ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
6744 msleep(1);
6745 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
6746
6747 data = r8168_mac_ocp_read(tp, 0xe8de);
6748 data &= ~(1 << 14);
6749 r8168_mac_ocp_write(tp, 0xe8de, data);
6750
6751 if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
6752 return;
6753
6754 data = r8168_mac_ocp_read(tp, 0xe8de);
6755 data |= (1 << 15);
6756 r8168_mac_ocp_write(tp, 0xe8de, data);
6757
6758 if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
6759 return;
6760}
6761
6762static void __devinit rtl_hw_initialize(struct rtl8169_private *tp)
6763{
6764 switch (tp->mac_version) {
6765 case RTL_GIGA_MAC_VER_40:
6766 case RTL_GIGA_MAC_VER_41:
6767 rtl_hw_init_8168g(tp);
6768 break;
6769
6770 default:
6771 break;
6772 }
6773}
6774
6438static int __devinit 6775static int __devinit
6439rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 6776rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6440{ 6777{
@@ -6544,6 +6881,8 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6544 6881
6545 rtl_irq_disable(tp); 6882 rtl_irq_disable(tp);
6546 6883
6884 rtl_hw_initialize(tp);
6885
6547 rtl_hw_reset(tp); 6886 rtl_hw_reset(tp);
6548 6887
6549 rtl_ack_events(tp, 0xffff); 6888 rtl_ack_events(tp, 0xffff);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 79bf09b41971..af0b867a6cf6 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -49,6 +49,34 @@
49 NETIF_MSG_RX_ERR| \ 49 NETIF_MSG_RX_ERR| \
50 NETIF_MSG_TX_ERR) 50 NETIF_MSG_TX_ERR)
51 51
52#if defined(CONFIG_CPU_SUBTYPE_SH7734) || \
53 defined(CONFIG_CPU_SUBTYPE_SH7763) || \
54 defined(CONFIG_ARCH_R8A7740)
55static void sh_eth_select_mii(struct net_device *ndev)
56{
57 u32 value = 0x0;
58 struct sh_eth_private *mdp = netdev_priv(ndev);
59
60 switch (mdp->phy_interface) {
61 case PHY_INTERFACE_MODE_GMII:
62 value = 0x2;
63 break;
64 case PHY_INTERFACE_MODE_MII:
65 value = 0x1;
66 break;
67 case PHY_INTERFACE_MODE_RMII:
68 value = 0x0;
69 break;
70 default:
71 pr_warn("PHY interface mode was not setup. Set to MII.\n");
72 value = 0x1;
73 break;
74 }
75
76 sh_eth_write(ndev, value, RMII_MII);
77}
78#endif
79
52/* There is CPU dependent code */ 80/* There is CPU dependent code */
53#if defined(CONFIG_CPU_SUBTYPE_SH7724) 81#if defined(CONFIG_CPU_SUBTYPE_SH7724)
54#define SH_ETH_RESET_DEFAULT 1 82#define SH_ETH_RESET_DEFAULT 1
@@ -102,6 +130,8 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
102#elif defined(CONFIG_CPU_SUBTYPE_SH7757) 130#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
103#define SH_ETH_HAS_BOTH_MODULES 1 131#define SH_ETH_HAS_BOTH_MODULES 1
104#define SH_ETH_HAS_TSU 1 132#define SH_ETH_HAS_TSU 1
133static int sh_eth_check_reset(struct net_device *ndev);
134
105static void sh_eth_set_duplex(struct net_device *ndev) 135static void sh_eth_set_duplex(struct net_device *ndev)
106{ 136{
107 struct sh_eth_private *mdp = netdev_priv(ndev); 137 struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -176,23 +206,19 @@ static void sh_eth_chip_reset_giga(struct net_device *ndev)
176} 206}
177 207
178static int sh_eth_is_gether(struct sh_eth_private *mdp); 208static int sh_eth_is_gether(struct sh_eth_private *mdp);
179static void sh_eth_reset(struct net_device *ndev) 209static int sh_eth_reset(struct net_device *ndev)
180{ 210{
181 struct sh_eth_private *mdp = netdev_priv(ndev); 211 struct sh_eth_private *mdp = netdev_priv(ndev);
182 int cnt = 100; 212 int ret = 0;
183 213
184 if (sh_eth_is_gether(mdp)) { 214 if (sh_eth_is_gether(mdp)) {
185 sh_eth_write(ndev, 0x03, EDSR); 215 sh_eth_write(ndev, 0x03, EDSR);
186 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, 216 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
187 EDMR); 217 EDMR);
188 while (cnt > 0) { 218
189 if (!(sh_eth_read(ndev, EDMR) & 0x3)) 219 ret = sh_eth_check_reset(ndev);
190 break; 220 if (ret)
191 mdelay(1); 221 goto out;
192 cnt--;
193 }
194 if (cnt < 0)
195 printk(KERN_ERR "Device reset fail\n");
196 222
197 /* Table Init */ 223 /* Table Init */
198 sh_eth_write(ndev, 0x0, TDLAR); 224 sh_eth_write(ndev, 0x0, TDLAR);
@@ -210,6 +236,9 @@ static void sh_eth_reset(struct net_device *ndev)
210 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, 236 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
211 EDMR); 237 EDMR);
212 } 238 }
239
240out:
241 return ret;
213} 242}
214 243
215static void sh_eth_set_duplex_giga(struct net_device *ndev) 244static void sh_eth_set_duplex_giga(struct net_device *ndev)
@@ -282,7 +311,9 @@ static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
282 311
283#elif defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763) 312#elif defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763)
284#define SH_ETH_HAS_TSU 1 313#define SH_ETH_HAS_TSU 1
314static int sh_eth_check_reset(struct net_device *ndev);
285static void sh_eth_reset_hw_crc(struct net_device *ndev); 315static void sh_eth_reset_hw_crc(struct net_device *ndev);
316
286static void sh_eth_chip_reset(struct net_device *ndev) 317static void sh_eth_chip_reset(struct net_device *ndev)
287{ 318{
288 struct sh_eth_private *mdp = netdev_priv(ndev); 319 struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -292,35 +323,6 @@ static void sh_eth_chip_reset(struct net_device *ndev)
292 mdelay(1); 323 mdelay(1);
293} 324}
294 325
295static void sh_eth_reset(struct net_device *ndev)
296{
297 int cnt = 100;
298
299 sh_eth_write(ndev, EDSR_ENALL, EDSR);
300 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
301 while (cnt > 0) {
302 if (!(sh_eth_read(ndev, EDMR) & 0x3))
303 break;
304 mdelay(1);
305 cnt--;
306 }
307 if (cnt == 0)
308 printk(KERN_ERR "Device reset fail\n");
309
310 /* Table Init */
311 sh_eth_write(ndev, 0x0, TDLAR);
312 sh_eth_write(ndev, 0x0, TDFAR);
313 sh_eth_write(ndev, 0x0, TDFXR);
314 sh_eth_write(ndev, 0x0, TDFFR);
315 sh_eth_write(ndev, 0x0, RDLAR);
316 sh_eth_write(ndev, 0x0, RDFAR);
317 sh_eth_write(ndev, 0x0, RDFXR);
318 sh_eth_write(ndev, 0x0, RDFFR);
319
320 /* Reset HW CRC register */
321 sh_eth_reset_hw_crc(ndev);
322}
323
324static void sh_eth_set_duplex(struct net_device *ndev) 326static void sh_eth_set_duplex(struct net_device *ndev)
325{ 327{
326 struct sh_eth_private *mdp = netdev_priv(ndev); 328 struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -377,9 +379,41 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
377 .tsu = 1, 379 .tsu = 1,
378#if defined(CONFIG_CPU_SUBTYPE_SH7734) 380#if defined(CONFIG_CPU_SUBTYPE_SH7734)
379 .hw_crc = 1, 381 .hw_crc = 1,
382 .select_mii = 1,
380#endif 383#endif
381}; 384};
382 385
386static int sh_eth_reset(struct net_device *ndev)
387{
388 int ret = 0;
389
390 sh_eth_write(ndev, EDSR_ENALL, EDSR);
391 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
392
393 ret = sh_eth_check_reset(ndev);
394 if (ret)
395 goto out;
396
397 /* Table Init */
398 sh_eth_write(ndev, 0x0, TDLAR);
399 sh_eth_write(ndev, 0x0, TDFAR);
400 sh_eth_write(ndev, 0x0, TDFXR);
401 sh_eth_write(ndev, 0x0, TDFFR);
402 sh_eth_write(ndev, 0x0, RDLAR);
403 sh_eth_write(ndev, 0x0, RDFAR);
404 sh_eth_write(ndev, 0x0, RDFXR);
405 sh_eth_write(ndev, 0x0, RDFFR);
406
407 /* Reset HW CRC register */
408 sh_eth_reset_hw_crc(ndev);
409
410 /* Select MII mode */
411 if (sh_eth_my_cpu_data.select_mii)
412 sh_eth_select_mii(ndev);
413out:
414 return ret;
415}
416
383static void sh_eth_reset_hw_crc(struct net_device *ndev) 417static void sh_eth_reset_hw_crc(struct net_device *ndev)
384{ 418{
385 if (sh_eth_my_cpu_data.hw_crc) 419 if (sh_eth_my_cpu_data.hw_crc)
@@ -388,44 +422,29 @@ static void sh_eth_reset_hw_crc(struct net_device *ndev)
388 422
389#elif defined(CONFIG_ARCH_R8A7740) 423#elif defined(CONFIG_ARCH_R8A7740)
390#define SH_ETH_HAS_TSU 1 424#define SH_ETH_HAS_TSU 1
425static int sh_eth_check_reset(struct net_device *ndev);
426
391static void sh_eth_chip_reset(struct net_device *ndev) 427static void sh_eth_chip_reset(struct net_device *ndev)
392{ 428{
393 struct sh_eth_private *mdp = netdev_priv(ndev); 429 struct sh_eth_private *mdp = netdev_priv(ndev);
394 unsigned long mii;
395 430
396 /* reset device */ 431 /* reset device */
397 sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR); 432 sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
398 mdelay(1); 433 mdelay(1);
399 434
400 switch (mdp->phy_interface) { 435 sh_eth_select_mii(ndev);
401 case PHY_INTERFACE_MODE_GMII:
402 mii = 2;
403 break;
404 case PHY_INTERFACE_MODE_MII:
405 mii = 1;
406 break;
407 case PHY_INTERFACE_MODE_RMII:
408 default:
409 mii = 0;
410 break;
411 }
412 sh_eth_write(ndev, mii, RMII_MII);
413} 436}
414 437
415static void sh_eth_reset(struct net_device *ndev) 438static int sh_eth_reset(struct net_device *ndev)
416{ 439{
417 int cnt = 100; 440 int ret = 0;
418 441
419 sh_eth_write(ndev, EDSR_ENALL, EDSR); 442 sh_eth_write(ndev, EDSR_ENALL, EDSR);
420 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR); 443 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
421 while (cnt > 0) { 444
422 if (!(sh_eth_read(ndev, EDMR) & 0x3)) 445 ret = sh_eth_check_reset(ndev);
423 break; 446 if (ret)
424 mdelay(1); 447 goto out;
425 cnt--;
426 }
427 if (cnt == 0)
428 printk(KERN_ERR "Device reset fail\n");
429 448
430 /* Table Init */ 449 /* Table Init */
431 sh_eth_write(ndev, 0x0, TDLAR); 450 sh_eth_write(ndev, 0x0, TDLAR);
@@ -436,6 +455,9 @@ static void sh_eth_reset(struct net_device *ndev)
436 sh_eth_write(ndev, 0x0, RDFAR); 455 sh_eth_write(ndev, 0x0, RDFAR);
437 sh_eth_write(ndev, 0x0, RDFXR); 456 sh_eth_write(ndev, 0x0, RDFXR);
438 sh_eth_write(ndev, 0x0, RDFFR); 457 sh_eth_write(ndev, 0x0, RDFFR);
458
459out:
460 return ret;
439} 461}
440 462
441static void sh_eth_set_duplex(struct net_device *ndev) 463static void sh_eth_set_duplex(struct net_device *ndev)
@@ -492,6 +514,7 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
492 .no_trimd = 1, 514 .no_trimd = 1,
493 .no_ade = 1, 515 .no_ade = 1,
494 .tsu = 1, 516 .tsu = 1,
517 .select_mii = 1,
495}; 518};
496 519
497#elif defined(CONFIG_CPU_SUBTYPE_SH7619) 520#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
@@ -543,11 +566,31 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
543 566
544#if defined(SH_ETH_RESET_DEFAULT) 567#if defined(SH_ETH_RESET_DEFAULT)
545/* Chip Reset */ 568/* Chip Reset */
546static void sh_eth_reset(struct net_device *ndev) 569static int sh_eth_reset(struct net_device *ndev)
547{ 570{
548 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR); 571 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR);
549 mdelay(3); 572 mdelay(3);
550 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR); 573 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR);
574
575 return 0;
576}
577#else
578static int sh_eth_check_reset(struct net_device *ndev)
579{
580 int ret = 0;
581 int cnt = 100;
582
583 while (cnt > 0) {
584 if (!(sh_eth_read(ndev, EDMR) & 0x3))
585 break;
586 mdelay(1);
587 cnt--;
588 }
589 if (cnt < 0) {
590 printk(KERN_ERR "Device reset fail\n");
591 ret = -ETIMEDOUT;
592 }
593 return ret;
551} 594}
552#endif 595#endif
553 596
@@ -739,21 +782,23 @@ static void sh_eth_ring_free(struct net_device *ndev)
739 782
740 /* Free Rx skb ringbuffer */ 783 /* Free Rx skb ringbuffer */
741 if (mdp->rx_skbuff) { 784 if (mdp->rx_skbuff) {
742 for (i = 0; i < RX_RING_SIZE; i++) { 785 for (i = 0; i < mdp->num_rx_ring; i++) {
743 if (mdp->rx_skbuff[i]) 786 if (mdp->rx_skbuff[i])
744 dev_kfree_skb(mdp->rx_skbuff[i]); 787 dev_kfree_skb(mdp->rx_skbuff[i]);
745 } 788 }
746 } 789 }
747 kfree(mdp->rx_skbuff); 790 kfree(mdp->rx_skbuff);
791 mdp->rx_skbuff = NULL;
748 792
749 /* Free Tx skb ringbuffer */ 793 /* Free Tx skb ringbuffer */
750 if (mdp->tx_skbuff) { 794 if (mdp->tx_skbuff) {
751 for (i = 0; i < TX_RING_SIZE; i++) { 795 for (i = 0; i < mdp->num_tx_ring; i++) {
752 if (mdp->tx_skbuff[i]) 796 if (mdp->tx_skbuff[i])
753 dev_kfree_skb(mdp->tx_skbuff[i]); 797 dev_kfree_skb(mdp->tx_skbuff[i]);
754 } 798 }
755 } 799 }
756 kfree(mdp->tx_skbuff); 800 kfree(mdp->tx_skbuff);
801 mdp->tx_skbuff = NULL;
757} 802}
758 803
759/* format skb and descriptor buffer */ 804/* format skb and descriptor buffer */
@@ -764,8 +809,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
764 struct sk_buff *skb; 809 struct sk_buff *skb;
765 struct sh_eth_rxdesc *rxdesc = NULL; 810 struct sh_eth_rxdesc *rxdesc = NULL;
766 struct sh_eth_txdesc *txdesc = NULL; 811 struct sh_eth_txdesc *txdesc = NULL;
767 int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE; 812 int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
768 int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE; 813 int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
769 814
770 mdp->cur_rx = mdp->cur_tx = 0; 815 mdp->cur_rx = mdp->cur_tx = 0;
771 mdp->dirty_rx = mdp->dirty_tx = 0; 816 mdp->dirty_rx = mdp->dirty_tx = 0;
@@ -773,7 +818,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
773 memset(mdp->rx_ring, 0, rx_ringsize); 818 memset(mdp->rx_ring, 0, rx_ringsize);
774 819
775 /* build Rx ring buffer */ 820 /* build Rx ring buffer */
776 for (i = 0; i < RX_RING_SIZE; i++) { 821 for (i = 0; i < mdp->num_rx_ring; i++) {
777 /* skb */ 822 /* skb */
778 mdp->rx_skbuff[i] = NULL; 823 mdp->rx_skbuff[i] = NULL;
779 skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz); 824 skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
@@ -799,7 +844,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
799 } 844 }
800 } 845 }
801 846
802 mdp->dirty_rx = (u32) (i - RX_RING_SIZE); 847 mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
803 848
804 /* Mark the last entry as wrapping the ring. */ 849 /* Mark the last entry as wrapping the ring. */
805 rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL); 850 rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
@@ -807,7 +852,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
807 memset(mdp->tx_ring, 0, tx_ringsize); 852 memset(mdp->tx_ring, 0, tx_ringsize);
808 853
809 /* build Tx ring buffer */ 854 /* build Tx ring buffer */
810 for (i = 0; i < TX_RING_SIZE; i++) { 855 for (i = 0; i < mdp->num_tx_ring; i++) {
811 mdp->tx_skbuff[i] = NULL; 856 mdp->tx_skbuff[i] = NULL;
812 txdesc = &mdp->tx_ring[i]; 857 txdesc = &mdp->tx_ring[i];
813 txdesc->status = cpu_to_edmac(mdp, TD_TFP); 858 txdesc->status = cpu_to_edmac(mdp, TD_TFP);
@@ -841,7 +886,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
841 mdp->rx_buf_sz += NET_IP_ALIGN; 886 mdp->rx_buf_sz += NET_IP_ALIGN;
842 887
843 /* Allocate RX and TX skb rings */ 888 /* Allocate RX and TX skb rings */
844 mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE, 889 mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * mdp->num_rx_ring,
845 GFP_KERNEL); 890 GFP_KERNEL);
846 if (!mdp->rx_skbuff) { 891 if (!mdp->rx_skbuff) {
847 dev_err(&ndev->dev, "Cannot allocate Rx skb\n"); 892 dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
@@ -849,7 +894,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
849 return ret; 894 return ret;
850 } 895 }
851 896
852 mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE, 897 mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * mdp->num_tx_ring,
853 GFP_KERNEL); 898 GFP_KERNEL);
854 if (!mdp->tx_skbuff) { 899 if (!mdp->tx_skbuff) {
855 dev_err(&ndev->dev, "Cannot allocate Tx skb\n"); 900 dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
@@ -858,7 +903,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
858 } 903 }
859 904
860 /* Allocate all Rx descriptors. */ 905 /* Allocate all Rx descriptors. */
861 rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE; 906 rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
862 mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma, 907 mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
863 GFP_KERNEL); 908 GFP_KERNEL);
864 909
@@ -872,7 +917,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
872 mdp->dirty_rx = 0; 917 mdp->dirty_rx = 0;
873 918
874 /* Allocate all Tx descriptors. */ 919 /* Allocate all Tx descriptors. */
875 tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE; 920 tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
876 mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma, 921 mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
877 GFP_KERNEL); 922 GFP_KERNEL);
878 if (!mdp->tx_ring) { 923 if (!mdp->tx_ring) {
@@ -890,19 +935,41 @@ desc_ring_free:
890skb_ring_free: 935skb_ring_free:
891 /* Free Rx and Tx skb ring buffer */ 936 /* Free Rx and Tx skb ring buffer */
892 sh_eth_ring_free(ndev); 937 sh_eth_ring_free(ndev);
938 mdp->tx_ring = NULL;
939 mdp->rx_ring = NULL;
893 940
894 return ret; 941 return ret;
895} 942}
896 943
897static int sh_eth_dev_init(struct net_device *ndev) 944static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
945{
946 int ringsize;
947
948 if (mdp->rx_ring) {
949 ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
950 dma_free_coherent(NULL, ringsize, mdp->rx_ring,
951 mdp->rx_desc_dma);
952 mdp->rx_ring = NULL;
953 }
954
955 if (mdp->tx_ring) {
956 ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
957 dma_free_coherent(NULL, ringsize, mdp->tx_ring,
958 mdp->tx_desc_dma);
959 mdp->tx_ring = NULL;
960 }
961}
962
963static int sh_eth_dev_init(struct net_device *ndev, bool start)
898{ 964{
899 int ret = 0; 965 int ret = 0;
900 struct sh_eth_private *mdp = netdev_priv(ndev); 966 struct sh_eth_private *mdp = netdev_priv(ndev);
901 u_int32_t rx_int_var, tx_int_var;
902 u32 val; 967 u32 val;
903 968
904 /* Soft Reset */ 969 /* Soft Reset */
905 sh_eth_reset(ndev); 970 ret = sh_eth_reset(ndev);
971 if (ret)
972 goto out;
906 973
907 /* Descriptor format */ 974 /* Descriptor format */
908 sh_eth_ring_format(ndev); 975 sh_eth_ring_format(ndev);
@@ -926,9 +993,7 @@ static int sh_eth_dev_init(struct net_device *ndev)
926 /* Frame recv control */ 993 /* Frame recv control */
927 sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR); 994 sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);
928 995
929 rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5; 996 sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);
930 tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
931 sh_eth_write(ndev, rx_int_var | tx_int_var, TRSCER);
932 997
933 if (mdp->cd->bculr) 998 if (mdp->cd->bculr)
934 sh_eth_write(ndev, 0x800, BCULR); /* Burst sycle set */ 999 sh_eth_write(ndev, 0x800, BCULR); /* Burst sycle set */
@@ -943,7 +1008,8 @@ static int sh_eth_dev_init(struct net_device *ndev)
943 RFLR); 1008 RFLR);
944 1009
945 sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR); 1010 sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
946 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); 1011 if (start)
1012 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
947 1013
948 /* PAUSE Prohibition */ 1014 /* PAUSE Prohibition */
949 val = (sh_eth_read(ndev, ECMR) & ECMR_DM) | 1015 val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
@@ -958,7 +1024,8 @@ static int sh_eth_dev_init(struct net_device *ndev)
958 sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR); 1024 sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
959 1025
960 /* E-MAC Interrupt Enable register */ 1026 /* E-MAC Interrupt Enable register */
961 sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR); 1027 if (start)
1028 sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
962 1029
963 /* Set MAC address */ 1030 /* Set MAC address */
964 update_mac_address(ndev); 1031 update_mac_address(ndev);
@@ -971,11 +1038,14 @@ static int sh_eth_dev_init(struct net_device *ndev)
971 if (mdp->cd->tpauser) 1038 if (mdp->cd->tpauser)
972 sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER); 1039 sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
973 1040
974 /* Setting the Rx mode will start the Rx process. */ 1041 if (start) {
975 sh_eth_write(ndev, EDRRR_R, EDRRR); 1042 /* Setting the Rx mode will start the Rx process. */
1043 sh_eth_write(ndev, EDRRR_R, EDRRR);
976 1044
977 netif_start_queue(ndev); 1045 netif_start_queue(ndev);
1046 }
978 1047
1048out:
979 return ret; 1049 return ret;
980} 1050}
981 1051
@@ -988,7 +1058,7 @@ static int sh_eth_txfree(struct net_device *ndev)
988 int entry = 0; 1058 int entry = 0;
989 1059
990 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { 1060 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
991 entry = mdp->dirty_tx % TX_RING_SIZE; 1061 entry = mdp->dirty_tx % mdp->num_tx_ring;
992 txdesc = &mdp->tx_ring[entry]; 1062 txdesc = &mdp->tx_ring[entry];
993 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT)) 1063 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
994 break; 1064 break;
@@ -1001,7 +1071,7 @@ static int sh_eth_txfree(struct net_device *ndev)
1001 freeNum++; 1071 freeNum++;
1002 } 1072 }
1003 txdesc->status = cpu_to_edmac(mdp, TD_TFP); 1073 txdesc->status = cpu_to_edmac(mdp, TD_TFP);
1004 if (entry >= TX_RING_SIZE - 1) 1074 if (entry >= mdp->num_tx_ring - 1)
1005 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); 1075 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
1006 1076
1007 ndev->stats.tx_packets++; 1077 ndev->stats.tx_packets++;
@@ -1016,8 +1086,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
1016 struct sh_eth_private *mdp = netdev_priv(ndev); 1086 struct sh_eth_private *mdp = netdev_priv(ndev);
1017 struct sh_eth_rxdesc *rxdesc; 1087 struct sh_eth_rxdesc *rxdesc;
1018 1088
1019 int entry = mdp->cur_rx % RX_RING_SIZE; 1089 int entry = mdp->cur_rx % mdp->num_rx_ring;
1020 int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx; 1090 int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
1021 struct sk_buff *skb; 1091 struct sk_buff *skb;
1022 u16 pkt_len = 0; 1092 u16 pkt_len = 0;
1023 u32 desc_status; 1093 u32 desc_status;
@@ -1068,13 +1138,13 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
1068 ndev->stats.rx_bytes += pkt_len; 1138 ndev->stats.rx_bytes += pkt_len;
1069 } 1139 }
1070 rxdesc->status |= cpu_to_edmac(mdp, RD_RACT); 1140 rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
1071 entry = (++mdp->cur_rx) % RX_RING_SIZE; 1141 entry = (++mdp->cur_rx) % mdp->num_rx_ring;
1072 rxdesc = &mdp->rx_ring[entry]; 1142 rxdesc = &mdp->rx_ring[entry];
1073 } 1143 }
1074 1144
1075 /* Refill the Rx ring buffers. */ 1145 /* Refill the Rx ring buffers. */
1076 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) { 1146 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
1077 entry = mdp->dirty_rx % RX_RING_SIZE; 1147 entry = mdp->dirty_rx % mdp->num_rx_ring;
1078 rxdesc = &mdp->rx_ring[entry]; 1148 rxdesc = &mdp->rx_ring[entry];
1079 /* The size of the buffer is 16 byte boundary. */ 1149 /* The size of the buffer is 16 byte boundary. */
1080 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); 1150 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
@@ -1091,7 +1161,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
1091 skb_checksum_none_assert(skb); 1161 skb_checksum_none_assert(skb);
1092 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4)); 1162 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
1093 } 1163 }
1094 if (entry >= RX_RING_SIZE - 1) 1164 if (entry >= mdp->num_rx_ring - 1)
1095 rxdesc->status |= 1165 rxdesc->status |=
1096 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL); 1166 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
1097 else 1167 else
@@ -1293,14 +1363,6 @@ other_irq:
1293 return ret; 1363 return ret;
1294} 1364}
1295 1365
1296static void sh_eth_timer(unsigned long data)
1297{
1298 struct net_device *ndev = (struct net_device *)data;
1299 struct sh_eth_private *mdp = netdev_priv(ndev);
1300
1301 mod_timer(&mdp->timer, jiffies + (10 * HZ));
1302}
1303
1304/* PHY state control function */ 1366/* PHY state control function */
1305static void sh_eth_adjust_link(struct net_device *ndev) 1367static void sh_eth_adjust_link(struct net_device *ndev)
1306{ 1368{
@@ -1499,6 +1561,71 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1499 } 1561 }
1500} 1562}
1501 1563
1564static void sh_eth_get_ringparam(struct net_device *ndev,
1565 struct ethtool_ringparam *ring)
1566{
1567 struct sh_eth_private *mdp = netdev_priv(ndev);
1568
1569 ring->rx_max_pending = RX_RING_MAX;
1570 ring->tx_max_pending = TX_RING_MAX;
1571 ring->rx_pending = mdp->num_rx_ring;
1572 ring->tx_pending = mdp->num_tx_ring;
1573}
1574
1575static int sh_eth_set_ringparam(struct net_device *ndev,
1576 struct ethtool_ringparam *ring)
1577{
1578 struct sh_eth_private *mdp = netdev_priv(ndev);
1579 int ret;
1580
1581 if (ring->tx_pending > TX_RING_MAX ||
1582 ring->rx_pending > RX_RING_MAX ||
1583 ring->tx_pending < TX_RING_MIN ||
1584 ring->rx_pending < RX_RING_MIN)
1585 return -EINVAL;
1586 if (ring->rx_mini_pending || ring->rx_jumbo_pending)
1587 return -EINVAL;
1588
1589 if (netif_running(ndev)) {
1590 netif_tx_disable(ndev);
1591 /* Disable interrupts by clearing the interrupt mask. */
1592 sh_eth_write(ndev, 0x0000, EESIPR);
1593 /* Stop the chip's Tx and Rx processes. */
1594 sh_eth_write(ndev, 0, EDTRR);
1595 sh_eth_write(ndev, 0, EDRRR);
1596 synchronize_irq(ndev->irq);
1597 }
1598
1599 /* Free all the skbuffs in the Rx queue. */
1600 sh_eth_ring_free(ndev);
1601 /* Free DMA buffer */
1602 sh_eth_free_dma_buffer(mdp);
1603
1604 /* Set new parameters */
1605 mdp->num_rx_ring = ring->rx_pending;
1606 mdp->num_tx_ring = ring->tx_pending;
1607
1608 ret = sh_eth_ring_init(ndev);
1609 if (ret < 0) {
1610 dev_err(&ndev->dev, "%s: sh_eth_ring_init failed.\n", __func__);
1611 return ret;
1612 }
1613 ret = sh_eth_dev_init(ndev, false);
1614 if (ret < 0) {
1615 dev_err(&ndev->dev, "%s: sh_eth_dev_init failed.\n", __func__);
1616 return ret;
1617 }
1618
1619 if (netif_running(ndev)) {
1620 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1621 /* Setting the Rx mode will start the Rx process. */
1622 sh_eth_write(ndev, EDRRR_R, EDRRR);
1623 netif_wake_queue(ndev);
1624 }
1625
1626 return 0;
1627}
1628
1502static const struct ethtool_ops sh_eth_ethtool_ops = { 1629static const struct ethtool_ops sh_eth_ethtool_ops = {
1503 .get_settings = sh_eth_get_settings, 1630 .get_settings = sh_eth_get_settings,
1504 .set_settings = sh_eth_set_settings, 1631 .set_settings = sh_eth_set_settings,
@@ -1509,6 +1636,8 @@ static const struct ethtool_ops sh_eth_ethtool_ops = {
1509 .get_strings = sh_eth_get_strings, 1636 .get_strings = sh_eth_get_strings,
1510 .get_ethtool_stats = sh_eth_get_ethtool_stats, 1637 .get_ethtool_stats = sh_eth_get_ethtool_stats,
1511 .get_sset_count = sh_eth_get_sset_count, 1638 .get_sset_count = sh_eth_get_sset_count,
1639 .get_ringparam = sh_eth_get_ringparam,
1640 .set_ringparam = sh_eth_set_ringparam,
1512}; 1641};
1513 1642
1514/* network device open function */ 1643/* network device open function */
@@ -1539,7 +1668,7 @@ static int sh_eth_open(struct net_device *ndev)
1539 goto out_free_irq; 1668 goto out_free_irq;
1540 1669
1541 /* device init */ 1670 /* device init */
1542 ret = sh_eth_dev_init(ndev); 1671 ret = sh_eth_dev_init(ndev, true);
1543 if (ret) 1672 if (ret)
1544 goto out_free_irq; 1673 goto out_free_irq;
1545 1674
@@ -1548,11 +1677,6 @@ static int sh_eth_open(struct net_device *ndev)
1548 if (ret) 1677 if (ret)
1549 goto out_free_irq; 1678 goto out_free_irq;
1550 1679
1551 /* Set the timer to check for link beat. */
1552 init_timer(&mdp->timer);
1553 mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */
1554 setup_timer(&mdp->timer, sh_eth_timer, (unsigned long)ndev);
1555
1556 return ret; 1680 return ret;
1557 1681
1558out_free_irq: 1682out_free_irq:
@@ -1577,11 +1701,8 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
1577 /* tx_errors count up */ 1701 /* tx_errors count up */
1578 ndev->stats.tx_errors++; 1702 ndev->stats.tx_errors++;
1579 1703
1580 /* timer off */
1581 del_timer_sync(&mdp->timer);
1582
1583 /* Free all the skbuffs in the Rx queue. */ 1704 /* Free all the skbuffs in the Rx queue. */
1584 for (i = 0; i < RX_RING_SIZE; i++) { 1705 for (i = 0; i < mdp->num_rx_ring; i++) {
1585 rxdesc = &mdp->rx_ring[i]; 1706 rxdesc = &mdp->rx_ring[i];
1586 rxdesc->status = 0; 1707 rxdesc->status = 0;
1587 rxdesc->addr = 0xBADF00D0; 1708 rxdesc->addr = 0xBADF00D0;
@@ -1589,18 +1710,14 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
1589 dev_kfree_skb(mdp->rx_skbuff[i]); 1710 dev_kfree_skb(mdp->rx_skbuff[i]);
1590 mdp->rx_skbuff[i] = NULL; 1711 mdp->rx_skbuff[i] = NULL;
1591 } 1712 }
1592 for (i = 0; i < TX_RING_SIZE; i++) { 1713 for (i = 0; i < mdp->num_tx_ring; i++) {
1593 if (mdp->tx_skbuff[i]) 1714 if (mdp->tx_skbuff[i])
1594 dev_kfree_skb(mdp->tx_skbuff[i]); 1715 dev_kfree_skb(mdp->tx_skbuff[i]);
1595 mdp->tx_skbuff[i] = NULL; 1716 mdp->tx_skbuff[i] = NULL;
1596 } 1717 }
1597 1718
1598 /* device init */ 1719 /* device init */
1599 sh_eth_dev_init(ndev); 1720 sh_eth_dev_init(ndev, true);
1600
1601 /* timer on */
1602 mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */
1603 add_timer(&mdp->timer);
1604} 1721}
1605 1722
1606/* Packet transmit function */ 1723/* Packet transmit function */
@@ -1612,7 +1729,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1612 unsigned long flags; 1729 unsigned long flags;
1613 1730
1614 spin_lock_irqsave(&mdp->lock, flags); 1731 spin_lock_irqsave(&mdp->lock, flags);
1615 if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) { 1732 if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
1616 if (!sh_eth_txfree(ndev)) { 1733 if (!sh_eth_txfree(ndev)) {
1617 if (netif_msg_tx_queued(mdp)) 1734 if (netif_msg_tx_queued(mdp))
1618 dev_warn(&ndev->dev, "TxFD exhausted.\n"); 1735 dev_warn(&ndev->dev, "TxFD exhausted.\n");
@@ -1623,7 +1740,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1623 } 1740 }
1624 spin_unlock_irqrestore(&mdp->lock, flags); 1741 spin_unlock_irqrestore(&mdp->lock, flags);
1625 1742
1626 entry = mdp->cur_tx % TX_RING_SIZE; 1743 entry = mdp->cur_tx % mdp->num_tx_ring;
1627 mdp->tx_skbuff[entry] = skb; 1744 mdp->tx_skbuff[entry] = skb;
1628 txdesc = &mdp->tx_ring[entry]; 1745 txdesc = &mdp->tx_ring[entry];
1629 /* soft swap. */ 1746 /* soft swap. */
@@ -1637,7 +1754,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1637 else 1754 else
1638 txdesc->buffer_length = skb->len; 1755 txdesc->buffer_length = skb->len;
1639 1756
1640 if (entry >= TX_RING_SIZE - 1) 1757 if (entry >= mdp->num_tx_ring - 1)
1641 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); 1758 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
1642 else 1759 else
1643 txdesc->status |= cpu_to_edmac(mdp, TD_TACT); 1760 txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
@@ -1654,7 +1771,6 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1654static int sh_eth_close(struct net_device *ndev) 1771static int sh_eth_close(struct net_device *ndev)
1655{ 1772{
1656 struct sh_eth_private *mdp = netdev_priv(ndev); 1773 struct sh_eth_private *mdp = netdev_priv(ndev);
1657 int ringsize;
1658 1774
1659 netif_stop_queue(ndev); 1775 netif_stop_queue(ndev);
1660 1776
@@ -1673,18 +1789,11 @@ static int sh_eth_close(struct net_device *ndev)
1673 1789
1674 free_irq(ndev->irq, ndev); 1790 free_irq(ndev->irq, ndev);
1675 1791
1676 del_timer_sync(&mdp->timer);
1677
1678 /* Free all the skbuffs in the Rx queue. */ 1792 /* Free all the skbuffs in the Rx queue. */
1679 sh_eth_ring_free(ndev); 1793 sh_eth_ring_free(ndev);
1680 1794
1681 /* free DMA buffer */ 1795 /* free DMA buffer */
1682 ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE; 1796 sh_eth_free_dma_buffer(mdp);
1683 dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma);
1684
1685 /* free DMA buffer */
1686 ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
1687 dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);
1688 1797
1689 pm_runtime_put_sync(&mdp->pdev->dev); 1798 pm_runtime_put_sync(&mdp->pdev->dev);
1690 1799
@@ -2275,6 +2384,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2275 ether_setup(ndev); 2384 ether_setup(ndev);
2276 2385
2277 mdp = netdev_priv(ndev); 2386 mdp = netdev_priv(ndev);
2387 mdp->num_tx_ring = TX_RING_SIZE;
2388 mdp->num_rx_ring = RX_RING_SIZE;
2278 mdp->addr = ioremap(res->start, resource_size(res)); 2389 mdp->addr = ioremap(res->start, resource_size(res));
2279 if (mdp->addr == NULL) { 2390 if (mdp->addr == NULL) {
2280 ret = -ENOMEM; 2391 ret = -ENOMEM;
@@ -2312,8 +2423,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2312 2423
2313 /* debug message level */ 2424 /* debug message level */
2314 mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE; 2425 mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
2315 mdp->post_rx = POST_RX >> (devno << 1);
2316 mdp->post_fw = POST_FW >> (devno << 1);
2317 2426
2318 /* read and set MAC address */ 2427 /* read and set MAC address */
2319 read_mac_address(ndev, pd->mac_addr); 2428 read_mac_address(ndev, pd->mac_addr);
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 57b8e1fc5d15..bae84fd2e73a 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -27,6 +27,10 @@
27#define TX_TIMEOUT (5*HZ) 27#define TX_TIMEOUT (5*HZ)
28#define TX_RING_SIZE 64 /* Tx ring size */ 28#define TX_RING_SIZE 64 /* Tx ring size */
29#define RX_RING_SIZE 64 /* Rx ring size */ 29#define RX_RING_SIZE 64 /* Rx ring size */
30#define TX_RING_MIN 64
31#define RX_RING_MIN 64
32#define TX_RING_MAX 1024
33#define RX_RING_MAX 1024
30#define ETHERSMALL 60 34#define ETHERSMALL 60
31#define PKT_BUF_SZ 1538 35#define PKT_BUF_SZ 1538
32#define SH_ETH_TSU_TIMEOUT_MS 500 36#define SH_ETH_TSU_TIMEOUT_MS 500
@@ -585,71 +589,6 @@ enum RPADIR_BIT {
585/* FDR */ 589/* FDR */
586#define DEFAULT_FDR_INIT 0x00000707 590#define DEFAULT_FDR_INIT 0x00000707
587 591
588enum phy_offsets {
589 PHY_CTRL = 0, PHY_STAT = 1, PHY_IDT1 = 2, PHY_IDT2 = 3,
590 PHY_ANA = 4, PHY_ANL = 5, PHY_ANE = 6,
591 PHY_16 = 16,
592};
593
594/* PHY_CTRL */
595enum PHY_CTRL_BIT {
596 PHY_C_RESET = 0x8000, PHY_C_LOOPBK = 0x4000, PHY_C_SPEEDSL = 0x2000,
597 PHY_C_ANEGEN = 0x1000, PHY_C_PWRDN = 0x0800, PHY_C_ISO = 0x0400,
598 PHY_C_RANEG = 0x0200, PHY_C_DUPLEX = 0x0100, PHY_C_COLT = 0x0080,
599};
600#define DM9161_PHY_C_ANEGEN 0 /* auto nego special */
601
602/* PHY_STAT */
603enum PHY_STAT_BIT {
604 PHY_S_100T4 = 0x8000, PHY_S_100X_F = 0x4000, PHY_S_100X_H = 0x2000,
605 PHY_S_10T_F = 0x1000, PHY_S_10T_H = 0x0800, PHY_S_ANEGC = 0x0020,
606 PHY_S_RFAULT = 0x0010, PHY_S_ANEGA = 0x0008, PHY_S_LINK = 0x0004,
607 PHY_S_JAB = 0x0002, PHY_S_EXTD = 0x0001,
608};
609
610/* PHY_ANA */
611enum PHY_ANA_BIT {
612 PHY_A_NP = 0x8000, PHY_A_ACK = 0x4000, PHY_A_RF = 0x2000,
613 PHY_A_FCS = 0x0400, PHY_A_T4 = 0x0200, PHY_A_FDX = 0x0100,
614 PHY_A_HDX = 0x0080, PHY_A_10FDX = 0x0040, PHY_A_10HDX = 0x0020,
615 PHY_A_SEL = 0x001e,
616};
617/* PHY_ANL */
618enum PHY_ANL_BIT {
619 PHY_L_NP = 0x8000, PHY_L_ACK = 0x4000, PHY_L_RF = 0x2000,
620 PHY_L_FCS = 0x0400, PHY_L_T4 = 0x0200, PHY_L_FDX = 0x0100,
621 PHY_L_HDX = 0x0080, PHY_L_10FDX = 0x0040, PHY_L_10HDX = 0x0020,
622 PHY_L_SEL = 0x001f,
623};
624
625/* PHY_ANE */
626enum PHY_ANE_BIT {
627 PHY_E_PDF = 0x0010, PHY_E_LPNPA = 0x0008, PHY_E_NPA = 0x0004,
628 PHY_E_PRX = 0x0002, PHY_E_LPANEGA = 0x0001,
629};
630
631/* DM9161 */
632enum PHY_16_BIT {
633 PHY_16_BP4B45 = 0x8000, PHY_16_BPSCR = 0x4000, PHY_16_BPALIGN = 0x2000,
634 PHY_16_BP_ADPOK = 0x1000, PHY_16_Repeatmode = 0x0800,
635 PHY_16_TXselect = 0x0400,
636 PHY_16_Rsvd = 0x0200, PHY_16_RMIIEnable = 0x0100,
637 PHY_16_Force100LNK = 0x0080,
638 PHY_16_APDLED_CTL = 0x0040, PHY_16_COLLED_CTL = 0x0020,
639 PHY_16_RPDCTR_EN = 0x0010,
640 PHY_16_ResetStMch = 0x0008, PHY_16_PreamSupr = 0x0004,
641 PHY_16_Sleepmode = 0x0002,
642 PHY_16_RemoteLoopOut = 0x0001,
643};
644
645#define POST_RX 0x08
646#define POST_FW 0x04
647#define POST0_RX (POST_RX)
648#define POST0_FW (POST_FW)
649#define POST1_RX (POST_RX >> 2)
650#define POST1_FW (POST_FW >> 2)
651#define POST_ALL (POST0_RX | POST0_FW | POST1_RX | POST1_FW)
652
653/* ARSTR */ 592/* ARSTR */
654enum ARSTR_BIT { ARSTR_ARSTR = 0x00000001, }; 593enum ARSTR_BIT { ARSTR_ARSTR = 0x00000001, };
655 594
@@ -757,6 +696,7 @@ struct sh_eth_cpu_data {
757 unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */ 696 unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */
758 unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */ 697 unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */
759 unsigned hw_crc:1; /* E-DMAC have CSMR */ 698 unsigned hw_crc:1; /* E-DMAC have CSMR */
699 unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */
760}; 700};
761 701
762struct sh_eth_private { 702struct sh_eth_private {
@@ -765,13 +705,14 @@ struct sh_eth_private {
765 const u16 *reg_offset; 705 const u16 *reg_offset;
766 void __iomem *addr; 706 void __iomem *addr;
767 void __iomem *tsu_addr; 707 void __iomem *tsu_addr;
708 u32 num_rx_ring;
709 u32 num_tx_ring;
768 dma_addr_t rx_desc_dma; 710 dma_addr_t rx_desc_dma;
769 dma_addr_t tx_desc_dma; 711 dma_addr_t tx_desc_dma;
770 struct sh_eth_rxdesc *rx_ring; 712 struct sh_eth_rxdesc *rx_ring;
771 struct sh_eth_txdesc *tx_ring; 713 struct sh_eth_txdesc *tx_ring;
772 struct sk_buff **rx_skbuff; 714 struct sk_buff **rx_skbuff;
773 struct sk_buff **tx_skbuff; 715 struct sk_buff **tx_skbuff;
774 struct timer_list timer;
775 spinlock_t lock; 716 spinlock_t lock;
776 u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */ 717 u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */
777 u32 cur_tx, dirty_tx; 718 u32 cur_tx, dirty_tx;
@@ -786,10 +727,6 @@ struct sh_eth_private {
786 int msg_enable; 727 int msg_enable;
787 int speed; 728 int speed;
788 int duplex; 729 int duplex;
789 u32 rx_int_var, tx_int_var; /* interrupt control variables */
790 char post_rx; /* POST receive */
791 char post_fw; /* POST forward */
792 struct net_device_stats tsu_stats; /* TSU forward status */
793 int port; /* for TSU */ 730 int port; /* for TSU */
794 int vlan_num_ids; /* for VLAN tag filter */ 731 int vlan_num_ids; /* for VLAN tag filter */
795 732
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index b95f2e1b33f0..70554a1b2b02 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1103,8 +1103,8 @@ static int efx_init_io(struct efx_nic *efx)
1103 * masks event though they reject 46 bit masks. 1103 * masks event though they reject 46 bit masks.
1104 */ 1104 */
1105 while (dma_mask > 0x7fffffffUL) { 1105 while (dma_mask > 0x7fffffffUL) {
1106 if (pci_dma_supported(pci_dev, dma_mask)) { 1106 if (dma_supported(&pci_dev->dev, dma_mask)) {
1107 rc = pci_set_dma_mask(pci_dev, dma_mask); 1107 rc = dma_set_mask(&pci_dev->dev, dma_mask);
1108 if (rc == 0) 1108 if (rc == 0)
1109 break; 1109 break;
1110 } 1110 }
@@ -1117,10 +1117,10 @@ static int efx_init_io(struct efx_nic *efx)
1117 } 1117 }
1118 netif_dbg(efx, probe, efx->net_dev, 1118 netif_dbg(efx, probe, efx->net_dev,
1119 "using DMA mask %llx\n", (unsigned long long) dma_mask); 1119 "using DMA mask %llx\n", (unsigned long long) dma_mask);
1120 rc = pci_set_consistent_dma_mask(pci_dev, dma_mask); 1120 rc = dma_set_coherent_mask(&pci_dev->dev, dma_mask);
1121 if (rc) { 1121 if (rc) {
1122 /* pci_set_consistent_dma_mask() is not *allowed* to 1122 /* dma_set_coherent_mask() is not *allowed* to
1123 * fail with a mask that pci_set_dma_mask() accepted, 1123 * fail with a mask that dma_set_mask() accepted,
1124 * but just in case... 1124 * but just in case...
1125 */ 1125 */
1126 netif_err(efx, probe, efx->net_dev, 1126 netif_err(efx, probe, efx->net_dev,
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index d725a8fbe1a6..182dbe2cc6e4 100644
--- a/drivers/net/ethernet/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -136,10 +136,10 @@ enum efx_loopback_mode {
136 * 136 *
137 * Reset methods are numbered in order of increasing scope. 137 * Reset methods are numbered in order of increasing scope.
138 * 138 *
139 * @RESET_TYPE_INVISIBLE: don't reset the PHYs or interrupts 139 * @RESET_TYPE_INVISIBLE: Reset datapath and MAC (Falcon only)
140 * @RESET_TYPE_ALL: reset everything but PCI core blocks 140 * @RESET_TYPE_ALL: Reset datapath, MAC and PHY
141 * @RESET_TYPE_WORLD: reset everything, save & restore PCI config 141 * @RESET_TYPE_WORLD: Reset as much as possible
142 * @RESET_TYPE_DISABLE: disable NIC 142 * @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled
143 * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog 143 * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
144 * @RESET_TYPE_INT_ERROR: reset due to internal error 144 * @RESET_TYPE_INT_ERROR: reset due to internal error
145 * @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors 145 * @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 03ded364c8da..10536f93b561 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -453,7 +453,7 @@ static void efx_ethtool_get_strings(struct net_device *net_dev,
453 switch (string_set) { 453 switch (string_set) {
454 case ETH_SS_STATS: 454 case ETH_SS_STATS:
455 for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) 455 for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++)
456 strncpy(ethtool_strings[i].name, 456 strlcpy(ethtool_strings[i].name,
457 efx_ethtool_stats[i].name, 457 efx_ethtool_stats[i].name,
458 sizeof(ethtool_strings[i].name)); 458 sizeof(ethtool_strings[i].name));
459 break; 459 break;
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 3a1ca2bd1548..12b573a8e82b 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -25,9 +25,12 @@
25#include "io.h" 25#include "io.h"
26#include "phy.h" 26#include "phy.h"
27#include "workarounds.h" 27#include "workarounds.h"
28#include "selftest.h"
28 29
29/* Hardware control for SFC4000 (aka Falcon). */ 30/* Hardware control for SFC4000 (aka Falcon). */
30 31
32static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
33
31static const unsigned int 34static const unsigned int
32/* "Large" EEPROM device: Atmel AT25640 or similar 35/* "Large" EEPROM device: Atmel AT25640 or similar
33 * 8 KB, 16-bit address, 32 B write block */ 36 * 8 KB, 16-bit address, 32 B write block */
@@ -1034,10 +1037,34 @@ static const struct efx_nic_register_test falcon_b0_register_tests[] = {
1034 EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) }, 1037 EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
1035}; 1038};
1036 1039
1037static int falcon_b0_test_registers(struct efx_nic *efx) 1040static int
1041falcon_b0_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
1038{ 1042{
1039 return efx_nic_test_registers(efx, falcon_b0_register_tests, 1043 enum reset_type reset_method = RESET_TYPE_INVISIBLE;
1040 ARRAY_SIZE(falcon_b0_register_tests)); 1044 int rc, rc2;
1045
1046 mutex_lock(&efx->mac_lock);
1047 if (efx->loopback_modes) {
1048 /* We need the 312 clock from the PHY to test the XMAC
1049 * registers, so move into XGMII loopback if available */
1050 if (efx->loopback_modes & (1 << LOOPBACK_XGMII))
1051 efx->loopback_mode = LOOPBACK_XGMII;
1052 else
1053 efx->loopback_mode = __ffs(efx->loopback_modes);
1054 }
1055 __efx_reconfigure_port(efx);
1056 mutex_unlock(&efx->mac_lock);
1057
1058 efx_reset_down(efx, reset_method);
1059
1060 tests->registers =
1061 efx_nic_test_registers(efx, falcon_b0_register_tests,
1062 ARRAY_SIZE(falcon_b0_register_tests))
1063 ? -1 : 1;
1064
1065 rc = falcon_reset_hw(efx, reset_method);
1066 rc2 = efx_reset_up(efx, reset_method, rc == 0);
1067 return rc ? rc : rc2;
1041} 1068}
1042 1069
1043/************************************************************************** 1070/**************************************************************************
@@ -1818,7 +1845,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
1818 .get_wol = falcon_get_wol, 1845 .get_wol = falcon_get_wol,
1819 .set_wol = falcon_set_wol, 1846 .set_wol = falcon_set_wol,
1820 .resume_wol = efx_port_dummy_op_void, 1847 .resume_wol = efx_port_dummy_op_void,
1821 .test_registers = falcon_b0_test_registers, 1848 .test_chip = falcon_b0_test_chip,
1822 .test_nvram = falcon_test_nvram, 1849 .test_nvram = falcon_test_nvram,
1823 1850
1824 .revision = EFX_REV_FALCON_B0, 1851 .revision = EFX_REV_FALCON_B0,
diff --git a/drivers/net/ethernet/sfc/falcon_xmac.c b/drivers/net/ethernet/sfc/falcon_xmac.c
index 6106ef15dee3..8333865d4c95 100644
--- a/drivers/net/ethernet/sfc/falcon_xmac.c
+++ b/drivers/net/ethernet/sfc/falcon_xmac.c
@@ -341,12 +341,12 @@ void falcon_update_stats_xmac(struct efx_nic *efx)
341 FALCON_STAT(efx, XgTxIpSrcErrPkt, tx_ip_src_error); 341 FALCON_STAT(efx, XgTxIpSrcErrPkt, tx_ip_src_error);
342 342
343 /* Update derived statistics */ 343 /* Update derived statistics */
344 mac_stats->tx_good_bytes = 344 efx_update_diff_stat(&mac_stats->tx_good_bytes,
345 (mac_stats->tx_bytes - mac_stats->tx_bad_bytes - 345 mac_stats->tx_bytes - mac_stats->tx_bad_bytes -
346 mac_stats->tx_control * 64); 346 mac_stats->tx_control * 64);
347 mac_stats->rx_bad_bytes = 347 efx_update_diff_stat(&mac_stats->rx_bad_bytes,
348 (mac_stats->rx_bytes - mac_stats->rx_good_bytes - 348 mac_stats->rx_bytes - mac_stats->rx_good_bytes -
349 mac_stats->rx_control * 64); 349 mac_stats->rx_control * 64);
350} 350}
351 351
352void falcon_poll_xmac(struct efx_nic *efx) 352void falcon_poll_xmac(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
index fea7f7300675..c3fd61f0a95c 100644
--- a/drivers/net/ethernet/sfc/filter.c
+++ b/drivers/net/ethernet/sfc/filter.c
@@ -662,7 +662,7 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
662 struct efx_filter_table *table = efx_filter_spec_table(state, spec); 662 struct efx_filter_table *table = efx_filter_spec_table(state, spec);
663 struct efx_filter_spec *saved_spec; 663 struct efx_filter_spec *saved_spec;
664 efx_oword_t filter; 664 efx_oword_t filter;
665 unsigned int filter_idx, depth; 665 unsigned int filter_idx, depth = 0;
666 u32 key; 666 u32 key;
667 int rc; 667 int rc;
668 668
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 17b6463e459c..fc5e7bbcbc9e 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1001,12 +1001,17 @@ static void efx_mcdi_exit_assertion(struct efx_nic *efx)
1001{ 1001{
1002 u8 inbuf[MC_CMD_REBOOT_IN_LEN]; 1002 u8 inbuf[MC_CMD_REBOOT_IN_LEN];
1003 1003
1004 /* Atomically reboot the mcfw out of the assertion handler */ 1004 /* If the MC is running debug firmware, it might now be
1005 * waiting for a debugger to attach, but we just want it to
1006 * reboot. We set a flag that makes the command a no-op if it
1007 * has already done so. We don't know what return code to
1008 * expect (0 or -EIO), so ignore it.
1009 */
1005 BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); 1010 BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
1006 MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 1011 MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
1007 MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION); 1012 MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
1008 efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN, 1013 (void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
1009 NULL, 0, NULL); 1014 NULL, 0, NULL);
1010} 1015}
1011 1016
1012int efx_mcdi_handle_assertion(struct efx_nic *efx) 1017int efx_mcdi_handle_assertion(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c
index fb7f65b59eb8..1d552f0664d7 100644
--- a/drivers/net/ethernet/sfc/mcdi_mon.c
+++ b/drivers/net/ethernet/sfc/mcdi_mon.c
@@ -222,6 +222,7 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
222 attr->index = index; 222 attr->index = index;
223 attr->type = type; 223 attr->type = type;
224 attr->limit_value = limit_value; 224 attr->limit_value = limit_value;
225 sysfs_attr_init(&attr->dev_attr.attr);
225 attr->dev_attr.attr.name = attr->name; 226 attr->dev_attr.attr.name = attr->name;
226 attr->dev_attr.attr.mode = S_IRUGO; 227 attr->dev_attr.attr.mode = S_IRUGO;
227 attr->dev_attr.show = reader; 228 attr->dev_attr.show = reader;
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index 0310b9f08c9b..db4beed97669 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -48,8 +48,7 @@
48 48
49/* Unused commands: 0x23, 0x27, 0x30, 0x31 */ 49/* Unused commands: 0x23, 0x27, 0x30, 0x31 */
50 50
51/** 51/* MCDI version 1
52 * MCDI version 1
53 * 52 *
54 * Each MCDI request starts with an MCDI_HEADER, which is a 32byte 53 * Each MCDI request starts with an MCDI_HEADER, which is a 32byte
55 * structure, filled in by the client. 54 * structure, filled in by the client.
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 0e575359af17..cd9c0a989692 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -68,6 +68,8 @@
68#define EFX_TXQ_TYPES 4 68#define EFX_TXQ_TYPES 4
69#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS) 69#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
70 70
71struct efx_self_tests;
72
71/** 73/**
72 * struct efx_special_buffer - An Efx special buffer 74 * struct efx_special_buffer - An Efx special buffer
73 * @addr: CPU base address of the buffer 75 * @addr: CPU base address of the buffer
@@ -100,7 +102,7 @@ struct efx_special_buffer {
100 * @len: Length of this fragment. 102 * @len: Length of this fragment.
101 * This field is zero when the queue slot is empty. 103 * This field is zero when the queue slot is empty.
102 * @continuation: True if this fragment is not the end of a packet. 104 * @continuation: True if this fragment is not the end of a packet.
103 * @unmap_single: True if pci_unmap_single should be used. 105 * @unmap_single: True if dma_unmap_single should be used.
104 * @unmap_len: Length of this fragment to unmap 106 * @unmap_len: Length of this fragment to unmap
105 */ 107 */
106struct efx_tx_buffer { 108struct efx_tx_buffer {
@@ -527,7 +529,7 @@ struct efx_phy_operations {
527}; 529};
528 530
529/** 531/**
530 * @enum efx_phy_mode - PHY operating mode flags 532 * enum efx_phy_mode - PHY operating mode flags
531 * @PHY_MODE_NORMAL: on and should pass traffic 533 * @PHY_MODE_NORMAL: on and should pass traffic
532 * @PHY_MODE_TX_DISABLED: on with TX disabled 534 * @PHY_MODE_TX_DISABLED: on with TX disabled
533 * @PHY_MODE_LOW_POWER: set to low power through MDIO 535 * @PHY_MODE_LOW_POWER: set to low power through MDIO
@@ -901,7 +903,8 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
901 * @get_wol: Get WoL configuration from driver state 903 * @get_wol: Get WoL configuration from driver state
902 * @set_wol: Push WoL configuration to the NIC 904 * @set_wol: Push WoL configuration to the NIC
903 * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume) 905 * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
904 * @test_registers: Test read/write functionality of control registers 906 * @test_chip: Test registers. Should use efx_nic_test_registers(), and is
907 * expected to reset the NIC.
905 * @test_nvram: Test validity of NVRAM contents 908 * @test_nvram: Test validity of NVRAM contents
906 * @revision: Hardware architecture revision 909 * @revision: Hardware architecture revision
907 * @mem_map_size: Memory BAR mapped size 910 * @mem_map_size: Memory BAR mapped size
@@ -946,7 +949,7 @@ struct efx_nic_type {
946 void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol); 949 void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
947 int (*set_wol)(struct efx_nic *efx, u32 type); 950 int (*set_wol)(struct efx_nic *efx, u32 type);
948 void (*resume_wol)(struct efx_nic *efx); 951 void (*resume_wol)(struct efx_nic *efx);
949 int (*test_registers)(struct efx_nic *efx); 952 int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests);
950 int (*test_nvram)(struct efx_nic *efx); 953 int (*test_nvram)(struct efx_nic *efx);
951 954
952 int revision; 955 int revision;
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 4a9a5beec8fc..326d799762d6 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -124,9 +124,6 @@ int efx_nic_test_registers(struct efx_nic *efx,
124 unsigned address = 0, i, j; 124 unsigned address = 0, i, j;
125 efx_oword_t mask, imask, original, reg, buf; 125 efx_oword_t mask, imask, original, reg, buf;
126 126
127 /* Falcon should be in loopback to isolate the XMAC from the PHY */
128 WARN_ON(!LOOPBACK_INTERNAL(efx));
129
130 for (i = 0; i < n_regs; ++i) { 127 for (i = 0; i < n_regs; ++i) {
131 address = regs[i].address; 128 address = regs[i].address;
132 mask = imask = regs[i].mask; 129 mask = imask = regs[i].mask;
@@ -308,8 +305,8 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
308int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, 305int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
309 unsigned int len) 306 unsigned int len)
310{ 307{
311 buffer->addr = pci_alloc_consistent(efx->pci_dev, len, 308 buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
312 &buffer->dma_addr); 309 &buffer->dma_addr, GFP_ATOMIC);
313 if (!buffer->addr) 310 if (!buffer->addr)
314 return -ENOMEM; 311 return -ENOMEM;
315 buffer->len = len; 312 buffer->len = len;
@@ -320,8 +317,8 @@ int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
320void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer) 317void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
321{ 318{
322 if (buffer->addr) { 319 if (buffer->addr) {
323 pci_free_consistent(efx->pci_dev, buffer->len, 320 dma_free_coherent(&efx->pci_dev->dev, buffer->len,
324 buffer->addr, buffer->dma_addr); 321 buffer->addr, buffer->dma_addr);
325 buffer->addr = NULL; 322 buffer->addr = NULL;
326 } 323 }
327} 324}
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index f48ccf6bb3b9..bab5cd9f5740 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -294,6 +294,24 @@ extern bool falcon_xmac_check_fault(struct efx_nic *efx);
294extern int falcon_reconfigure_xmac(struct efx_nic *efx); 294extern int falcon_reconfigure_xmac(struct efx_nic *efx);
295extern void falcon_update_stats_xmac(struct efx_nic *efx); 295extern void falcon_update_stats_xmac(struct efx_nic *efx);
296 296
297/* Some statistics are computed as A - B where A and B each increase
298 * linearly with some hardware counter(s) and the counters are read
299 * asynchronously. If the counters contributing to B are always read
300 * after those contributing to A, the computed value may be lower than
301 * the true value by some variable amount, and may decrease between
302 * subsequent computations.
303 *
304 * We should never allow statistics to decrease or to exceed the true
305 * value. Since the computed value will never be greater than the
306 * true value, we can achieve this by only storing the computed value
307 * when it increases.
308 */
309static inline void efx_update_diff_stat(u64 *stat, u64 diff)
310{
311 if ((s64)(diff - *stat) > 0)
312 *stat = diff;
313}
314
297/* Interrupts and test events */ 315/* Interrupts and test events */
298extern int efx_nic_init_interrupt(struct efx_nic *efx); 316extern int efx_nic_init_interrupt(struct efx_nic *efx);
299extern void efx_nic_enable_interrupts(struct efx_nic *efx); 317extern void efx_nic_enable_interrupts(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 243e91f3dff9..719319b89d7a 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -155,11 +155,11 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
155 rx_buf->len = skb_len - NET_IP_ALIGN; 155 rx_buf->len = skb_len - NET_IP_ALIGN;
156 rx_buf->flags = 0; 156 rx_buf->flags = 0;
157 157
158 rx_buf->dma_addr = pci_map_single(efx->pci_dev, 158 rx_buf->dma_addr = dma_map_single(&efx->pci_dev->dev,
159 skb->data, rx_buf->len, 159 skb->data, rx_buf->len,
160 PCI_DMA_FROMDEVICE); 160 DMA_FROM_DEVICE);
161 if (unlikely(pci_dma_mapping_error(efx->pci_dev, 161 if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
162 rx_buf->dma_addr))) { 162 rx_buf->dma_addr))) {
163 dev_kfree_skb_any(skb); 163 dev_kfree_skb_any(skb);
164 rx_buf->u.skb = NULL; 164 rx_buf->u.skb = NULL;
165 return -EIO; 165 return -EIO;
@@ -200,10 +200,10 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
200 efx->rx_buffer_order); 200 efx->rx_buffer_order);
201 if (unlikely(page == NULL)) 201 if (unlikely(page == NULL))
202 return -ENOMEM; 202 return -ENOMEM;
203 dma_addr = pci_map_page(efx->pci_dev, page, 0, 203 dma_addr = dma_map_page(&efx->pci_dev->dev, page, 0,
204 efx_rx_buf_size(efx), 204 efx_rx_buf_size(efx),
205 PCI_DMA_FROMDEVICE); 205 DMA_FROM_DEVICE);
206 if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) { 206 if (unlikely(dma_mapping_error(&efx->pci_dev->dev, dma_addr))) {
207 __free_pages(page, efx->rx_buffer_order); 207 __free_pages(page, efx->rx_buffer_order);
208 return -EIO; 208 return -EIO;
209 } 209 }
@@ -247,14 +247,14 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
247 247
248 state = page_address(rx_buf->u.page); 248 state = page_address(rx_buf->u.page);
249 if (--state->refcnt == 0) { 249 if (--state->refcnt == 0) {
250 pci_unmap_page(efx->pci_dev, 250 dma_unmap_page(&efx->pci_dev->dev,
251 state->dma_addr, 251 state->dma_addr,
252 efx_rx_buf_size(efx), 252 efx_rx_buf_size(efx),
253 PCI_DMA_FROMDEVICE); 253 DMA_FROM_DEVICE);
254 } 254 }
255 } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) { 255 } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
256 pci_unmap_single(efx->pci_dev, rx_buf->dma_addr, 256 dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
257 rx_buf->len, PCI_DMA_FROMDEVICE); 257 rx_buf->len, DMA_FROM_DEVICE);
258 } 258 }
259} 259}
260 260
@@ -336,6 +336,7 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
336/** 336/**
337 * efx_fast_push_rx_descriptors - push new RX descriptors quickly 337 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
338 * @rx_queue: RX descriptor queue 338 * @rx_queue: RX descriptor queue
339 *
339 * This will aim to fill the RX descriptor queue up to 340 * This will aim to fill the RX descriptor queue up to
340 * @rx_queue->@max_fill. If there is insufficient atomic 341 * @rx_queue->@max_fill. If there is insufficient atomic
341 * memory to do so, a slow fill will be scheduled. 342 * memory to do so, a slow fill will be scheduled.
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index de4c0069f5b2..96068d15b601 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -120,19 +120,6 @@ static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)
120 return rc; 120 return rc;
121} 121}
122 122
123static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
124{
125 int rc = 0;
126
127 /* Test register access */
128 if (efx->type->test_registers) {
129 rc = efx->type->test_registers(efx);
130 tests->registers = rc ? -1 : 1;
131 }
132
133 return rc;
134}
135
136/************************************************************************** 123/**************************************************************************
137 * 124 *
138 * Interrupt and event queue testing 125 * Interrupt and event queue testing
@@ -488,7 +475,7 @@ static int efx_end_loopback(struct efx_tx_queue *tx_queue,
488 skb = state->skbs[i]; 475 skb = state->skbs[i];
489 if (skb && !skb_shared(skb)) 476 if (skb && !skb_shared(skb))
490 ++tx_done; 477 ++tx_done;
491 dev_kfree_skb_any(skb); 478 dev_kfree_skb(skb);
492 } 479 }
493 480
494 netif_tx_unlock_bh(efx->net_dev); 481 netif_tx_unlock_bh(efx->net_dev);
@@ -699,8 +686,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
699{ 686{
700 enum efx_loopback_mode loopback_mode = efx->loopback_mode; 687 enum efx_loopback_mode loopback_mode = efx->loopback_mode;
701 int phy_mode = efx->phy_mode; 688 int phy_mode = efx->phy_mode;
702 enum reset_type reset_method = RESET_TYPE_INVISIBLE; 689 int rc_test = 0, rc_reset, rc;
703 int rc_test = 0, rc_reset = 0, rc;
704 690
705 efx_selftest_async_cancel(efx); 691 efx_selftest_async_cancel(efx);
706 692
@@ -737,44 +723,26 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
737 */ 723 */
738 netif_device_detach(efx->net_dev); 724 netif_device_detach(efx->net_dev);
739 725
740 mutex_lock(&efx->mac_lock); 726 if (efx->type->test_chip) {
741 if (efx->loopback_modes) { 727 rc_reset = efx->type->test_chip(efx, tests);
742 /* We need the 312 clock from the PHY to test the XMAC 728 if (rc_reset) {
743 * registers, so move into XGMII loopback if available */ 729 netif_err(efx, hw, efx->net_dev,
744 if (efx->loopback_modes & (1 << LOOPBACK_XGMII)) 730 "Unable to recover from chip test\n");
745 efx->loopback_mode = LOOPBACK_XGMII; 731 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
746 else 732 return rc_reset;
747 efx->loopback_mode = __ffs(efx->loopback_modes); 733 }
748 }
749
750 __efx_reconfigure_port(efx);
751 mutex_unlock(&efx->mac_lock);
752
753 /* free up all consumers of SRAM (including all the queues) */
754 efx_reset_down(efx, reset_method);
755
756 rc = efx_test_chip(efx, tests);
757 if (rc && !rc_test)
758 rc_test = rc;
759 734
760 /* reset the chip to recover from the register test */ 735 if ((tests->registers < 0) && !rc_test)
761 rc_reset = efx->type->reset(efx, reset_method); 736 rc_test = -EIO;
737 }
762 738
763 /* Ensure that the phy is powered and out of loopback 739 /* Ensure that the phy is powered and out of loopback
764 * for the bist and loopback tests */ 740 * for the bist and loopback tests */
741 mutex_lock(&efx->mac_lock);
765 efx->phy_mode &= ~PHY_MODE_LOW_POWER; 742 efx->phy_mode &= ~PHY_MODE_LOW_POWER;
766 efx->loopback_mode = LOOPBACK_NONE; 743 efx->loopback_mode = LOOPBACK_NONE;
767 744 __efx_reconfigure_port(efx);
768 rc = efx_reset_up(efx, reset_method, rc_reset == 0); 745 mutex_unlock(&efx->mac_lock);
769 if (rc && !rc_reset)
770 rc_reset = rc;
771
772 if (rc_reset) {
773 netif_err(efx, drv, efx->net_dev,
774 "Unable to recover from chip test\n");
775 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
776 return rc_reset;
777 }
778 746
779 rc = efx_test_phy(efx, tests, flags); 747 rc = efx_test_phy(efx, tests, flags);
780 if (rc && !rc_test) 748 if (rc && !rc_test)
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 9f8d7cea3967..6bafd216e55e 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -25,10 +25,12 @@
25#include "workarounds.h" 25#include "workarounds.h"
26#include "mcdi.h" 26#include "mcdi.h"
27#include "mcdi_pcol.h" 27#include "mcdi_pcol.h"
28#include "selftest.h"
28 29
29/* Hardware control for SFC9000 family including SFL9021 (aka Siena). */ 30/* Hardware control for SFC9000 family including SFL9021 (aka Siena). */
30 31
31static void siena_init_wol(struct efx_nic *efx); 32static void siena_init_wol(struct efx_nic *efx);
33static int siena_reset_hw(struct efx_nic *efx, enum reset_type method);
32 34
33 35
34static void siena_push_irq_moderation(struct efx_channel *channel) 36static void siena_push_irq_moderation(struct efx_channel *channel)
@@ -154,10 +156,29 @@ static const struct efx_nic_register_test siena_register_tests[] = {
154 EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000) }, 156 EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000) },
155}; 157};
156 158
157static int siena_test_registers(struct efx_nic *efx) 159static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
158{ 160{
159 return efx_nic_test_registers(efx, siena_register_tests, 161 enum reset_type reset_method = RESET_TYPE_ALL;
160 ARRAY_SIZE(siena_register_tests)); 162 int rc, rc2;
163
164 efx_reset_down(efx, reset_method);
165
166 /* Reset the chip immediately so that it is completely
167 * quiescent regardless of what any VF driver does.
168 */
169 rc = siena_reset_hw(efx, reset_method);
170 if (rc)
171 goto out;
172
173 tests->registers =
174 efx_nic_test_registers(efx, siena_register_tests,
175 ARRAY_SIZE(siena_register_tests))
176 ? -1 : 1;
177
178 rc = siena_reset_hw(efx, reset_method);
179out:
180 rc2 = efx_reset_up(efx, reset_method, rc == 0);
181 return rc ? rc : rc2;
161} 182}
162 183
163/************************************************************************** 184/**************************************************************************
@@ -437,8 +458,8 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
437 458
438 MAC_STAT(tx_bytes, TX_BYTES); 459 MAC_STAT(tx_bytes, TX_BYTES);
439 MAC_STAT(tx_bad_bytes, TX_BAD_BYTES); 460 MAC_STAT(tx_bad_bytes, TX_BAD_BYTES);
440 mac_stats->tx_good_bytes = (mac_stats->tx_bytes - 461 efx_update_diff_stat(&mac_stats->tx_good_bytes,
441 mac_stats->tx_bad_bytes); 462 mac_stats->tx_bytes - mac_stats->tx_bad_bytes);
442 MAC_STAT(tx_packets, TX_PKTS); 463 MAC_STAT(tx_packets, TX_PKTS);
443 MAC_STAT(tx_bad, TX_BAD_FCS_PKTS); 464 MAC_STAT(tx_bad, TX_BAD_FCS_PKTS);
444 MAC_STAT(tx_pause, TX_PAUSE_PKTS); 465 MAC_STAT(tx_pause, TX_PAUSE_PKTS);
@@ -471,8 +492,8 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
471 MAC_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS); 492 MAC_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS);
472 MAC_STAT(rx_bytes, RX_BYTES); 493 MAC_STAT(rx_bytes, RX_BYTES);
473 MAC_STAT(rx_bad_bytes, RX_BAD_BYTES); 494 MAC_STAT(rx_bad_bytes, RX_BAD_BYTES);
474 mac_stats->rx_good_bytes = (mac_stats->rx_bytes - 495 efx_update_diff_stat(&mac_stats->rx_good_bytes,
475 mac_stats->rx_bad_bytes); 496 mac_stats->rx_bytes - mac_stats->rx_bad_bytes);
476 MAC_STAT(rx_packets, RX_PKTS); 497 MAC_STAT(rx_packets, RX_PKTS);
477 MAC_STAT(rx_good, RX_GOOD_PKTS); 498 MAC_STAT(rx_good, RX_GOOD_PKTS);
478 MAC_STAT(rx_bad, RX_BAD_FCS_PKTS); 499 MAC_STAT(rx_bad, RX_BAD_FCS_PKTS);
@@ -649,7 +670,7 @@ const struct efx_nic_type siena_a0_nic_type = {
649 .get_wol = siena_get_wol, 670 .get_wol = siena_get_wol,
650 .set_wol = siena_set_wol, 671 .set_wol = siena_set_wol,
651 .resume_wol = siena_init_wol, 672 .resume_wol = siena_init_wol,
652 .test_registers = siena_test_registers, 673 .test_chip = siena_test_chip,
653 .test_nvram = efx_mcdi_nvram_test_all, 674 .test_nvram = efx_mcdi_nvram_test_all,
654 675
655 .revision = EFX_REV_SIENA_A0, 676 .revision = EFX_REV_SIENA_A0,
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 94d0365b31cd..9b225a7769f7 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -36,15 +36,15 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
36 unsigned int *bytes_compl) 36 unsigned int *bytes_compl)
37{ 37{
38 if (buffer->unmap_len) { 38 if (buffer->unmap_len) {
39 struct pci_dev *pci_dev = tx_queue->efx->pci_dev; 39 struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
40 dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len - 40 dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
41 buffer->unmap_len); 41 buffer->unmap_len);
42 if (buffer->unmap_single) 42 if (buffer->unmap_single)
43 pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len, 43 dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
44 PCI_DMA_TODEVICE); 44 DMA_TO_DEVICE);
45 else 45 else
46 pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len, 46 dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
47 PCI_DMA_TODEVICE); 47 DMA_TO_DEVICE);
48 buffer->unmap_len = 0; 48 buffer->unmap_len = 0;
49 buffer->unmap_single = false; 49 buffer->unmap_single = false;
50 } 50 }
@@ -138,7 +138,7 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
138netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) 138netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
139{ 139{
140 struct efx_nic *efx = tx_queue->efx; 140 struct efx_nic *efx = tx_queue->efx;
141 struct pci_dev *pci_dev = efx->pci_dev; 141 struct device *dma_dev = &efx->pci_dev->dev;
142 struct efx_tx_buffer *buffer; 142 struct efx_tx_buffer *buffer;
143 skb_frag_t *fragment; 143 skb_frag_t *fragment;
144 unsigned int len, unmap_len = 0, fill_level, insert_ptr; 144 unsigned int len, unmap_len = 0, fill_level, insert_ptr;
@@ -167,17 +167,17 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
167 fill_level = tx_queue->insert_count - tx_queue->old_read_count; 167 fill_level = tx_queue->insert_count - tx_queue->old_read_count;
168 q_space = efx->txq_entries - 1 - fill_level; 168 q_space = efx->txq_entries - 1 - fill_level;
169 169
170 /* Map for DMA. Use pci_map_single rather than pci_map_page 170 /* Map for DMA. Use dma_map_single rather than dma_map_page
171 * since this is more efficient on machines with sparse 171 * since this is more efficient on machines with sparse
172 * memory. 172 * memory.
173 */ 173 */
174 unmap_single = true; 174 unmap_single = true;
175 dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE); 175 dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
176 176
177 /* Process all fragments */ 177 /* Process all fragments */
178 while (1) { 178 while (1) {
179 if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr))) 179 if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
180 goto pci_err; 180 goto dma_err;
181 181
182 /* Store fields for marking in the per-fragment final 182 /* Store fields for marking in the per-fragment final
183 * descriptor */ 183 * descriptor */
@@ -246,7 +246,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
246 i++; 246 i++;
247 /* Map for DMA */ 247 /* Map for DMA */
248 unmap_single = false; 248 unmap_single = false;
249 dma_addr = skb_frag_dma_map(&pci_dev->dev, fragment, 0, len, 249 dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
250 DMA_TO_DEVICE); 250 DMA_TO_DEVICE);
251 } 251 }
252 252
@@ -261,7 +261,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
261 261
262 return NETDEV_TX_OK; 262 return NETDEV_TX_OK;
263 263
264 pci_err: 264 dma_err:
265 netif_err(efx, tx_err, efx->net_dev, 265 netif_err(efx, tx_err, efx->net_dev,
266 " TX queue %d could not map skb with %d bytes %d " 266 " TX queue %d could not map skb with %d bytes %d "
267 "fragments for DMA\n", tx_queue->queue, skb->len, 267 "fragments for DMA\n", tx_queue->queue, skb->len,
@@ -284,11 +284,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
284 /* Free the fragment we were mid-way through pushing */ 284 /* Free the fragment we were mid-way through pushing */
285 if (unmap_len) { 285 if (unmap_len) {
286 if (unmap_single) 286 if (unmap_single)
287 pci_unmap_single(pci_dev, unmap_addr, unmap_len, 287 dma_unmap_single(dma_dev, unmap_addr, unmap_len,
288 PCI_DMA_TODEVICE); 288 DMA_TO_DEVICE);
289 else 289 else
290 pci_unmap_page(pci_dev, unmap_addr, unmap_len, 290 dma_unmap_page(dma_dev, unmap_addr, unmap_len,
291 PCI_DMA_TODEVICE); 291 DMA_TO_DEVICE);
292 } 292 }
293 293
294 return rc; 294 return rc;
@@ -651,17 +651,8 @@ static __be16 efx_tso_check_protocol(struct sk_buff *skb)
651 EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto != 651 EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
652 protocol); 652 protocol);
653 if (protocol == htons(ETH_P_8021Q)) { 653 if (protocol == htons(ETH_P_8021Q)) {
654 /* Find the encapsulated protocol; reset network header
655 * and transport header based on that. */
656 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; 654 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
657 protocol = veh->h_vlan_encapsulated_proto; 655 protocol = veh->h_vlan_encapsulated_proto;
658 skb_set_network_header(skb, sizeof(*veh));
659 if (protocol == htons(ETH_P_IP))
660 skb_set_transport_header(skb, sizeof(*veh) +
661 4 * ip_hdr(skb)->ihl);
662 else if (protocol == htons(ETH_P_IPV6))
663 skb_set_transport_header(skb, sizeof(*veh) +
664 sizeof(struct ipv6hdr));
665 } 656 }
666 657
667 if (protocol == htons(ETH_P_IP)) { 658 if (protocol == htons(ETH_P_IP)) {
@@ -684,20 +675,19 @@ static __be16 efx_tso_check_protocol(struct sk_buff *skb)
684 */ 675 */
685static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue) 676static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
686{ 677{
687 678 struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
688 struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
689 struct efx_tso_header *tsoh; 679 struct efx_tso_header *tsoh;
690 dma_addr_t dma_addr; 680 dma_addr_t dma_addr;
691 u8 *base_kva, *kva; 681 u8 *base_kva, *kva;
692 682
693 base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr); 683 base_kva = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr, GFP_ATOMIC);
694 if (base_kva == NULL) { 684 if (base_kva == NULL) {
695 netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev, 685 netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
696 "Unable to allocate page for TSO headers\n"); 686 "Unable to allocate page for TSO headers\n");
697 return -ENOMEM; 687 return -ENOMEM;
698 } 688 }
699 689
700 /* pci_alloc_consistent() allocates pages. */ 690 /* dma_alloc_coherent() allocates pages. */
701 EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u)); 691 EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));
702 692
703 for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) { 693 for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
@@ -714,7 +704,7 @@ static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
714/* Free up a TSO header, and all others in the same page. */ 704/* Free up a TSO header, and all others in the same page. */
715static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue, 705static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
716 struct efx_tso_header *tsoh, 706 struct efx_tso_header *tsoh,
717 struct pci_dev *pci_dev) 707 struct device *dma_dev)
718{ 708{
719 struct efx_tso_header **p; 709 struct efx_tso_header **p;
720 unsigned long base_kva; 710 unsigned long base_kva;
@@ -731,7 +721,7 @@ static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
731 p = &(*p)->next; 721 p = &(*p)->next;
732 } 722 }
733 723
734 pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma); 724 dma_free_coherent(dma_dev, PAGE_SIZE, (void *)base_kva, base_dma);
735} 725}
736 726
737static struct efx_tso_header * 727static struct efx_tso_header *
@@ -743,11 +733,11 @@ efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
743 if (unlikely(!tsoh)) 733 if (unlikely(!tsoh))
744 return NULL; 734 return NULL;
745 735
746 tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev, 736 tsoh->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
747 TSOH_BUFFER(tsoh), header_len, 737 TSOH_BUFFER(tsoh), header_len,
748 PCI_DMA_TODEVICE); 738 DMA_TO_DEVICE);
749 if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev, 739 if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
750 tsoh->dma_addr))) { 740 tsoh->dma_addr))) {
751 kfree(tsoh); 741 kfree(tsoh);
752 return NULL; 742 return NULL;
753 } 743 }
@@ -759,9 +749,9 @@ efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
759static void 749static void
760efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh) 750efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
761{ 751{
762 pci_unmap_single(tx_queue->efx->pci_dev, 752 dma_unmap_single(&tx_queue->efx->pci_dev->dev,
763 tsoh->dma_addr, tsoh->unmap_len, 753 tsoh->dma_addr, tsoh->unmap_len,
764 PCI_DMA_TODEVICE); 754 DMA_TO_DEVICE);
765 kfree(tsoh); 755 kfree(tsoh);
766} 756}
767 757
@@ -892,13 +882,13 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
892 unmap_addr = (buffer->dma_addr + buffer->len - 882 unmap_addr = (buffer->dma_addr + buffer->len -
893 buffer->unmap_len); 883 buffer->unmap_len);
894 if (buffer->unmap_single) 884 if (buffer->unmap_single)
895 pci_unmap_single(tx_queue->efx->pci_dev, 885 dma_unmap_single(&tx_queue->efx->pci_dev->dev,
896 unmap_addr, buffer->unmap_len, 886 unmap_addr, buffer->unmap_len,
897 PCI_DMA_TODEVICE); 887 DMA_TO_DEVICE);
898 else 888 else
899 pci_unmap_page(tx_queue->efx->pci_dev, 889 dma_unmap_page(&tx_queue->efx->pci_dev->dev,
900 unmap_addr, buffer->unmap_len, 890 unmap_addr, buffer->unmap_len,
901 PCI_DMA_TODEVICE); 891 DMA_TO_DEVICE);
902 buffer->unmap_len = 0; 892 buffer->unmap_len = 0;
903 } 893 }
904 buffer->len = 0; 894 buffer->len = 0;
@@ -927,7 +917,6 @@ static void tso_start(struct tso_state *st, const struct sk_buff *skb)
927 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn); 917 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
928 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst); 918 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);
929 919
930 st->packet_space = st->full_packet_size;
931 st->out_len = skb->len - st->header_len; 920 st->out_len = skb->len - st->header_len;
932 st->unmap_len = 0; 921 st->unmap_len = 0;
933 st->unmap_single = false; 922 st->unmap_single = false;
@@ -954,9 +943,9 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
954 int hl = st->header_len; 943 int hl = st->header_len;
955 int len = skb_headlen(skb) - hl; 944 int len = skb_headlen(skb) - hl;
956 945
957 st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl, 946 st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
958 len, PCI_DMA_TODEVICE); 947 len, DMA_TO_DEVICE);
959 if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) { 948 if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
960 st->unmap_single = true; 949 st->unmap_single = true;
961 st->unmap_len = len; 950 st->unmap_len = len;
962 st->in_len = len; 951 st->in_len = len;
@@ -1008,7 +997,7 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
1008 buffer->continuation = !end_of_packet; 997 buffer->continuation = !end_of_packet;
1009 998
1010 if (st->in_len == 0) { 999 if (st->in_len == 0) {
1011 /* Transfer ownership of the pci mapping */ 1000 /* Transfer ownership of the DMA mapping */
1012 buffer->unmap_len = st->unmap_len; 1001 buffer->unmap_len = st->unmap_len;
1013 buffer->unmap_single = st->unmap_single; 1002 buffer->unmap_single = st->unmap_single;
1014 st->unmap_len = 0; 1003 st->unmap_len = 0;
@@ -1181,18 +1170,18 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1181 1170
1182 mem_err: 1171 mem_err:
1183 netif_err(efx, tx_err, efx->net_dev, 1172 netif_err(efx, tx_err, efx->net_dev,
1184 "Out of memory for TSO headers, or PCI mapping error\n"); 1173 "Out of memory for TSO headers, or DMA mapping error\n");
1185 dev_kfree_skb_any(skb); 1174 dev_kfree_skb_any(skb);
1186 1175
1187 unwind: 1176 unwind:
1188 /* Free the DMA mapping we were in the process of writing out */ 1177 /* Free the DMA mapping we were in the process of writing out */
1189 if (state.unmap_len) { 1178 if (state.unmap_len) {
1190 if (state.unmap_single) 1179 if (state.unmap_single)
1191 pci_unmap_single(efx->pci_dev, state.unmap_addr, 1180 dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
1192 state.unmap_len, PCI_DMA_TODEVICE); 1181 state.unmap_len, DMA_TO_DEVICE);
1193 else 1182 else
1194 pci_unmap_page(efx->pci_dev, state.unmap_addr, 1183 dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
1195 state.unmap_len, PCI_DMA_TODEVICE); 1184 state.unmap_len, DMA_TO_DEVICE);
1196 } 1185 }
1197 1186
1198 efx_enqueue_unwind(tx_queue); 1187 efx_enqueue_unwind(tx_queue);
@@ -1216,5 +1205,5 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue)
1216 1205
1217 while (tx_queue->tso_headers_free != NULL) 1206 while (tx_queue->tso_headers_free != NULL)
1218 efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free, 1207 efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
1219 tx_queue->efx->pci_dev); 1208 &tx_queue->efx->pci_dev->dev);
1220} 1209}
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index ac149d99f78f..b5ba3084c7fc 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -583,7 +583,7 @@ static inline void ioc3_rx(struct net_device *dev)
583 unsigned long *rxr; 583 unsigned long *rxr;
584 u32 w0, err; 584 u32 w0, err;
585 585
586 rxr = (unsigned long *) ip->rxr; /* Ring base */ 586 rxr = ip->rxr; /* Ring base */
587 rx_entry = ip->rx_ci; /* RX consume index */ 587 rx_entry = ip->rx_ci; /* RX consume index */
588 n_entry = ip->rx_pi; 588 n_entry = ip->rx_pi;
589 589
@@ -903,7 +903,7 @@ static void ioc3_alloc_rings(struct net_device *dev)
903 if (ip->rxr == NULL) { 903 if (ip->rxr == NULL) {
904 /* Allocate and initialize rx ring. 4kb = 512 entries */ 904 /* Allocate and initialize rx ring. 4kb = 512 entries */
905 ip->rxr = (unsigned long *) get_zeroed_page(GFP_ATOMIC); 905 ip->rxr = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
906 rxr = (unsigned long *) ip->rxr; 906 rxr = ip->rxr;
907 if (!rxr) 907 if (!rxr)
908 printk("ioc3_alloc_rings(): get_zeroed_page() failed!\n"); 908 printk("ioc3_alloc_rings(): get_zeroed_page() failed!\n");
909 909
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 8814b2f5d46f..8d15f7a74b45 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -773,7 +773,7 @@ static int smc911x_phy_fixed(struct net_device *dev)
773 return 1; 773 return 1;
774} 774}
775 775
776/* 776/**
777 * smc911x_phy_reset - reset the phy 777 * smc911x_phy_reset - reset the phy
778 * @dev: net device 778 * @dev: net device
779 * @phy: phy address 779 * @phy: phy address
@@ -819,7 +819,7 @@ static int smc911x_phy_reset(struct net_device *dev, int phy)
819 return reg & PMT_CTRL_PHY_RST_; 819 return reg & PMT_CTRL_PHY_RST_;
820} 820}
821 821
822/* 822/**
823 * smc911x_phy_powerdown - powerdown phy 823 * smc911x_phy_powerdown - powerdown phy
824 * @dev: net device 824 * @dev: net device
825 * @phy: phy address 825 * @phy: phy address
@@ -837,7 +837,7 @@ static void smc911x_phy_powerdown(struct net_device *dev, int phy)
837 SMC_SET_PHY_BMCR(lp, phy, bmcr); 837 SMC_SET_PHY_BMCR(lp, phy, bmcr);
838} 838}
839 839
840/* 840/**
841 * smc911x_phy_check_media - check the media status and adjust BMCR 841 * smc911x_phy_check_media - check the media status and adjust BMCR
842 * @dev: net device 842 * @dev: net device
843 * @init: set true for initialisation 843 * @init: set true for initialisation
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index fee449355014..318adc935a53 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -942,7 +942,7 @@ static int smc_phy_fixed(struct net_device *dev)
942 return 1; 942 return 1;
943} 943}
944 944
945/* 945/**
946 * smc_phy_reset - reset the phy 946 * smc_phy_reset - reset the phy
947 * @dev: net device 947 * @dev: net device
948 * @phy: phy address 948 * @phy: phy address
@@ -976,7 +976,7 @@ static int smc_phy_reset(struct net_device *dev, int phy)
976 return bmcr & BMCR_RESET; 976 return bmcr & BMCR_RESET;
977} 977}
978 978
979/* 979/**
980 * smc_phy_powerdown - powerdown phy 980 * smc_phy_powerdown - powerdown phy
981 * @dev: net device 981 * @dev: net device
982 * 982 *
@@ -1000,7 +1000,7 @@ static void smc_phy_powerdown(struct net_device *dev)
1000 smc_phy_write(dev, phy, MII_BMCR, bmcr | BMCR_PDOWN); 1000 smc_phy_write(dev, phy, MII_BMCR, bmcr | BMCR_PDOWN);
1001} 1001}
1002 1002
1003/* 1003/**
1004 * smc_phy_check_media - check the media status and adjust TCR 1004 * smc_phy_check_media - check the media status and adjust TCR
1005 * @dev: net device 1005 * @dev: net device
1006 * @init: set true for initialisation 1006 * @init: set true for initialisation
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 1466e5d2af44..62d1baf111ea 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1442,6 +1442,14 @@ smsc911x_set_hw_mac_address(struct smsc911x_data *pdata, u8 dev_addr[6])
1442 smsc911x_mac_write(pdata, ADDRL, mac_low32); 1442 smsc911x_mac_write(pdata, ADDRL, mac_low32);
1443} 1443}
1444 1444
1445static void smsc911x_disable_irq_chip(struct net_device *dev)
1446{
1447 struct smsc911x_data *pdata = netdev_priv(dev);
1448
1449 smsc911x_reg_write(pdata, INT_EN, 0);
1450 smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
1451}
1452
1445static int smsc911x_open(struct net_device *dev) 1453static int smsc911x_open(struct net_device *dev)
1446{ 1454{
1447 struct smsc911x_data *pdata = netdev_priv(dev); 1455 struct smsc911x_data *pdata = netdev_priv(dev);
@@ -1494,8 +1502,7 @@ static int smsc911x_open(struct net_device *dev)
1494 spin_unlock_irq(&pdata->mac_lock); 1502 spin_unlock_irq(&pdata->mac_lock);
1495 1503
1496 /* Initialise irqs, but leave all sources disabled */ 1504 /* Initialise irqs, but leave all sources disabled */
1497 smsc911x_reg_write(pdata, INT_EN, 0); 1505 smsc911x_disable_irq_chip(dev);
1498 smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
1499 1506
1500 /* Set interrupt deassertion to 100uS */ 1507 /* Set interrupt deassertion to 100uS */
1501 intcfg = ((10 << 24) | INT_CFG_IRQ_EN_); 1508 intcfg = ((10 << 24) | INT_CFG_IRQ_EN_);
@@ -2215,9 +2222,6 @@ static int __devinit smsc911x_init(struct net_device *dev)
2215 if (smsc911x_soft_reset(pdata)) 2222 if (smsc911x_soft_reset(pdata))
2216 return -ENODEV; 2223 return -ENODEV;
2217 2224
2218 /* Disable all interrupt sources until we bring the device up */
2219 smsc911x_reg_write(pdata, INT_EN, 0);
2220
2221 ether_setup(dev); 2225 ether_setup(dev);
2222 dev->flags |= IFF_MULTICAST; 2226 dev->flags |= IFF_MULTICAST;
2223 netif_napi_add(dev, &pdata->napi, smsc911x_poll, SMSC_NAPI_WEIGHT); 2227 netif_napi_add(dev, &pdata->napi, smsc911x_poll, SMSC_NAPI_WEIGHT);
@@ -2434,8 +2438,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
2434 smsc911x_reg_write(pdata, INT_CFG, intcfg); 2438 smsc911x_reg_write(pdata, INT_CFG, intcfg);
2435 2439
2436 /* Ensure interrupts are globally disabled before connecting ISR */ 2440 /* Ensure interrupts are globally disabled before connecting ISR */
2437 smsc911x_reg_write(pdata, INT_EN, 0); 2441 smsc911x_disable_irq_chip(dev);
2438 smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
2439 2442
2440 retval = request_irq(dev->irq, smsc911x_irqhandler, 2443 retval = request_irq(dev->irq, smsc911x_irqhandler,
2441 irq_flags | IRQF_SHARED, dev->name, dev); 2444 irq_flags | IRQF_SHARED, dev->name, dev);
@@ -2485,7 +2488,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
2485 eth_hw_addr_random(dev); 2488 eth_hw_addr_random(dev);
2486 smsc911x_set_hw_mac_address(pdata, dev->dev_addr); 2489 smsc911x_set_hw_mac_address(pdata, dev->dev_addr);
2487 SMSC_TRACE(pdata, probe, 2490 SMSC_TRACE(pdata, probe,
2488 "MAC Address is set to random_ether_addr"); 2491 "MAC Address is set to eth_random_addr");
2489 } 2492 }
2490 } 2493 }
2491 2494
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index fd33b21f6c96..1fcd914ec39b 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -1640,8 +1640,7 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1640 goto out_free_io_4; 1640 goto out_free_io_4;
1641 1641
1642 /* descriptors are aligned due to the nature of pci_alloc_consistent */ 1642 /* descriptors are aligned due to the nature of pci_alloc_consistent */
1643 pd->tx_ring = (struct smsc9420_dma_desc *) 1643 pd->tx_ring = (pd->rx_ring + RX_RING_SIZE);
1644 (pd->rx_ring + RX_RING_SIZE);
1645 pd->tx_dma_addr = pd->rx_dma_addr + 1644 pd->tx_dma_addr = pd->rx_dma_addr +
1646 sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE; 1645 sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE;
1647 1646
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index bcd54d6e94fd..e2d083228f3a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -95,6 +95,16 @@ struct stmmac_extra_stats {
95 unsigned long poll_n; 95 unsigned long poll_n;
96 unsigned long sched_timer_n; 96 unsigned long sched_timer_n;
97 unsigned long normal_irq_n; 97 unsigned long normal_irq_n;
98 unsigned long mmc_tx_irq_n;
99 unsigned long mmc_rx_irq_n;
100 unsigned long mmc_rx_csum_offload_irq_n;
101 /* EEE */
102 unsigned long irq_receive_pmt_irq_n;
103 unsigned long irq_tx_path_in_lpi_mode_n;
104 unsigned long irq_tx_path_exit_lpi_mode_n;
105 unsigned long irq_rx_path_in_lpi_mode_n;
106 unsigned long irq_rx_path_exit_lpi_mode_n;
107 unsigned long phy_eee_wakeup_error_n;
98}; 108};
99 109
100/* CSR Frequency Access Defines*/ 110/* CSR Frequency Access Defines*/
@@ -162,6 +172,17 @@ enum tx_dma_irq_status {
162 handle_tx_rx = 3, 172 handle_tx_rx = 3,
163}; 173};
164 174
175enum core_specific_irq_mask {
176 core_mmc_tx_irq = 1,
177 core_mmc_rx_irq = 2,
178 core_mmc_rx_csum_offload_irq = 4,
179 core_irq_receive_pmt_irq = 8,
180 core_irq_tx_path_in_lpi_mode = 16,
181 core_irq_tx_path_exit_lpi_mode = 32,
182 core_irq_rx_path_in_lpi_mode = 64,
183 core_irq_rx_path_exit_lpi_mode = 128,
184};
185
165/* DMA HW capabilities */ 186/* DMA HW capabilities */
166struct dma_features { 187struct dma_features {
167 unsigned int mbps_10_100; 188 unsigned int mbps_10_100;
@@ -208,6 +229,10 @@ struct dma_features {
208#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */ 229#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
209#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */ 230#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
210 231
232/* Default LPI timers */
233#define STMMAC_DEFAULT_LIT_LS_TIMER 0x3E8
234#define STMMAC_DEFAULT_TWT_LS_TIMER 0x0
235
211struct stmmac_desc_ops { 236struct stmmac_desc_ops {
212 /* DMA RX descriptor ring initialization */ 237 /* DMA RX descriptor ring initialization */
213 void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size, 238 void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size,
@@ -278,7 +303,7 @@ struct stmmac_ops {
278 /* Dump MAC registers */ 303 /* Dump MAC registers */
279 void (*dump_regs) (void __iomem *ioaddr); 304 void (*dump_regs) (void __iomem *ioaddr);
280 /* Handle extra events on specific interrupts hw dependent */ 305 /* Handle extra events on specific interrupts hw dependent */
281 void (*host_irq_status) (void __iomem *ioaddr); 306 int (*host_irq_status) (void __iomem *ioaddr);
282 /* Multicast filter setting */ 307 /* Multicast filter setting */
283 void (*set_filter) (struct net_device *dev, int id); 308 void (*set_filter) (struct net_device *dev, int id);
284 /* Flow control setting */ 309 /* Flow control setting */
@@ -291,6 +316,10 @@ struct stmmac_ops {
291 unsigned int reg_n); 316 unsigned int reg_n);
292 void (*get_umac_addr) (void __iomem *ioaddr, unsigned char *addr, 317 void (*get_umac_addr) (void __iomem *ioaddr, unsigned char *addr,
293 unsigned int reg_n); 318 unsigned int reg_n);
319 void (*set_eee_mode) (void __iomem *ioaddr);
320 void (*reset_eee_mode) (void __iomem *ioaddr);
321 void (*set_eee_timer) (void __iomem *ioaddr, int ls, int tw);
322 void (*set_eee_pls) (void __iomem *ioaddr, int link);
294}; 323};
295 324
296struct mac_link { 325struct mac_link {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 23478bf4ed7a..f90fcb5f9573 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -36,6 +36,7 @@
36 36
37#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */ 37#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */
38enum dwmac1000_irq_status { 38enum dwmac1000_irq_status {
39 lpiis_irq = 0x400,
39 time_stamp_irq = 0x0200, 40 time_stamp_irq = 0x0200,
40 mmc_rx_csum_offload_irq = 0x0080, 41 mmc_rx_csum_offload_irq = 0x0080,
41 mmc_tx_irq = 0x0040, 42 mmc_tx_irq = 0x0040,
@@ -60,6 +61,25 @@ enum power_event {
60 power_down = 0x00000001, 61 power_down = 0x00000001,
61}; 62};
62 63
64/* Energy Efficient Ethernet (EEE)
65 *
66 * LPI status, timer and control register offset
67 */
68#define LPI_CTRL_STATUS 0x0030
69#define LPI_TIMER_CTRL 0x0034
70
71/* LPI control and status defines */
72#define LPI_CTRL_STATUS_LPITXA 0x00080000 /* Enable LPI TX Automate */
73#define LPI_CTRL_STATUS_PLSEN 0x00040000 /* Enable PHY Link Status */
74#define LPI_CTRL_STATUS_PLS 0x00020000 /* PHY Link Status */
75#define LPI_CTRL_STATUS_LPIEN 0x00010000 /* LPI Enable */
76#define LPI_CTRL_STATUS_RLPIST 0x00000200 /* Receive LPI state */
77#define LPI_CTRL_STATUS_TLPIST 0x00000100 /* Transmit LPI state */
78#define LPI_CTRL_STATUS_RLPIEX 0x00000008 /* Receive LPI Exit */
79#define LPI_CTRL_STATUS_RLPIEN 0x00000004 /* Receive LPI Entry */
80#define LPI_CTRL_STATUS_TLPIEX 0x00000002 /* Transmit LPI Exit */
81#define LPI_CTRL_STATUS_TLPIEN 0x00000001 /* Transmit LPI Entry */
82
63/* GMAC HW ADDR regs */ 83/* GMAC HW ADDR regs */
64#define GMAC_ADDR_HIGH(reg) (((reg > 15) ? 0x00000800 : 0x00000040) + \ 84#define GMAC_ADDR_HIGH(reg) (((reg > 15) ? 0x00000800 : 0x00000040) + \
65 (reg * 8)) 85 (reg * 8))
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index b5e4d02f15c9..bfe022605498 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -194,26 +194,107 @@ static void dwmac1000_pmt(void __iomem *ioaddr, unsigned long mode)
194} 194}
195 195
196 196
197static void dwmac1000_irq_status(void __iomem *ioaddr) 197static int dwmac1000_irq_status(void __iomem *ioaddr)
198{ 198{
199 u32 intr_status = readl(ioaddr + GMAC_INT_STATUS); 199 u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
200 int status = 0;
200 201
201 /* Not used events (e.g. MMC interrupts) are not handled. */ 202 /* Not used events (e.g. MMC interrupts) are not handled. */
202 if ((intr_status & mmc_tx_irq)) 203 if ((intr_status & mmc_tx_irq)) {
203 CHIP_DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n", 204 CHIP_DBG(KERN_INFO "GMAC: MMC tx interrupt: 0x%08x\n",
204 readl(ioaddr + GMAC_MMC_TX_INTR)); 205 readl(ioaddr + GMAC_MMC_TX_INTR));
205 if (unlikely(intr_status & mmc_rx_irq)) 206 status |= core_mmc_tx_irq;
206 CHIP_DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n", 207 }
208 if (unlikely(intr_status & mmc_rx_irq)) {
209 CHIP_DBG(KERN_INFO "GMAC: MMC rx interrupt: 0x%08x\n",
207 readl(ioaddr + GMAC_MMC_RX_INTR)); 210 readl(ioaddr + GMAC_MMC_RX_INTR));
208 if (unlikely(intr_status & mmc_rx_csum_offload_irq)) 211 status |= core_mmc_rx_irq;
209 CHIP_DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n", 212 }
213 if (unlikely(intr_status & mmc_rx_csum_offload_irq)) {
214 CHIP_DBG(KERN_INFO "GMAC: MMC rx csum offload: 0x%08x\n",
210 readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD)); 215 readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
216 status |= core_mmc_rx_csum_offload_irq;
217 }
211 if (unlikely(intr_status & pmt_irq)) { 218 if (unlikely(intr_status & pmt_irq)) {
212 CHIP_DBG(KERN_DEBUG "GMAC: received Magic frame\n"); 219 CHIP_DBG(KERN_INFO "GMAC: received Magic frame\n");
213 /* clear the PMT bits 5 and 6 by reading the PMT 220 /* clear the PMT bits 5 and 6 by reading the PMT
214 * status register. */ 221 * status register. */
215 readl(ioaddr + GMAC_PMT); 222 readl(ioaddr + GMAC_PMT);
223 status |= core_irq_receive_pmt_irq;
216 } 224 }
225 /* MAC trx/rx EEE LPI entry/exit interrupts */
226 if (intr_status & lpiis_irq) {
227 /* Clean LPI interrupt by reading the Reg 12 */
228 u32 lpi_status = readl(ioaddr + LPI_CTRL_STATUS);
229
230 if (lpi_status & LPI_CTRL_STATUS_TLPIEN) {
231 CHIP_DBG(KERN_INFO "GMAC TX entered in LPI\n");
232 status |= core_irq_tx_path_in_lpi_mode;
233 }
234 if (lpi_status & LPI_CTRL_STATUS_TLPIEX) {
235 CHIP_DBG(KERN_INFO "GMAC TX exit from LPI\n");
236 status |= core_irq_tx_path_exit_lpi_mode;
237 }
238 if (lpi_status & LPI_CTRL_STATUS_RLPIEN) {
239 CHIP_DBG(KERN_INFO "GMAC RX entered in LPI\n");
240 status |= core_irq_rx_path_in_lpi_mode;
241 }
242 if (lpi_status & LPI_CTRL_STATUS_RLPIEX) {
243 CHIP_DBG(KERN_INFO "GMAC RX exit from LPI\n");
244 status |= core_irq_rx_path_exit_lpi_mode;
245 }
246 }
247
248 return status;
249}
250
251static void dwmac1000_set_eee_mode(void __iomem *ioaddr)
252{
253 u32 value;
254
255 /* Enable the link status receive on RGMII, SGMII ore SMII
256 * receive path and instruct the transmit to enter in LPI
257 * state. */
258 value = readl(ioaddr + LPI_CTRL_STATUS);
259 value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
260 writel(value, ioaddr + LPI_CTRL_STATUS);
261}
262
263static void dwmac1000_reset_eee_mode(void __iomem *ioaddr)
264{
265 u32 value;
266
267 value = readl(ioaddr + LPI_CTRL_STATUS);
268 value &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA);
269 writel(value, ioaddr + LPI_CTRL_STATUS);
270}
271
272static void dwmac1000_set_eee_pls(void __iomem *ioaddr, int link)
273{
274 u32 value;
275
276 value = readl(ioaddr + LPI_CTRL_STATUS);
277
278 if (link)
279 value |= LPI_CTRL_STATUS_PLS;
280 else
281 value &= ~LPI_CTRL_STATUS_PLS;
282
283 writel(value, ioaddr + LPI_CTRL_STATUS);
284}
285
286static void dwmac1000_set_eee_timer(void __iomem *ioaddr, int ls, int tw)
287{
288 int value = ((tw & 0xffff)) | ((ls & 0x7ff) << 16);
289
290 /* Program the timers in the LPI timer control register:
291 * LS: minimum time (ms) for which the link
292 * status from PHY should be ok before transmitting
293 * the LPI pattern.
294 * TW: minimum time (us) for which the core waits
295 * after it has stopped transmitting the LPI pattern.
296 */
297 writel(value, ioaddr + LPI_TIMER_CTRL);
217} 298}
218 299
219static const struct stmmac_ops dwmac1000_ops = { 300static const struct stmmac_ops dwmac1000_ops = {
@@ -226,6 +307,10 @@ static const struct stmmac_ops dwmac1000_ops = {
226 .pmt = dwmac1000_pmt, 307 .pmt = dwmac1000_pmt,
227 .set_umac_addr = dwmac1000_set_umac_addr, 308 .set_umac_addr = dwmac1000_set_umac_addr,
228 .get_umac_addr = dwmac1000_get_umac_addr, 309 .get_umac_addr = dwmac1000_get_umac_addr,
310 .set_eee_mode = dwmac1000_set_eee_mode,
311 .reset_eee_mode = dwmac1000_reset_eee_mode,
312 .set_eee_timer = dwmac1000_set_eee_timer,
313 .set_eee_pls = dwmac1000_set_eee_pls,
229}; 314};
230 315
231struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr) 316struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 19e0f4eed2bc..f83210e7c221 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -72,9 +72,9 @@ static int dwmac100_rx_ipc_enable(void __iomem *ioaddr)
72 return 0; 72 return 0;
73} 73}
74 74
75static void dwmac100_irq_status(void __iomem *ioaddr) 75static int dwmac100_irq_status(void __iomem *ioaddr)
76{ 76{
77 return; 77 return 0;
78} 78}
79 79
80static void dwmac100_set_umac_addr(void __iomem *ioaddr, unsigned char *addr, 80static void dwmac100_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 6e0360f9cfde..e678ce39d014 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -70,6 +70,7 @@
70#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL) 70#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
71 71
72/* DMA Status register defines */ 72/* DMA Status register defines */
73#define DMA_STATUS_GLPII 0x40000000 /* GMAC LPI interrupt */
73#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */ 74#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
74#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */ 75#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
75#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */ 76#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index dc20c56efc9d..ab4c376cb276 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -87,6 +87,12 @@ struct stmmac_priv {
87#endif 87#endif
88 int clk_csr; 88 int clk_csr;
89 int synopsys_id; 89 int synopsys_id;
90 struct timer_list eee_ctrl_timer;
91 bool tx_path_in_lpi_mode;
92 int lpi_irq;
93 int eee_enabled;
94 int eee_active;
95 int tx_lpi_timer;
90}; 96};
91 97
92extern int phyaddr; 98extern int phyaddr;
@@ -104,6 +110,8 @@ int stmmac_dvr_remove(struct net_device *ndev);
104struct stmmac_priv *stmmac_dvr_probe(struct device *device, 110struct stmmac_priv *stmmac_dvr_probe(struct device *device,
105 struct plat_stmmacenet_data *plat_dat, 111 struct plat_stmmacenet_data *plat_dat,
106 void __iomem *addr); 112 void __iomem *addr);
113void stmmac_disable_eee_mode(struct stmmac_priv *priv);
114bool stmmac_eee_init(struct stmmac_priv *priv);
107 115
108#ifdef CONFIG_HAVE_CLK 116#ifdef CONFIG_HAVE_CLK
109static inline int stmmac_clk_enable(struct stmmac_priv *priv) 117static inline int stmmac_clk_enable(struct stmmac_priv *priv)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index ce431846fc6f..76fd61aa005f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -93,6 +93,16 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
93 STMMAC_STAT(poll_n), 93 STMMAC_STAT(poll_n),
94 STMMAC_STAT(sched_timer_n), 94 STMMAC_STAT(sched_timer_n),
95 STMMAC_STAT(normal_irq_n), 95 STMMAC_STAT(normal_irq_n),
96 STMMAC_STAT(normal_irq_n),
97 STMMAC_STAT(mmc_tx_irq_n),
98 STMMAC_STAT(mmc_rx_irq_n),
99 STMMAC_STAT(mmc_rx_csum_offload_irq_n),
100 STMMAC_STAT(irq_receive_pmt_irq_n),
101 STMMAC_STAT(irq_tx_path_in_lpi_mode_n),
102 STMMAC_STAT(irq_tx_path_exit_lpi_mode_n),
103 STMMAC_STAT(irq_rx_path_in_lpi_mode_n),
104 STMMAC_STAT(irq_rx_path_exit_lpi_mode_n),
105 STMMAC_STAT(phy_eee_wakeup_error_n),
96}; 106};
97#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats) 107#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
98 108
@@ -366,6 +376,11 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
366 (*(u32 *)p); 376 (*(u32 *)p);
367 } 377 }
368 } 378 }
379 if (priv->eee_enabled) {
380 int val = phy_get_eee_err(priv->phydev);
381 if (val)
382 priv->xstats.phy_eee_wakeup_error_n = val;
383 }
369 } 384 }
370 for (i = 0; i < STMMAC_STATS_LEN; i++) { 385 for (i = 0; i < STMMAC_STATS_LEN; i++) {
371 char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset; 386 char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
@@ -464,6 +479,46 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
464 return 0; 479 return 0;
465} 480}
466 481
482static int stmmac_ethtool_op_get_eee(struct net_device *dev,
483 struct ethtool_eee *edata)
484{
485 struct stmmac_priv *priv = netdev_priv(dev);
486
487 if (!priv->dma_cap.eee)
488 return -EOPNOTSUPP;
489
490 edata->eee_enabled = priv->eee_enabled;
491 edata->eee_active = priv->eee_active;
492 edata->tx_lpi_timer = priv->tx_lpi_timer;
493
494 return phy_ethtool_get_eee(priv->phydev, edata);
495}
496
497static int stmmac_ethtool_op_set_eee(struct net_device *dev,
498 struct ethtool_eee *edata)
499{
500 struct stmmac_priv *priv = netdev_priv(dev);
501
502 priv->eee_enabled = edata->eee_enabled;
503
504 if (!priv->eee_enabled)
505 stmmac_disable_eee_mode(priv);
506 else {
507		/* We are asking to enable the EEE, but it is safe
508		 * to verify all settings by invoking the eee_init function.
509		 * In case of failure it will return an error.
510		 */
511 priv->eee_enabled = stmmac_eee_init(priv);
512 if (!priv->eee_enabled)
513 return -EOPNOTSUPP;
514
515 /* Do not change tx_lpi_timer in case of failure */
516 priv->tx_lpi_timer = edata->tx_lpi_timer;
517 }
518
519 return phy_ethtool_set_eee(priv->phydev, edata);
520}
521
467static const struct ethtool_ops stmmac_ethtool_ops = { 522static const struct ethtool_ops stmmac_ethtool_ops = {
468 .begin = stmmac_check_if_running, 523 .begin = stmmac_check_if_running,
469 .get_drvinfo = stmmac_ethtool_getdrvinfo, 524 .get_drvinfo = stmmac_ethtool_getdrvinfo,
@@ -480,6 +535,8 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
480 .get_strings = stmmac_get_strings, 535 .get_strings = stmmac_get_strings,
481 .get_wol = stmmac_get_wol, 536 .get_wol = stmmac_get_wol,
482 .set_wol = stmmac_set_wol, 537 .set_wol = stmmac_set_wol,
538 .get_eee = stmmac_ethtool_op_get_eee,
539 .set_eee = stmmac_ethtool_op_set_eee,
483 .get_sset_count = stmmac_get_sset_count, 540 .get_sset_count = stmmac_get_sset_count,
484 .get_ts_info = ethtool_op_get_ts_info, 541 .get_ts_info = ethtool_op_get_ts_info,
485}; 542};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index ea3003edde18..f6b04c1a3672 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -133,6 +133,12 @@ static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
133 NETIF_MSG_LINK | NETIF_MSG_IFUP | 133 NETIF_MSG_LINK | NETIF_MSG_IFUP |
134 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER); 134 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
135 135
136#define STMMAC_DEFAULT_LPI_TIMER 1000
137static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
138module_param(eee_timer, int, S_IRUGO | S_IWUSR);
139MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
140#define STMMAC_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x))
141
136static irqreturn_t stmmac_interrupt(int irq, void *dev_id); 142static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
137 143
138#ifdef CONFIG_STMMAC_DEBUG_FS 144#ifdef CONFIG_STMMAC_DEBUG_FS
@@ -161,6 +167,8 @@ static void stmmac_verify_args(void)
161 flow_ctrl = FLOW_OFF; 167 flow_ctrl = FLOW_OFF;
162 if (unlikely((pause < 0) || (pause > 0xffff))) 168 if (unlikely((pause < 0) || (pause > 0xffff)))
163 pause = PAUSE_TIME; 169 pause = PAUSE_TIME;
170 if (eee_timer < 0)
171 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
164} 172}
165 173
166static void stmmac_clk_csr_set(struct stmmac_priv *priv) 174static void stmmac_clk_csr_set(struct stmmac_priv *priv)
@@ -229,6 +237,85 @@ static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
229 phydev->speed); 237 phydev->speed);
230} 238}
231 239
240static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
241{
242 /* Check and enter in LPI mode */
243 if ((priv->dirty_tx == priv->cur_tx) &&
244 (priv->tx_path_in_lpi_mode == false))
245 priv->hw->mac->set_eee_mode(priv->ioaddr);
246}
247
248void stmmac_disable_eee_mode(struct stmmac_priv *priv)
249{
250	/* Exit and disable EEE in case we are in LPI state. */
251 priv->hw->mac->reset_eee_mode(priv->ioaddr);
252 del_timer_sync(&priv->eee_ctrl_timer);
253 priv->tx_path_in_lpi_mode = false;
254}
255
256/**
257 * stmmac_eee_ctrl_timer
258 * @arg : data hook
259 * Description:
260 * If there is no data transfer and if we are not in LPI state,
261 * then MAC Transmitter can be moved to LPI state.
262 */
263static void stmmac_eee_ctrl_timer(unsigned long arg)
264{
265 struct stmmac_priv *priv = (struct stmmac_priv *)arg;
266
267 stmmac_enable_eee_mode(priv);
268 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer));
269}
270
271/**
272 * stmmac_eee_init
273 * @priv: private device pointer
274 * Description:
275 * If EEE support was enabled while configuring the driver, the
276 * GMAC actually supports EEE (from the HW cap reg) and the phy can
277 * also manage EEE, then enable the LPI state and start the timer
278 * to verify whether the tx path can enter the LPI state.
279 */
280bool stmmac_eee_init(struct stmmac_priv *priv)
281{
282 bool ret = false;
283
284 /* MAC core supports the EEE feature. */
285 if (priv->dma_cap.eee) {
286 /* Check if the PHY supports EEE */
287 if (phy_init_eee(priv->phydev, 1))
288 goto out;
289
290 priv->eee_active = 1;
291 init_timer(&priv->eee_ctrl_timer);
292 priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
293 priv->eee_ctrl_timer.data = (unsigned long)priv;
294 priv->eee_ctrl_timer.expires = STMMAC_LPI_TIMER(eee_timer);
295 add_timer(&priv->eee_ctrl_timer);
296
297 priv->hw->mac->set_eee_timer(priv->ioaddr,
298 STMMAC_DEFAULT_LIT_LS_TIMER,
299 priv->tx_lpi_timer);
300
301 pr_info("stmmac: Energy-Efficient Ethernet initialized\n");
302
303 ret = true;
304 }
305out:
306 return ret;
307}
308
309static void stmmac_eee_adjust(struct stmmac_priv *priv)
310{
311	/* When EEE has already been initialised, we have to modify
312	 * the PLS bit in the LPI ctrl & status reg according to the
313	 * PHY link status.
314	 */
315 if (priv->eee_enabled)
316 priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
317}
318
232/** 319/**
233 * stmmac_adjust_link 320 * stmmac_adjust_link
234 * @dev: net device structure 321 * @dev: net device structure
@@ -249,6 +336,7 @@ static void stmmac_adjust_link(struct net_device *dev)
249 phydev->addr, phydev->link); 336 phydev->addr, phydev->link);
250 337
251 spin_lock_irqsave(&priv->lock, flags); 338 spin_lock_irqsave(&priv->lock, flags);
339
252 if (phydev->link) { 340 if (phydev->link) {
253 u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG); 341 u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
254 342
@@ -315,6 +403,8 @@ static void stmmac_adjust_link(struct net_device *dev)
315 if (new_state && netif_msg_link(priv)) 403 if (new_state && netif_msg_link(priv))
316 phy_print_status(phydev); 404 phy_print_status(phydev);
317 405
406 stmmac_eee_adjust(priv);
407
318 spin_unlock_irqrestore(&priv->lock, flags); 408 spin_unlock_irqrestore(&priv->lock, flags);
319 409
320 DBG(probe, DEBUG, "stmmac_adjust_link: exiting\n"); 410 DBG(probe, DEBUG, "stmmac_adjust_link: exiting\n");
@@ -332,7 +422,7 @@ static int stmmac_init_phy(struct net_device *dev)
332{ 422{
333 struct stmmac_priv *priv = netdev_priv(dev); 423 struct stmmac_priv *priv = netdev_priv(dev);
334 struct phy_device *phydev; 424 struct phy_device *phydev;
335 char phy_id[MII_BUS_ID_SIZE + 3]; 425 char phy_id_fmt[MII_BUS_ID_SIZE + 3];
336 char bus_id[MII_BUS_ID_SIZE]; 426 char bus_id[MII_BUS_ID_SIZE];
337 int interface = priv->plat->interface; 427 int interface = priv->plat->interface;
338 priv->oldlink = 0; 428 priv->oldlink = 0;
@@ -346,11 +436,12 @@ static int stmmac_init_phy(struct net_device *dev)
346 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x", 436 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
347 priv->plat->bus_id); 437 priv->plat->bus_id);
348 438
349 snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id, 439 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
350 priv->plat->phy_addr); 440 priv->plat->phy_addr);
351 pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id); 441 pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id_fmt);
352 442
353 phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0, interface); 443 phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, 0,
444 interface);
354 445
355 if (IS_ERR(phydev)) { 446 if (IS_ERR(phydev)) {
356 pr_err("%s: Could not attach to PHY\n", dev->name); 447 pr_err("%s: Could not attach to PHY\n", dev->name);
@@ -677,7 +768,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
677 768
678 priv->hw->desc->release_tx_desc(p); 769 priv->hw->desc->release_tx_desc(p);
679 770
680 entry = (++priv->dirty_tx) % txsize; 771 priv->dirty_tx++;
681 } 772 }
682 if (unlikely(netif_queue_stopped(priv->dev) && 773 if (unlikely(netif_queue_stopped(priv->dev) &&
683 stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) { 774 stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
@@ -689,6 +780,11 @@ static void stmmac_tx(struct stmmac_priv *priv)
689 } 780 }
690 netif_tx_unlock(priv->dev); 781 netif_tx_unlock(priv->dev);
691 } 782 }
783
784 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
785 stmmac_enable_eee_mode(priv);
786 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer));
787 }
692 spin_unlock(&priv->tx_lock); 788 spin_unlock(&priv->tx_lock);
693} 789}
694 790
@@ -1027,6 +1123,17 @@ static int stmmac_open(struct net_device *dev)
1027 } 1123 }
1028 } 1124 }
1029 1125
1126 /* Request the IRQ lines */
1127 if (priv->lpi_irq != -ENXIO) {
1128 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
1129 dev->name, dev);
1130 if (unlikely(ret < 0)) {
1131 pr_err("%s: ERROR: allocating the LPI IRQ %d (%d)\n",
1132 __func__, priv->lpi_irq, ret);
1133 goto open_error_lpiirq;
1134 }
1135 }
1136
1030 /* Enable the MAC Rx/Tx */ 1137 /* Enable the MAC Rx/Tx */
1031 stmmac_set_mac(priv->ioaddr, true); 1138 stmmac_set_mac(priv->ioaddr, true);
1032 1139
@@ -1062,12 +1169,19 @@ static int stmmac_open(struct net_device *dev)
1062 if (priv->phydev) 1169 if (priv->phydev)
1063 phy_start(priv->phydev); 1170 phy_start(priv->phydev);
1064 1171
1172 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS_TIMER;
1173 priv->eee_enabled = stmmac_eee_init(priv);
1174
1065 napi_enable(&priv->napi); 1175 napi_enable(&priv->napi);
1066 skb_queue_head_init(&priv->rx_recycle); 1176 skb_queue_head_init(&priv->rx_recycle);
1067 netif_start_queue(dev); 1177 netif_start_queue(dev);
1068 1178
1069 return 0; 1179 return 0;
1070 1180
1181open_error_lpiirq:
1182 if (priv->wol_irq != dev->irq)
1183 free_irq(priv->wol_irq, dev);
1184
1071open_error_wolirq: 1185open_error_wolirq:
1072 free_irq(dev->irq, dev); 1186 free_irq(dev->irq, dev);
1073 1187
@@ -1093,6 +1207,9 @@ static int stmmac_release(struct net_device *dev)
1093{ 1207{
1094 struct stmmac_priv *priv = netdev_priv(dev); 1208 struct stmmac_priv *priv = netdev_priv(dev);
1095 1209
1210 if (priv->eee_enabled)
1211 del_timer_sync(&priv->eee_ctrl_timer);
1212
1096 /* Stop and disconnect the PHY */ 1213 /* Stop and disconnect the PHY */
1097 if (priv->phydev) { 1214 if (priv->phydev) {
1098 phy_stop(priv->phydev); 1215 phy_stop(priv->phydev);
@@ -1115,6 +1232,8 @@ static int stmmac_release(struct net_device *dev)
1115 free_irq(dev->irq, dev); 1232 free_irq(dev->irq, dev);
1116 if (priv->wol_irq != dev->irq) 1233 if (priv->wol_irq != dev->irq)
1117 free_irq(priv->wol_irq, dev); 1234 free_irq(priv->wol_irq, dev);
1235 if (priv->lpi_irq != -ENXIO)
1236 free_irq(priv->lpi_irq, dev);
1118 1237
1119 /* Stop TX/RX DMA and clear the descriptors */ 1238 /* Stop TX/RX DMA and clear the descriptors */
1120 priv->hw->dma->stop_tx(priv->ioaddr); 1239 priv->hw->dma->stop_tx(priv->ioaddr);
@@ -1164,6 +1283,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1164 1283
1165 spin_lock(&priv->tx_lock); 1284 spin_lock(&priv->tx_lock);
1166 1285
1286 if (priv->tx_path_in_lpi_mode)
1287 stmmac_disable_eee_mode(priv);
1288
1167 entry = priv->cur_tx % txsize; 1289 entry = priv->cur_tx % txsize;
1168 1290
1169#ifdef STMMAC_XMIT_DEBUG 1291#ifdef STMMAC_XMIT_DEBUG
@@ -1311,7 +1433,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
1311 display_ring(priv->dma_rx, rxsize); 1433 display_ring(priv->dma_rx, rxsize);
1312 } 1434 }
1313#endif 1435#endif
1314 count = 0;
1315 while (!priv->hw->desc->get_rx_owner(p)) { 1436 while (!priv->hw->desc->get_rx_owner(p)) {
1316 int status; 1437 int status;
1317 1438
@@ -1544,10 +1665,37 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
1544 return IRQ_NONE; 1665 return IRQ_NONE;
1545 } 1666 }
1546 1667
1547 if (priv->plat->has_gmac) 1668 /* To handle GMAC own interrupts */
1548 /* To handle GMAC own interrupts */ 1669 if (priv->plat->has_gmac) {
1549 priv->hw->mac->host_irq_status((void __iomem *) dev->base_addr); 1670 int status = priv->hw->mac->host_irq_status((void __iomem *)
1671 dev->base_addr);
1672 if (unlikely(status)) {
1673 if (status & core_mmc_tx_irq)
1674 priv->xstats.mmc_tx_irq_n++;
1675 if (status & core_mmc_rx_irq)
1676 priv->xstats.mmc_rx_irq_n++;
1677 if (status & core_mmc_rx_csum_offload_irq)
1678 priv->xstats.mmc_rx_csum_offload_irq_n++;
1679 if (status & core_irq_receive_pmt_irq)
1680 priv->xstats.irq_receive_pmt_irq_n++;
1681
1682 /* For LPI we need to save the tx status */
1683 if (status & core_irq_tx_path_in_lpi_mode) {
1684 priv->xstats.irq_tx_path_in_lpi_mode_n++;
1685 priv->tx_path_in_lpi_mode = true;
1686 }
1687 if (status & core_irq_tx_path_exit_lpi_mode) {
1688 priv->xstats.irq_tx_path_exit_lpi_mode_n++;
1689 priv->tx_path_in_lpi_mode = false;
1690 }
1691 if (status & core_irq_rx_path_in_lpi_mode)
1692 priv->xstats.irq_rx_path_in_lpi_mode_n++;
1693 if (status & core_irq_rx_path_exit_lpi_mode)
1694 priv->xstats.irq_rx_path_exit_lpi_mode_n++;
1695 }
1696 }
1550 1697
1698 /* To handle DMA interrupts */
1551 stmmac_dma_interrupt(priv); 1699 stmmac_dma_interrupt(priv);
1552 1700
1553 return IRQ_HANDLED; 1701 return IRQ_HANDLED;
@@ -2133,42 +2281,38 @@ static int __init stmmac_cmdline_opt(char *str)
2133 return -EINVAL; 2281 return -EINVAL;
2134 while ((opt = strsep(&str, ",")) != NULL) { 2282 while ((opt = strsep(&str, ",")) != NULL) {
2135 if (!strncmp(opt, "debug:", 6)) { 2283 if (!strncmp(opt, "debug:", 6)) {
2136 if (strict_strtoul(opt + 6, 0, (unsigned long *)&debug)) 2284 if (kstrtoint(opt + 6, 0, &debug))
2137 goto err; 2285 goto err;
2138 } else if (!strncmp(opt, "phyaddr:", 8)) { 2286 } else if (!strncmp(opt, "phyaddr:", 8)) {
2139 if (strict_strtoul(opt + 8, 0, 2287 if (kstrtoint(opt + 8, 0, &phyaddr))
2140 (unsigned long *)&phyaddr))
2141 goto err; 2288 goto err;
2142 } else if (!strncmp(opt, "dma_txsize:", 11)) { 2289 } else if (!strncmp(opt, "dma_txsize:", 11)) {
2143 if (strict_strtoul(opt + 11, 0, 2290 if (kstrtoint(opt + 11, 0, &dma_txsize))
2144 (unsigned long *)&dma_txsize))
2145 goto err; 2291 goto err;
2146 } else if (!strncmp(opt, "dma_rxsize:", 11)) { 2292 } else if (!strncmp(opt, "dma_rxsize:", 11)) {
2147 if (strict_strtoul(opt + 11, 0, 2293 if (kstrtoint(opt + 11, 0, &dma_rxsize))
2148 (unsigned long *)&dma_rxsize))
2149 goto err; 2294 goto err;
2150 } else if (!strncmp(opt, "buf_sz:", 7)) { 2295 } else if (!strncmp(opt, "buf_sz:", 7)) {
2151 if (strict_strtoul(opt + 7, 0, 2296 if (kstrtoint(opt + 7, 0, &buf_sz))
2152 (unsigned long *)&buf_sz))
2153 goto err; 2297 goto err;
2154 } else if (!strncmp(opt, "tc:", 3)) { 2298 } else if (!strncmp(opt, "tc:", 3)) {
2155 if (strict_strtoul(opt + 3, 0, (unsigned long *)&tc)) 2299 if (kstrtoint(opt + 3, 0, &tc))
2156 goto err; 2300 goto err;
2157 } else if (!strncmp(opt, "watchdog:", 9)) { 2301 } else if (!strncmp(opt, "watchdog:", 9)) {
2158 if (strict_strtoul(opt + 9, 0, 2302 if (kstrtoint(opt + 9, 0, &watchdog))
2159 (unsigned long *)&watchdog))
2160 goto err; 2303 goto err;
2161 } else if (!strncmp(opt, "flow_ctrl:", 10)) { 2304 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
2162 if (strict_strtoul(opt + 10, 0, 2305 if (kstrtoint(opt + 10, 0, &flow_ctrl))
2163 (unsigned long *)&flow_ctrl))
2164 goto err; 2306 goto err;
2165 } else if (!strncmp(opt, "pause:", 6)) { 2307 } else if (!strncmp(opt, "pause:", 6)) {
2166 if (strict_strtoul(opt + 6, 0, (unsigned long *)&pause)) 2308 if (kstrtoint(opt + 6, 0, &pause))
2309 goto err;
2310 } else if (!strncmp(opt, "eee_timer:", 6)) {
2311 if (kstrtoint(opt + 10, 0, &eee_timer))
2167 goto err; 2312 goto err;
2168#ifdef CONFIG_STMMAC_TIMER 2313#ifdef CONFIG_STMMAC_TIMER
2169 } else if (!strncmp(opt, "tmrate:", 7)) { 2314 } else if (!strncmp(opt, "tmrate:", 7)) {
2170 if (strict_strtoul(opt + 7, 0, 2315 if (kstrtoint(opt + 7, 0, &tmrate))
2171 (unsigned long *)&tmrate))
2172 goto err; 2316 goto err;
2173#endif 2317#endif
2174 } 2318 }
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index cf826e6b6aa1..13afb8edfadc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -125,7 +125,7 @@ err_out_req_reg_failed:
125} 125}
126 126
127/** 127/**
128 * stmmac_dvr_remove 128 * stmmac_pci_remove
129 * 129 *
130 * @pdev: platform device pointer 130 * @pdev: platform device pointer
131 * Description: this function calls the main to free the net resources 131 * Description: this function calls the main to free the net resources
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 680d2b8dfe27..cd01ee7ecef1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -49,7 +49,9 @@ static int __devinit stmmac_probe_config_dt(struct platform_device *pdev,
49 * are provided. All other properties should be added 49 * are provided. All other properties should be added
50 * once needed on other platforms. 50 * once needed on other platforms.
51 */ 51 */
52 if (of_device_is_compatible(np, "st,spear600-gmac")) { 52 if (of_device_is_compatible(np, "st,spear600-gmac") ||
53 of_device_is_compatible(np, "snps,dwmac-3.70a") ||
54 of_device_is_compatible(np, "snps,dwmac")) {
53 plat->has_gmac = 1; 55 plat->has_gmac = 1;
54 plat->pmt = 1; 56 plat->pmt = 1;
55 } 57 }
@@ -156,6 +158,8 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
156 if (priv->wol_irq == -ENXIO) 158 if (priv->wol_irq == -ENXIO)
157 priv->wol_irq = priv->dev->irq; 159 priv->wol_irq = priv->dev->irq;
158 160
161 priv->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
162
159 platform_set_drvdata(pdev, priv->dev); 163 platform_set_drvdata(pdev, priv->dev);
160 164
161 pr_debug("STMMAC platform driver registration completed"); 165 pr_debug("STMMAC platform driver registration completed");
@@ -190,7 +194,7 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
190 194
191 platform_set_drvdata(pdev, NULL); 195 platform_set_drvdata(pdev, NULL);
192 196
193 iounmap((void *)priv->ioaddr); 197 iounmap((void __force __iomem *)priv->ioaddr);
194 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 198 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
195 release_mem_region(res->start, resource_size(res)); 199 release_mem_region(res->start, resource_size(res));
196 200
@@ -250,7 +254,9 @@ static const struct dev_pm_ops stmmac_pltfr_pm_ops;
250#endif /* CONFIG_PM */ 254#endif /* CONFIG_PM */
251 255
252static const struct of_device_id stmmac_dt_ids[] = { 256static const struct of_device_id stmmac_dt_ids[] = {
253 { .compatible = "st,spear600-gmac", }, 257 { .compatible = "st,spear600-gmac"},
258 { .compatible = "snps,dwmac-3.70a"},
259 { .compatible = "snps,dwmac"},
254 { /* sentinel */ } 260 { /* sentinel */ }
255}; 261};
256MODULE_DEVICE_TABLE(of, stmmac_dt_ids); 262MODULE_DEVICE_TABLE(of, stmmac_dt_ids);
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 2a83fc57edba..967fe8cb476e 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -233,7 +233,6 @@ static void bigmac_init_rings(struct bigmac *bp, int from_irq)
233 continue; 233 continue;
234 234
235 bp->rx_skbs[i] = skb; 235 bp->rx_skbs[i] = skb;
236 skb->dev = dev;
237 236
238 /* Because we reserve afterwards. */ 237 /* Because we reserve afterwards. */
239 skb_put(skb, ETH_FRAME_LEN); 238 skb_put(skb, ETH_FRAME_LEN);
@@ -838,7 +837,6 @@ static void bigmac_rx(struct bigmac *bp)
838 RX_BUF_ALLOC_SIZE - 34, 837 RX_BUF_ALLOC_SIZE - 34,
839 DMA_FROM_DEVICE); 838 DMA_FROM_DEVICE);
840 bp->rx_skbs[elem] = new_skb; 839 bp->rx_skbs[elem] = new_skb;
841 new_skb->dev = bp->dev;
842 skb_put(new_skb, ETH_FRAME_LEN); 840 skb_put(new_skb, ETH_FRAME_LEN);
843 skb_reserve(new_skb, 34); 841 skb_reserve(new_skb, 34);
844 this->rx_addr = 842 this->rx_addr =
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 3cf4ab755838..9ae12d0c9632 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -752,7 +752,6 @@ static __inline__ struct sk_buff *gem_alloc_skb(struct net_device *dev, int size
752 if (likely(skb)) { 752 if (likely(skb)) {
753 unsigned long offset = ALIGNED_RX_SKB_ADDR(skb->data); 753 unsigned long offset = ALIGNED_RX_SKB_ADDR(skb->data);
754 skb_reserve(skb, offset); 754 skb_reserve(skb, offset);
755 skb->dev = dev;
756 } 755 }
757 return skb; 756 return skb;
758} 757}
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index dfc00c4683e5..73f341b8befb 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -1249,7 +1249,6 @@ static void happy_meal_clean_rings(struct happy_meal *hp)
1249static void happy_meal_init_rings(struct happy_meal *hp) 1249static void happy_meal_init_rings(struct happy_meal *hp)
1250{ 1250{
1251 struct hmeal_init_block *hb = hp->happy_block; 1251 struct hmeal_init_block *hb = hp->happy_block;
1252 struct net_device *dev = hp->dev;
1253 int i; 1252 int i;
1254 1253
1255 HMD(("happy_meal_init_rings: counters to zero, ")); 1254 HMD(("happy_meal_init_rings: counters to zero, "));
@@ -1270,7 +1269,6 @@ static void happy_meal_init_rings(struct happy_meal *hp)
1270 continue; 1269 continue;
1271 } 1270 }
1272 hp->rx_skbs[i] = skb; 1271 hp->rx_skbs[i] = skb;
1273 skb->dev = dev;
1274 1272
1275 /* Because we reserve afterwards. */ 1273 /* Because we reserve afterwards. */
1276 skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4)); 1274 skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
@@ -2031,7 +2029,6 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
2031 } 2029 }
2032 dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE); 2030 dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
2033 hp->rx_skbs[elem] = new_skb; 2031 hp->rx_skbs[elem] = new_skb;
2034 new_skb->dev = dev;
2035 skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4)); 2032 skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
2036 hme_write_rxd(hp, this, 2033 hme_write_rxd(hp, this,
2037 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)), 2034 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index 7d4a040d84a2..aeded7ff1c8f 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -441,7 +441,7 @@ static void qe_rx(struct sunqe *qep)
441 } else { 441 } else {
442 skb_reserve(skb, 2); 442 skb_reserve(skb, 2);
443 skb_put(skb, len); 443 skb_put(skb, len);
444 skb_copy_to_linear_data(skb, (unsigned char *) this_qbuf, 444 skb_copy_to_linear_data(skb, this_qbuf,
445 len); 445 len);
446 skb->protocol = eth_type_trans(skb, qep->dev); 446 skb->protocol = eth_type_trans(skb, qep->dev);
447 netif_rx(skb); 447 netif_rx(skb);
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 447a6932cab3..6ce9edd95c04 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -137,14 +137,15 @@ static void print_eth_id(struct net_device *ndev)
137#define bdx_disable_interrupts(priv) \ 137#define bdx_disable_interrupts(priv) \
138 do { WRITE_REG(priv, regIMR, 0); } while (0) 138 do { WRITE_REG(priv, regIMR, 0); } while (0)
139 139
140/* bdx_fifo_init 140/**
141 * create TX/RX descriptor fifo for host-NIC communication. 141 * bdx_fifo_init - create TX/RX descriptor fifo for host-NIC communication.
142 * @priv: NIC private structure
143 * @f: fifo to initialize
144 * @fsz_type: fifo size type: 0-4KB, 1-8KB, 2-16KB, 3-32KB
145 * @reg_XXX: offsets of registers relative to base address
146 *
142 * 1K extra space is allocated at the end of the fifo to simplify 147 * 1K extra space is allocated at the end of the fifo to simplify
143 * processing of descriptors that wraps around fifo's end 148 * processing of descriptors that wraps around fifo's end
144 * @priv - NIC private structure
145 * @f - fifo to initialize
146 * @fsz_type - fifo size type: 0-4KB, 1-8KB, 2-16KB, 3-32KB
147 * @reg_XXX - offsets of registers relative to base address
148 * 149 *
149 * Returns 0 on success, negative value on failure 150 * Returns 0 on success, negative value on failure
150 * 151 *
@@ -177,9 +178,10 @@ bdx_fifo_init(struct bdx_priv *priv, struct fifo *f, int fsz_type,
177 RET(0); 178 RET(0);
178} 179}
179 180
180/* bdx_fifo_free - free all resources used by fifo 181/**
181 * @priv - NIC private structure 182 * bdx_fifo_free - free all resources used by fifo
182 * @f - fifo to release 183 * @priv: NIC private structure
184 * @f: fifo to release
183 */ 185 */
184static void bdx_fifo_free(struct bdx_priv *priv, struct fifo *f) 186static void bdx_fifo_free(struct bdx_priv *priv, struct fifo *f)
185{ 187{
@@ -192,9 +194,9 @@ static void bdx_fifo_free(struct bdx_priv *priv, struct fifo *f)
192 RET(); 194 RET();
193} 195}
194 196
195/* 197/**
196 * bdx_link_changed - notifies OS about hw link state. 198 * bdx_link_changed - notifies OS about hw link state.
197 * @bdx_priv - hw adapter structure 199 * @priv: hw adapter structure
198 */ 200 */
199static void bdx_link_changed(struct bdx_priv *priv) 201static void bdx_link_changed(struct bdx_priv *priv)
200{ 202{
@@ -233,10 +235,10 @@ static void bdx_isr_extra(struct bdx_priv *priv, u32 isr)
233 235
234} 236}
235 237
236/* bdx_isr - Interrupt Service Routine for Bordeaux NIC 238/**
237 * @irq - interrupt number 239 * bdx_isr_napi - Interrupt Service Routine for Bordeaux NIC
238 * @ndev - network device 240 * @irq: interrupt number
239 * @regs - CPU registers 241 * @dev: network device
240 * 242 *
241 * Return IRQ_NONE if it was not our interrupt, IRQ_HANDLED - otherwise 243 * Return IRQ_NONE if it was not our interrupt, IRQ_HANDLED - otherwise
242 * 244 *
@@ -307,8 +309,10 @@ static int bdx_poll(struct napi_struct *napi, int budget)
307 return work_done; 309 return work_done;
308} 310}
309 311
310/* bdx_fw_load - loads firmware to NIC 312/**
311 * @priv - NIC private structure 313 * bdx_fw_load - loads firmware to NIC
314 * @priv: NIC private structure
315 *
312 * Firmware is loaded via TXD fifo, so it must be initialized first. 316 * Firmware is loaded via TXD fifo, so it must be initialized first.
313 * Firmware must be loaded once per NIC not per PCI device provided by NIC (NIC 317 * Firmware must be loaded once per NIC not per PCI device provided by NIC (NIC
314 * can have few of them). So all drivers use semaphore register to choose one 318 * can have few of them). So all drivers use semaphore register to choose one
@@ -380,8 +384,9 @@ static void bdx_restore_mac(struct net_device *ndev, struct bdx_priv *priv)
380 RET(); 384 RET();
381} 385}
382 386
383/* bdx_hw_start - inits registers and starts HW's Rx and Tx engines 387/**
384 * @priv - NIC private structure 388 * bdx_hw_start - inits registers and starts HW's Rx and Tx engines
389 * @priv: NIC private structure
385 */ 390 */
386static int bdx_hw_start(struct bdx_priv *priv) 391static int bdx_hw_start(struct bdx_priv *priv)
387{ 392{
@@ -691,12 +696,13 @@ static int bdx_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
691 RET(-EOPNOTSUPP); 696 RET(-EOPNOTSUPP);
692} 697}
693 698
694/* 699/**
695 * __bdx_vlan_rx_vid - private helper for adding/killing VLAN vid 700 * __bdx_vlan_rx_vid - private helper for adding/killing VLAN vid
696 * by passing VLAN filter table to hardware 701 * @ndev: network device
697 * @ndev network device 702 * @vid: VLAN vid
698 * @vid VLAN vid 703 * @op: add or kill operation
699 * @op add or kill operation 704 *
705 * Passes VLAN filter table to hardware
700 */ 706 */
701static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable) 707static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
702{ 708{
@@ -722,10 +728,10 @@ static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
722 RET(); 728 RET();
723} 729}
724 730
725/* 731/**
726 * bdx_vlan_rx_add_vid - kernel hook for adding VLAN vid to hw filtering table 732 * bdx_vlan_rx_add_vid - kernel hook for adding VLAN vid to hw filtering table
727 * @ndev network device 733 * @ndev: network device
728 * @vid VLAN vid to add 734 * @vid: VLAN vid to add
729 */ 735 */
730static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid) 736static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
731{ 737{
@@ -733,10 +739,10 @@ static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
733 return 0; 739 return 0;
734} 740}
735 741
736/* 742/**
737 * bdx_vlan_rx_kill_vid - kernel hook for killing VLAN vid in hw filtering table 743 * bdx_vlan_rx_kill_vid - kernel hook for killing VLAN vid in hw filtering table
738 * @ndev network device 744 * @ndev: network device
739 * @vid VLAN vid to kill 745 * @vid: VLAN vid to kill
740 */ 746 */
741static int bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid) 747static int bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid)
742{ 748{
@@ -974,8 +980,9 @@ static inline void bdx_rxdb_free_elem(struct rxdb *db, int n)
974 * Rx Init * 980 * Rx Init *
975 *************************************************************************/ 981 *************************************************************************/
976 982
977/* bdx_rx_init - initialize RX all related HW and SW resources 983/**
978 * @priv - NIC private structure 984 * bdx_rx_init - initialize RX all related HW and SW resources
985 * @priv: NIC private structure
979 * 986 *
980 * Returns 0 on success, negative value on failure 987 * Returns 0 on success, negative value on failure
981 * 988 *
@@ -1016,9 +1023,10 @@ err_mem:
1016 return -ENOMEM; 1023 return -ENOMEM;
1017} 1024}
1018 1025
1019/* bdx_rx_free_skbs - frees and unmaps all skbs allocated for the fifo 1026/**
1020 * @priv - NIC private structure 1027 * bdx_rx_free_skbs - frees and unmaps all skbs allocated for the fifo
1021 * @f - RXF fifo 1028 * @priv: NIC private structure
1029 * @f: RXF fifo
1022 */ 1030 */
1023static void bdx_rx_free_skbs(struct bdx_priv *priv, struct rxf_fifo *f) 1031static void bdx_rx_free_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
1024{ 1032{
@@ -1045,8 +1053,10 @@ static void bdx_rx_free_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
1045 } 1053 }
1046} 1054}
1047 1055
1048/* bdx_rx_free - release all Rx resources 1056/**
1049 * @priv - NIC private structure 1057 * bdx_rx_free - release all Rx resources
1058 * @priv: NIC private structure
1059 *
1050 * It assumes that Rx is desabled in HW 1060 * It assumes that Rx is desabled in HW
1051 */ 1061 */
1052static void bdx_rx_free(struct bdx_priv *priv) 1062static void bdx_rx_free(struct bdx_priv *priv)
@@ -1067,9 +1077,11 @@ static void bdx_rx_free(struct bdx_priv *priv)
1067 * Rx Engine * 1077 * Rx Engine *
1068 *************************************************************************/ 1078 *************************************************************************/
1069 1079
1070/* bdx_rx_alloc_skbs - fill rxf fifo with new skbs 1080/**
1071 * @priv - nic's private structure 1081 * bdx_rx_alloc_skbs - fill rxf fifo with new skbs
1072 * @f - RXF fifo that needs skbs 1082 * @priv: nic's private structure
1083 * @f: RXF fifo that needs skbs
1084 *
1073 * It allocates skbs, build rxf descs and push it (rxf descr) into rxf fifo. 1085 * It allocates skbs, build rxf descs and push it (rxf descr) into rxf fifo.
1074 * skb's virtual and physical addresses are stored in skb db. 1086 * skb's virtual and physical addresses are stored in skb db.
1075 * To calculate free space, func uses cached values of RPTR and WPTR 1087 * To calculate free space, func uses cached values of RPTR and WPTR
@@ -1179,13 +1191,15 @@ static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd)
1179 RET(); 1191 RET();
1180} 1192}
1181 1193
1182/* bdx_rx_receive - receives full packets from RXD fifo and pass them to OS 1194/**
1195 * bdx_rx_receive - receives full packets from RXD fifo and pass them to OS
1183 * NOTE: a special treatment is given to non-continuous descriptors 1196 * NOTE: a special treatment is given to non-continuous descriptors
1184 * that start near the end, wraps around and continue at the beginning. a second 1197 * that start near the end, wraps around and continue at the beginning. a second
1185 * part is copied right after the first, and then descriptor is interpreted as 1198 * part is copied right after the first, and then descriptor is interpreted as
1186 * normal. fifo has an extra space to allow such operations 1199 * normal. fifo has an extra space to allow such operations
1187 * @priv - nic's private structure 1200 * @priv: nic's private structure
1188 * @f - RXF fifo that needs skbs 1201 * @f: RXF fifo that needs skbs
1202 * @budget: maximum number of packets to receive
1189 */ 1203 */
1190 1204
1191/* TBD: replace memcpy func call by explicite inline asm */ 1205/* TBD: replace memcpy func call by explicite inline asm */
@@ -1375,9 +1389,10 @@ static inline int bdx_tx_db_size(struct txdb *db)
1375 return db->size - taken; 1389 return db->size - taken;
1376} 1390}
1377 1391
1378/* __bdx_tx_ptr_next - helper function, increment read/write pointer + wrap 1392/**
1379 * @d - tx data base 1393 * __bdx_tx_db_ptr_next - helper function, increment read/write pointer + wrap
1380 * @ptr - read or write pointer 1394 * @db: tx data base
1395 * @pptr: read or write pointer
1381 */ 1396 */
1382static inline void __bdx_tx_db_ptr_next(struct txdb *db, struct tx_map **pptr) 1397static inline void __bdx_tx_db_ptr_next(struct txdb *db, struct tx_map **pptr)
1383{ 1398{
@@ -1394,8 +1409,9 @@ static inline void __bdx_tx_db_ptr_next(struct txdb *db, struct tx_map **pptr)
1394 *pptr = db->start; 1409 *pptr = db->start;
1395} 1410}
1396 1411
1397/* bdx_tx_db_inc_rptr - increment read pointer 1412/**
1398 * @d - tx data base 1413 * bdx_tx_db_inc_rptr - increment read pointer
1414 * @db: tx data base
1399 */ 1415 */
1400static inline void bdx_tx_db_inc_rptr(struct txdb *db) 1416static inline void bdx_tx_db_inc_rptr(struct txdb *db)
1401{ 1417{
@@ -1403,8 +1419,9 @@ static inline void bdx_tx_db_inc_rptr(struct txdb *db)
1403 __bdx_tx_db_ptr_next(db, &db->rptr); 1419 __bdx_tx_db_ptr_next(db, &db->rptr);
1404} 1420}
1405 1421
1406/* bdx_tx_db_inc_rptr - increment write pointer 1422/**
1407 * @d - tx data base 1423 * bdx_tx_db_inc_wptr - increment write pointer
1424 * @db: tx data base
1408 */ 1425 */
1409static inline void bdx_tx_db_inc_wptr(struct txdb *db) 1426static inline void bdx_tx_db_inc_wptr(struct txdb *db)
1410{ 1427{
@@ -1413,9 +1430,11 @@ static inline void bdx_tx_db_inc_wptr(struct txdb *db)
1413 a result of write */ 1430 a result of write */
1414} 1431}
1415 1432
1416/* bdx_tx_db_init - creates and initializes tx db 1433/**
1417 * @d - tx data base 1434 * bdx_tx_db_init - creates and initializes tx db
1418 * @sz_type - size of tx fifo 1435 * @d: tx data base
1436 * @sz_type: size of tx fifo
1437 *
1419 * Returns 0 on success, error code otherwise 1438 * Returns 0 on success, error code otherwise
1420 */ 1439 */
1421static int bdx_tx_db_init(struct txdb *d, int sz_type) 1440static int bdx_tx_db_init(struct txdb *d, int sz_type)
@@ -1441,8 +1460,9 @@ static int bdx_tx_db_init(struct txdb *d, int sz_type)
1441 return 0; 1460 return 0;
1442} 1461}
1443 1462
1444/* bdx_tx_db_close - closes tx db and frees all memory 1463/**
1445 * @d - tx data base 1464 * bdx_tx_db_close - closes tx db and frees all memory
1465 * @d: tx data base
1446 */ 1466 */
1447static void bdx_tx_db_close(struct txdb *d) 1467static void bdx_tx_db_close(struct txdb *d)
1448{ 1468{
@@ -1463,9 +1483,11 @@ static struct {
1463 u16 qwords; /* qword = 64 bit */ 1483 u16 qwords; /* qword = 64 bit */
1464} txd_sizes[MAX_SKB_FRAGS + 1]; 1484} txd_sizes[MAX_SKB_FRAGS + 1];
1465 1485
1466/* txdb_map_skb - creates and stores dma mappings for skb's data blocks 1486/**
1467 * @priv - NIC private structure 1487 * bdx_tx_map_skb - creates and stores dma mappings for skb's data blocks
1468 * @skb - socket buffer to map 1488 * @priv: NIC private structure
1489 * @skb: socket buffer to map
1490 * @txdd: TX descriptor to use
1469 * 1491 *
1470 * It makes dma mappings for skb's data blocks and writes them to PBL of 1492 * It makes dma mappings for skb's data blocks and writes them to PBL of
1471 * new tx descriptor. It also stores them in the tx db, so they could be 1493 * new tx descriptor. It also stores them in the tx db, so they could be
@@ -1562,9 +1584,10 @@ err_mem:
1562 return -ENOMEM; 1584 return -ENOMEM;
1563} 1585}
1564 1586
1565/* 1587/**
1566 * bdx_tx_space - calculates available space in TX fifo 1588 * bdx_tx_space - calculates available space in TX fifo
1567 * @priv - NIC private structure 1589 * @priv: NIC private structure
1590 *
1568 * Returns available space in TX fifo in bytes 1591 * Returns available space in TX fifo in bytes
1569 */ 1592 */
1570static inline int bdx_tx_space(struct bdx_priv *priv) 1593static inline int bdx_tx_space(struct bdx_priv *priv)
@@ -1579,9 +1602,10 @@ static inline int bdx_tx_space(struct bdx_priv *priv)
1579 return fsize; 1602 return fsize;
1580} 1603}
1581 1604
1582/* bdx_tx_transmit - send packet to NIC 1605/**
1583 * @skb - packet to send 1606 * bdx_tx_transmit - send packet to NIC
1584 * ndev - network device assigned to NIC 1607 * @skb: packet to send
1608 * @ndev: network device assigned to NIC
1585 * Return codes: 1609 * Return codes:
1586 * o NETDEV_TX_OK everything ok. 1610 * o NETDEV_TX_OK everything ok.
1587 * o NETDEV_TX_BUSY Cannot transmit packet, try later 1611 * o NETDEV_TX_BUSY Cannot transmit packet, try later
@@ -1699,8 +1723,10 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
1699 return NETDEV_TX_OK; 1723 return NETDEV_TX_OK;
1700} 1724}
1701 1725
1702/* bdx_tx_cleanup - clean TXF fifo, run in the context of IRQ. 1726/**
1703 * @priv - bdx adapter 1727 * bdx_tx_cleanup - clean TXF fifo, run in the context of IRQ.
1728 * @priv: bdx adapter
1729 *
1704 * It scans TXF fifo for descriptors, frees DMA mappings and reports to OS 1730 * It scans TXF fifo for descriptors, frees DMA mappings and reports to OS
1705 * that those packets were sent 1731 * that those packets were sent
1706 */ 1732 */
@@ -1761,7 +1787,8 @@ static void bdx_tx_cleanup(struct bdx_priv *priv)
1761 spin_unlock(&priv->tx_lock); 1787 spin_unlock(&priv->tx_lock);
1762} 1788}
1763 1789
1764/* bdx_tx_free_skbs - frees all skbs from TXD fifo. 1790/**
1791 * bdx_tx_free_skbs - frees all skbs from TXD fifo.
1765 * It gets called when OS stops this dev, eg upon "ifconfig down" or rmmod 1792 * It gets called when OS stops this dev, eg upon "ifconfig down" or rmmod
1766 */ 1793 */
1767static void bdx_tx_free_skbs(struct bdx_priv *priv) 1794static void bdx_tx_free_skbs(struct bdx_priv *priv)
@@ -1790,10 +1817,11 @@ static void bdx_tx_free(struct bdx_priv *priv)
1790 bdx_tx_db_close(&priv->txdb); 1817 bdx_tx_db_close(&priv->txdb);
1791} 1818}
1792 1819
1793/* bdx_tx_push_desc - push descriptor to TxD fifo 1820/**
1794 * @priv - NIC private structure 1821 * bdx_tx_push_desc - push descriptor to TxD fifo
1795 * @data - desc's data 1822 * @priv: NIC private structure
1796 * @size - desc's size 1823 * @data: desc's data
1824 * @size: desc's size
1797 * 1825 *
1798 * Pushes desc to TxD fifo and overlaps it if needed. 1826 * Pushes desc to TxD fifo and overlaps it if needed.
1799 * NOTE: this func does not check for available space. this is responsibility 1827 * NOTE: this func does not check for available space. this is responsibility
@@ -1819,10 +1847,11 @@ static void bdx_tx_push_desc(struct bdx_priv *priv, void *data, int size)
1819 WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR); 1847 WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
1820} 1848}
1821 1849
1822/* bdx_tx_push_desc_safe - push descriptor to TxD fifo in a safe way 1850/**
1823 * @priv - NIC private structure 1851 * bdx_tx_push_desc_safe - push descriptor to TxD fifo in a safe way
1824 * @data - desc's data 1852 * @priv: NIC private structure
1825 * @size - desc's size 1853 * @data: desc's data
1854 * @size: desc's size
1826 * 1855 *
1827 * NOTE: this func does check for available space and, if necessary, waits for 1856 * NOTE: this func does check for available space and, if necessary, waits for
1828 * NIC to read existing data before writing new one. 1857 * NIC to read existing data before writing new one.
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 6685bbb5705a..1e5d85b06e71 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -27,6 +27,7 @@
27#include <linux/phy.h> 27#include <linux/phy.h>
28#include <linux/workqueue.h> 28#include <linux/workqueue.h>
29#include <linux/delay.h> 29#include <linux/delay.h>
30#include <linux/pm_runtime.h>
30 31
31#include <linux/platform_data/cpsw.h> 32#include <linux/platform_data/cpsw.h>
32 33
@@ -494,11 +495,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
494 cpsw_intr_disable(priv); 495 cpsw_intr_disable(priv);
495 netif_carrier_off(ndev); 496 netif_carrier_off(ndev);
496 497
497 ret = clk_enable(priv->clk); 498 pm_runtime_get_sync(&priv->pdev->dev);
498 if (ret < 0) {
499 dev_err(priv->dev, "unable to turn on device clock\n");
500 return ret;
501 }
502 499
503 reg = __raw_readl(&priv->regs->id_ver); 500 reg = __raw_readl(&priv->regs->id_ver);
504 501
@@ -569,7 +566,7 @@ static int cpsw_ndo_stop(struct net_device *ndev)
569 netif_carrier_off(priv->ndev); 566 netif_carrier_off(priv->ndev);
570 cpsw_ale_stop(priv->ale); 567 cpsw_ale_stop(priv->ale);
571 for_each_slave(priv, cpsw_slave_stop, priv); 568 for_each_slave(priv, cpsw_slave_stop, priv);
572 clk_disable(priv->clk); 569 pm_runtime_put_sync(&priv->pdev->dev);
573 return 0; 570 return 0;
574} 571}
575 572
@@ -748,7 +745,7 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
748 memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN); 745 memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
749 pr_info("Detected MACID = %pM", priv->mac_addr); 746 pr_info("Detected MACID = %pM", priv->mac_addr);
750 } else { 747 } else {
751 random_ether_addr(priv->mac_addr); 748 eth_random_addr(priv->mac_addr);
752 pr_info("Random MACID = %pM", priv->mac_addr); 749 pr_info("Random MACID = %pM", priv->mac_addr);
753 } 750 }
754 751
@@ -763,10 +760,12 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
763 for (i = 0; i < data->slaves; i++) 760 for (i = 0; i < data->slaves; i++)
764 priv->slaves[i].slave_num = i; 761 priv->slaves[i].slave_num = i;
765 762
766 priv->clk = clk_get(&pdev->dev, NULL); 763 pm_runtime_enable(&pdev->dev);
764 priv->clk = clk_get(&pdev->dev, "fck");
767 if (IS_ERR(priv->clk)) { 765 if (IS_ERR(priv->clk)) {
768 dev_err(priv->dev, "failed to get device clock)\n"); 766 dev_err(&pdev->dev, "fck is not found\n");
769 ret = -EBUSY; 767 ret = -ENODEV;
768 goto clean_slave_ret;
770 } 769 }
771 770
772 priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 771 priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -935,6 +934,8 @@ clean_cpsw_iores_ret:
935 resource_size(priv->cpsw_res)); 934 resource_size(priv->cpsw_res));
936clean_clk_ret: 935clean_clk_ret:
937 clk_put(priv->clk); 936 clk_put(priv->clk);
937clean_slave_ret:
938 pm_runtime_disable(&pdev->dev);
938 kfree(priv->slaves); 939 kfree(priv->slaves);
939clean_ndev_ret: 940clean_ndev_ret:
940 free_netdev(ndev); 941 free_netdev(ndev);
@@ -959,6 +960,7 @@ static int __devexit cpsw_remove(struct platform_device *pdev)
959 resource_size(priv->cpsw_res)); 960 resource_size(priv->cpsw_res));
960 release_mem_region(priv->cpsw_ss_res->start, 961 release_mem_region(priv->cpsw_ss_res->start,
961 resource_size(priv->cpsw_ss_res)); 962 resource_size(priv->cpsw_ss_res));
963 pm_runtime_disable(&pdev->dev);
962 clk_put(priv->clk); 964 clk_put(priv->clk);
963 kfree(priv->slaves); 965 kfree(priv->slaves);
964 free_netdev(ndev); 966 free_netdev(ndev);
@@ -973,6 +975,8 @@ static int cpsw_suspend(struct device *dev)
973 975
974 if (netif_running(ndev)) 976 if (netif_running(ndev))
975 cpsw_ndo_stop(ndev); 977 cpsw_ndo_stop(ndev);
978 pm_runtime_put_sync(&pdev->dev);
979
976 return 0; 980 return 0;
977} 981}
978 982
@@ -981,6 +985,7 @@ static int cpsw_resume(struct device *dev)
981 struct platform_device *pdev = to_platform_device(dev); 985 struct platform_device *pdev = to_platform_device(dev);
982 struct net_device *ndev = platform_get_drvdata(pdev); 986 struct net_device *ndev = platform_get_drvdata(pdev);
983 987
988 pm_runtime_get_sync(&pdev->dev);
984 if (netif_running(ndev)) 989 if (netif_running(ndev))
985 cpsw_ndo_open(ndev); 990 cpsw_ndo_open(ndev);
986 return 0; 991 return 0;
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 4da93a5d7ec6..b298ab071e3d 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -58,6 +58,12 @@
58#include <linux/io.h> 58#include <linux/io.h>
59#include <linux/uaccess.h> 59#include <linux/uaccess.h>
60#include <linux/davinci_emac.h> 60#include <linux/davinci_emac.h>
61#include <linux/of.h>
62#include <linux/of_address.h>
63#include <linux/of_irq.h>
64#include <linux/of_net.h>
65
66#include <mach/mux.h>
61 67
62#include <asm/irq.h> 68#include <asm/irq.h>
63#include <asm/page.h> 69#include <asm/page.h>
@@ -339,6 +345,9 @@ struct emac_priv {
339 u32 rx_addr_type; 345 u32 rx_addr_type;
340 atomic_t cur_tx; 346 atomic_t cur_tx;
341 const char *phy_id; 347 const char *phy_id;
348#ifdef CONFIG_OF
349 struct device_node *phy_node;
350#endif
342 struct phy_device *phydev; 351 struct phy_device *phydev;
343 spinlock_t lock; 352 spinlock_t lock;
344 /*platform specific members*/ 353 /*platform specific members*/
@@ -375,7 +384,7 @@ static char *emac_rxhost_errcodes[16] = {
375#define emac_ctrl_write(reg, val) iowrite32(val, (priv->ctrl_base + (reg))) 384#define emac_ctrl_write(reg, val) iowrite32(val, (priv->ctrl_base + (reg)))
376 385
377/** 386/**
378 * emac_dump_regs: Dump important EMAC registers to debug terminal 387 * emac_dump_regs - Dump important EMAC registers to debug terminal
379 * @priv: The DaVinci EMAC private adapter structure 388 * @priv: The DaVinci EMAC private adapter structure
380 * 389 *
381 * Executes ethtool set cmd & sets phy mode 390 * Executes ethtool set cmd & sets phy mode
@@ -466,7 +475,7 @@ static void emac_dump_regs(struct emac_priv *priv)
466} 475}
467 476
468/** 477/**
469 * emac_get_drvinfo: Get EMAC driver information 478 * emac_get_drvinfo - Get EMAC driver information
470 * @ndev: The DaVinci EMAC network adapter 479 * @ndev: The DaVinci EMAC network adapter
471 * @info: ethtool info structure containing name and version 480 * @info: ethtool info structure containing name and version
472 * 481 *
@@ -481,7 +490,7 @@ static void emac_get_drvinfo(struct net_device *ndev,
481} 490}
482 491
483/** 492/**
484 * emac_get_settings: Get EMAC settings 493 * emac_get_settings - Get EMAC settings
485 * @ndev: The DaVinci EMAC network adapter 494 * @ndev: The DaVinci EMAC network adapter
486 * @ecmd: ethtool command 495 * @ecmd: ethtool command
487 * 496 *
@@ -500,7 +509,7 @@ static int emac_get_settings(struct net_device *ndev,
500} 509}
501 510
502/** 511/**
503 * emac_set_settings: Set EMAC settings 512 * emac_set_settings - Set EMAC settings
504 * @ndev: The DaVinci EMAC network adapter 513 * @ndev: The DaVinci EMAC network adapter
505 * @ecmd: ethtool command 514 * @ecmd: ethtool command
506 * 515 *
@@ -518,7 +527,7 @@ static int emac_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
518} 527}
519 528
520/** 529/**
521 * emac_get_coalesce : Get interrupt coalesce settings for this device 530 * emac_get_coalesce - Get interrupt coalesce settings for this device
522 * @ndev : The DaVinci EMAC network adapter 531 * @ndev : The DaVinci EMAC network adapter
523 * @coal : ethtool coalesce settings structure 532 * @coal : ethtool coalesce settings structure
524 * 533 *
@@ -536,7 +545,7 @@ static int emac_get_coalesce(struct net_device *ndev,
536} 545}
537 546
538/** 547/**
539 * emac_set_coalesce : Set interrupt coalesce settings for this device 548 * emac_set_coalesce - Set interrupt coalesce settings for this device
540 * @ndev : The DaVinci EMAC network adapter 549 * @ndev : The DaVinci EMAC network adapter
541 * @coal : ethtool coalesce settings structure 550 * @coal : ethtool coalesce settings structure
542 * 551 *
@@ -614,11 +623,9 @@ static int emac_set_coalesce(struct net_device *ndev,
614} 623}
615 624
616 625
617/** 626/* ethtool_ops: DaVinci EMAC Ethtool structure
618 * ethtool_ops: DaVinci EMAC Ethtool structure
619 * 627 *
620 * Ethtool support for EMAC adapter 628 * Ethtool support for EMAC adapter
621 *
622 */ 629 */
623static const struct ethtool_ops ethtool_ops = { 630static const struct ethtool_ops ethtool_ops = {
624 .get_drvinfo = emac_get_drvinfo, 631 .get_drvinfo = emac_get_drvinfo,
@@ -631,7 +638,7 @@ static const struct ethtool_ops ethtool_ops = {
631}; 638};
632 639
633/** 640/**
634 * emac_update_phystatus: Update Phy status 641 * emac_update_phystatus - Update Phy status
635 * @priv: The DaVinci EMAC private adapter structure 642 * @priv: The DaVinci EMAC private adapter structure
636 * 643 *
637 * Updates phy status and takes action for network queue if required 644 * Updates phy status and takes action for network queue if required
@@ -697,7 +704,7 @@ static void emac_update_phystatus(struct emac_priv *priv)
697} 704}
698 705
699/** 706/**
700 * hash_get: Calculate hash value from mac address 707 * hash_get - Calculate hash value from mac address
701 * @addr: mac address to delete from hash table 708 * @addr: mac address to delete from hash table
702 * 709 *
703 * Calculates hash value from mac address 710 * Calculates hash value from mac address
@@ -723,9 +730,9 @@ static u32 hash_get(u8 *addr)
723} 730}
724 731
725/** 732/**
726 * hash_add: Hash function to add mac addr from hash table 733 * hash_add - Hash function to add mac addr from hash table
727 * @priv: The DaVinci EMAC private adapter structure 734 * @priv: The DaVinci EMAC private adapter structure
728 * mac_addr: mac address to delete from hash table 735 * @mac_addr: mac address to delete from hash table
729 * 736 *
730 * Adds mac address to the internal hash table 737 * Adds mac address to the internal hash table
731 * 738 *
@@ -765,9 +772,9 @@ static int hash_add(struct emac_priv *priv, u8 *mac_addr)
765} 772}
766 773
767/** 774/**
768 * hash_del: Hash function to delete mac addr from hash table 775 * hash_del - Hash function to delete mac addr from hash table
769 * @priv: The DaVinci EMAC private adapter structure 776 * @priv: The DaVinci EMAC private adapter structure
770 * mac_addr: mac address to delete from hash table 777 * @mac_addr: mac address to delete from hash table
771 * 778 *
772 * Removes mac address from the internal hash table 779 * Removes mac address from the internal hash table
773 * 780 *
@@ -807,7 +814,7 @@ static int hash_del(struct emac_priv *priv, u8 *mac_addr)
807#define EMAC_ALL_MULTI_CLR 3 814#define EMAC_ALL_MULTI_CLR 3
808 815
809/** 816/**
810 * emac_add_mcast: Set multicast address in the EMAC adapter (Internal) 817 * emac_add_mcast - Set multicast address in the EMAC adapter (Internal)
811 * @priv: The DaVinci EMAC private adapter structure 818 * @priv: The DaVinci EMAC private adapter structure
812 * @action: multicast operation to perform 819 * @action: multicast operation to perform
813 * mac_addr: mac address to set 820 * mac_addr: mac address to set
@@ -855,7 +862,7 @@ static void emac_add_mcast(struct emac_priv *priv, u32 action, u8 *mac_addr)
855} 862}
856 863
857/** 864/**
858 * emac_dev_mcast_set: Set multicast address in the EMAC adapter 865 * emac_dev_mcast_set - Set multicast address in the EMAC adapter
859 * @ndev: The DaVinci EMAC network adapter 866 * @ndev: The DaVinci EMAC network adapter
860 * 867 *
861 * Set multicast addresses in EMAC adapter 868 * Set multicast addresses in EMAC adapter
@@ -901,7 +908,7 @@ static void emac_dev_mcast_set(struct net_device *ndev)
901 *************************************************************************/ 908 *************************************************************************/
902 909
903/** 910/**
904 * emac_int_disable: Disable EMAC module interrupt (from adapter) 911 * emac_int_disable - Disable EMAC module interrupt (from adapter)
905 * @priv: The DaVinci EMAC private adapter structure 912 * @priv: The DaVinci EMAC private adapter structure
906 * 913 *
907 * Disable EMAC interrupt on the adapter 914 * Disable EMAC interrupt on the adapter
@@ -931,7 +938,7 @@ static void emac_int_disable(struct emac_priv *priv)
931} 938}
932 939
933/** 940/**
934 * emac_int_enable: Enable EMAC module interrupt (from adapter) 941 * emac_int_enable - Enable EMAC module interrupt (from adapter)
935 * @priv: The DaVinci EMAC private adapter structure 942 * @priv: The DaVinci EMAC private adapter structure
936 * 943 *
937 * Enable EMAC interrupt on the adapter 944 * Enable EMAC interrupt on the adapter
@@ -967,7 +974,7 @@ static void emac_int_enable(struct emac_priv *priv)
967} 974}
968 975
969/** 976/**
970 * emac_irq: EMAC interrupt handler 977 * emac_irq - EMAC interrupt handler
971 * @irq: interrupt number 978 * @irq: interrupt number
972 * @dev_id: EMAC network adapter data structure ptr 979 * @dev_id: EMAC network adapter data structure ptr
973 * 980 *
@@ -1060,7 +1067,7 @@ static void emac_tx_handler(void *token, int len, int status)
1060} 1067}
1061 1068
1062/** 1069/**
1063 * emac_dev_xmit: EMAC Transmit function 1070 * emac_dev_xmit - EMAC Transmit function
1064 * @skb: SKB pointer 1071 * @skb: SKB pointer
1065 * @ndev: The DaVinci EMAC network adapter 1072 * @ndev: The DaVinci EMAC network adapter
1066 * 1073 *
@@ -1111,7 +1118,7 @@ fail_tx:
1111} 1118}
1112 1119
1113/** 1120/**
1114 * emac_dev_tx_timeout: EMAC Transmit timeout function 1121 * emac_dev_tx_timeout - EMAC Transmit timeout function
1115 * @ndev: The DaVinci EMAC network adapter 1122 * @ndev: The DaVinci EMAC network adapter
1116 * 1123 *
1117 * Called when system detects that a skb timeout period has expired 1124 * Called when system detects that a skb timeout period has expired
@@ -1138,7 +1145,7 @@ static void emac_dev_tx_timeout(struct net_device *ndev)
1138} 1145}
1139 1146
1140/** 1147/**
1141 * emac_set_type0addr: Set EMAC Type0 mac address 1148 * emac_set_type0addr - Set EMAC Type0 mac address
1142 * @priv: The DaVinci EMAC private adapter structure 1149 * @priv: The DaVinci EMAC private adapter structure
1143 * @ch: RX channel number 1150 * @ch: RX channel number
1144 * @mac_addr: MAC address to set in device 1151 * @mac_addr: MAC address to set in device
@@ -1165,7 +1172,7 @@ static void emac_set_type0addr(struct emac_priv *priv, u32 ch, char *mac_addr)
1165} 1172}
1166 1173
1167/** 1174/**
1168 * emac_set_type1addr: Set EMAC Type1 mac address 1175 * emac_set_type1addr - Set EMAC Type1 mac address
1169 * @priv: The DaVinci EMAC private adapter structure 1176 * @priv: The DaVinci EMAC private adapter structure
1170 * @ch: RX channel number 1177 * @ch: RX channel number
1171 * @mac_addr: MAC address to set in device 1178 * @mac_addr: MAC address to set in device
@@ -1187,7 +1194,7 @@ static void emac_set_type1addr(struct emac_priv *priv, u32 ch, char *mac_addr)
1187} 1194}
1188 1195
1189/** 1196/**
1190 * emac_set_type2addr: Set EMAC Type2 mac address 1197 * emac_set_type2addr - Set EMAC Type2 mac address
1191 * @priv: The DaVinci EMAC private adapter structure 1198 * @priv: The DaVinci EMAC private adapter structure
1192 * @ch: RX channel number 1199 * @ch: RX channel number
1193 * @mac_addr: MAC address to set in device 1200 * @mac_addr: MAC address to set in device
@@ -1213,7 +1220,7 @@ static void emac_set_type2addr(struct emac_priv *priv, u32 ch,
1213} 1220}
1214 1221
1215/** 1222/**
1216 * emac_setmac: Set mac address in the adapter (internal function) 1223 * emac_setmac - Set mac address in the adapter (internal function)
1217 * @priv: The DaVinci EMAC private adapter structure 1224 * @priv: The DaVinci EMAC private adapter structure
1218 * @ch: RX channel number 1225 * @ch: RX channel number
1219 * @mac_addr: MAC address to set in device 1226 * @mac_addr: MAC address to set in device
@@ -1242,7 +1249,7 @@ static void emac_setmac(struct emac_priv *priv, u32 ch, char *mac_addr)
1242} 1249}
1243 1250
1244/** 1251/**
1245 * emac_dev_setmac_addr: Set mac address in the adapter 1252 * emac_dev_setmac_addr - Set mac address in the adapter
1246 * @ndev: The DaVinci EMAC network adapter 1253 * @ndev: The DaVinci EMAC network adapter
1247 * @addr: MAC address to set in device 1254 * @addr: MAC address to set in device
1248 * 1255 *
@@ -1277,7 +1284,7 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
1277} 1284}
1278 1285
1279/** 1286/**
1280 * emac_hw_enable: Enable EMAC hardware for packet transmission/reception 1287 * emac_hw_enable - Enable EMAC hardware for packet transmission/reception
1281 * @priv: The DaVinci EMAC private adapter structure 1288 * @priv: The DaVinci EMAC private adapter structure
1282 * 1289 *
1283 * Enables EMAC hardware for packet processing - enables PHY, enables RX 1290 * Enables EMAC hardware for packet processing - enables PHY, enables RX
@@ -1347,7 +1354,7 @@ static int emac_hw_enable(struct emac_priv *priv)
1347} 1354}
1348 1355
1349/** 1356/**
1350 * emac_poll: EMAC NAPI Poll function 1357 * emac_poll - EMAC NAPI Poll function
1351 * @ndev: The DaVinci EMAC network adapter 1358 * @ndev: The DaVinci EMAC network adapter
1352 * @budget: Number of receive packets to process (as told by NAPI layer) 1359 * @budget: Number of receive packets to process (as told by NAPI layer)
1353 * 1360 *
@@ -1430,7 +1437,7 @@ static int emac_poll(struct napi_struct *napi, int budget)
1430 1437
1431#ifdef CONFIG_NET_POLL_CONTROLLER 1438#ifdef CONFIG_NET_POLL_CONTROLLER
1432/** 1439/**
1433 * emac_poll_controller: EMAC Poll controller function 1440 * emac_poll_controller - EMAC Poll controller function
1434 * @ndev: The DaVinci EMAC network adapter 1441 * @ndev: The DaVinci EMAC network adapter
1435 * 1442 *
1436 * Polled functionality used by netconsole and others in non interrupt mode 1443 * Polled functionality used by netconsole and others in non interrupt mode
@@ -1489,7 +1496,7 @@ static void emac_adjust_link(struct net_device *ndev)
1489 *************************************************************************/ 1496 *************************************************************************/
1490 1497
1491/** 1498/**
1492 * emac_devioctl: EMAC adapter ioctl 1499 * emac_devioctl - EMAC adapter ioctl
1493 * @ndev: The DaVinci EMAC network adapter 1500 * @ndev: The DaVinci EMAC network adapter
1494 * @ifrq: request parameter 1501 * @ifrq: request parameter
1495 * @cmd: command parameter 1502 * @cmd: command parameter
@@ -1516,7 +1523,7 @@ static int match_first_device(struct device *dev, void *data)
1516} 1523}
1517 1524
1518/** 1525/**
1519 * emac_dev_open: EMAC device open 1526 * emac_dev_open - EMAC device open
1520 * @ndev: The DaVinci EMAC network adapter 1527 * @ndev: The DaVinci EMAC network adapter
1521 * 1528 *
1522 * Called when system wants to start the interface. We init TX/RX channels 1529 * Called when system wants to start the interface. We init TX/RX channels
@@ -1649,7 +1656,7 @@ rollback:
1649} 1656}
1650 1657
1651/** 1658/**
1652 * emac_dev_stop: EMAC device stop 1659 * emac_dev_stop - EMAC device stop
1653 * @ndev: The DaVinci EMAC network adapter 1660 * @ndev: The DaVinci EMAC network adapter
1654 * 1661 *
1655 * Called when system wants to stop or down the interface. We stop the network 1662 * Called when system wants to stop or down the interface. We stop the network
@@ -1691,7 +1698,7 @@ static int emac_dev_stop(struct net_device *ndev)
1691} 1698}
1692 1699
1693/** 1700/**
1694 * emac_dev_getnetstats: EMAC get statistics function 1701 * emac_dev_getnetstats - EMAC get statistics function
1695 * @ndev: The DaVinci EMAC network adapter 1702 * @ndev: The DaVinci EMAC network adapter
1696 * 1703 *
1697 * Called when system wants to get statistics from the device. 1704 * Called when system wants to get statistics from the device.
@@ -1762,8 +1769,79 @@ static const struct net_device_ops emac_netdev_ops = {
1762#endif 1769#endif
1763}; 1770};
1764 1771
1772#ifdef CONFIG_OF
1773static struct emac_platform_data
1774 *davinci_emac_of_get_pdata(struct platform_device *pdev,
1775 struct emac_priv *priv)
1776{
1777 struct device_node *np;
1778 struct emac_platform_data *pdata = NULL;
1779 const u8 *mac_addr;
1780 u32 data;
1781 int ret;
1782
1783 pdata = pdev->dev.platform_data;
1784 if (!pdata) {
1785 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1786 if (!pdata)
1787 goto nodata;
1788 }
1789
1790 np = pdev->dev.of_node;
1791 if (!np)
1792 goto nodata;
1793 else
1794 pdata->version = EMAC_VERSION_2;
1795
1796 if (!is_valid_ether_addr(pdata->mac_addr)) {
1797 mac_addr = of_get_mac_address(np);
1798 if (mac_addr)
1799 memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);
1800 }
1801
1802 ret = of_property_read_u32(np, "ti,davinci-ctrl-reg-offset", &data);
1803 if (!ret)
1804 pdata->ctrl_reg_offset = data;
1805
1806 ret = of_property_read_u32(np, "ti,davinci-ctrl-mod-reg-offset",
1807 &data);
1808 if (!ret)
1809 pdata->ctrl_mod_reg_offset = data;
1810
1811 ret = of_property_read_u32(np, "ti,davinci-ctrl-ram-offset", &data);
1812 if (!ret)
1813 pdata->ctrl_ram_offset = data;
1814
1815 ret = of_property_read_u32(np, "ti,davinci-ctrl-ram-size", &data);
1816 if (!ret)
1817 pdata->ctrl_ram_size = data;
1818
1819 ret = of_property_read_u32(np, "ti,davinci-rmii-en", &data);
1820 if (!ret)
1821 pdata->rmii_en = data;
1822
1823 ret = of_property_read_u32(np, "ti,davinci-no-bd-ram", &data);
1824 if (!ret)
1825 pdata->no_bd_ram = data;
1826
1827 priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
1828 if (!priv->phy_node)
1829 pdata->phy_id = "";
1830
1831 pdev->dev.platform_data = pdata;
1832nodata:
1833 return pdata;
1834}
1835#else
1836static struct emac_platform_data
1837 *davinci_emac_of_get_pdata(struct platform_device *pdev,
1838 struct emac_priv *priv)
1839{
1840 return pdev->dev.platform_data;
1841}
1842#endif
1765/** 1843/**
1766 * davinci_emac_probe: EMAC device probe 1844 * davinci_emac_probe - EMAC device probe
1767 * @pdev: The DaVinci EMAC device that we are removing 1845 * @pdev: The DaVinci EMAC device that we are removing
1768 * 1846 *
1769 * Called when probing for emac devicesr. We get details of instances and 1847 * Called when probing for emac devicesr. We get details of instances and
@@ -1804,7 +1882,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
1804 1882
1805 spin_lock_init(&priv->lock); 1883 spin_lock_init(&priv->lock);
1806 1884
1807 pdata = pdev->dev.platform_data; 1885 pdata = davinci_emac_of_get_pdata(pdev, priv);
1808 if (!pdata) { 1886 if (!pdata) {
1809 dev_err(&pdev->dev, "no platform data\n"); 1887 dev_err(&pdev->dev, "no platform data\n");
1810 rc = -ENODEV; 1888 rc = -ENODEV;
@@ -1949,7 +2027,7 @@ free_clk:
1949} 2027}
1950 2028
1951/** 2029/**
1952 * davinci_emac_remove: EMAC device remove 2030 * davinci_emac_remove - EMAC device remove
1953 * @pdev: The DaVinci EMAC device that we are removing 2031 * @pdev: The DaVinci EMAC device that we are removing
1954 * 2032 *
1955 * Called when removing the device driver. We disable clock usage and release 2033 * Called when removing the device driver. We disable clock usage and release
@@ -2015,21 +2093,26 @@ static const struct dev_pm_ops davinci_emac_pm_ops = {
2015 .resume = davinci_emac_resume, 2093 .resume = davinci_emac_resume,
2016}; 2094};
2017 2095
2018/** 2096static const struct of_device_id davinci_emac_of_match[] = {
2019 * davinci_emac_driver: EMAC platform driver structure 2097 {.compatible = "ti,davinci-dm6467-emac", },
2020 */ 2098 {},
2099};
2100MODULE_DEVICE_TABLE(of, davinci_emac_of_match);
2101
2102/* davinci_emac_driver: EMAC platform driver structure */
2021static struct platform_driver davinci_emac_driver = { 2103static struct platform_driver davinci_emac_driver = {
2022 .driver = { 2104 .driver = {
2023 .name = "davinci_emac", 2105 .name = "davinci_emac",
2024 .owner = THIS_MODULE, 2106 .owner = THIS_MODULE,
2025 .pm = &davinci_emac_pm_ops, 2107 .pm = &davinci_emac_pm_ops,
2108 .of_match_table = of_match_ptr(davinci_emac_of_match),
2026 }, 2109 },
2027 .probe = davinci_emac_probe, 2110 .probe = davinci_emac_probe,
2028 .remove = __devexit_p(davinci_emac_remove), 2111 .remove = __devexit_p(davinci_emac_remove),
2029}; 2112};
2030 2113
2031/** 2114/**
2032 * davinci_emac_init: EMAC driver module init 2115 * davinci_emac_init - EMAC driver module init
2033 * 2116 *
2034 * Called when initializing the driver. We register the driver with 2117 * Called when initializing the driver. We register the driver with
2035 * the platform. 2118 * the platform.
@@ -2041,7 +2124,7 @@ static int __init davinci_emac_init(void)
2041late_initcall(davinci_emac_init); 2124late_initcall(davinci_emac_init);
2042 2125
2043/** 2126/**
2044 * davinci_emac_exit: EMAC driver module exit 2127 * davinci_emac_exit - EMAC driver module exit
2045 * 2128 *
2046 * Called when exiting the driver completely. We unregister the driver with 2129 * Called when exiting the driver completely. We unregister the driver with
2047 * the platform and exit 2130 * the platform and exit
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index e4e47088e26b..cd7ee204e94a 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -34,6 +34,7 @@
34#include <linux/clk.h> 34#include <linux/clk.h>
35#include <linux/err.h> 35#include <linux/err.h>
36#include <linux/io.h> 36#include <linux/io.h>
37#include <linux/pm_runtime.h>
37#include <linux/davinci_emac.h> 38#include <linux/davinci_emac.h>
38 39
39/* 40/*
@@ -321,7 +322,9 @@ static int __devinit davinci_mdio_probe(struct platform_device *pdev)
321 snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x", 322 snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
322 pdev->name, pdev->id); 323 pdev->name, pdev->id);
323 324
324 data->clk = clk_get(dev, NULL); 325 pm_runtime_enable(&pdev->dev);
326 pm_runtime_get_sync(&pdev->dev);
327 data->clk = clk_get(&pdev->dev, "fck");
325 if (IS_ERR(data->clk)) { 328 if (IS_ERR(data->clk)) {
326 dev_err(dev, "failed to get device clock\n"); 329 dev_err(dev, "failed to get device clock\n");
327 ret = PTR_ERR(data->clk); 330 ret = PTR_ERR(data->clk);
@@ -329,8 +332,6 @@ static int __devinit davinci_mdio_probe(struct platform_device *pdev)
329 goto bail_out; 332 goto bail_out;
330 } 333 }
331 334
332 clk_enable(data->clk);
333
334 dev_set_drvdata(dev, data); 335 dev_set_drvdata(dev, data);
335 data->dev = dev; 336 data->dev = dev;
336 spin_lock_init(&data->lock); 337 spin_lock_init(&data->lock);
@@ -378,10 +379,10 @@ bail_out:
378 if (data->bus) 379 if (data->bus)
379 mdiobus_free(data->bus); 380 mdiobus_free(data->bus);
380 381
381 if (data->clk) { 382 if (data->clk)
382 clk_disable(data->clk);
383 clk_put(data->clk); 383 clk_put(data->clk);
384 } 384 pm_runtime_put_sync(&pdev->dev);
385 pm_runtime_disable(&pdev->dev);
385 386
386 kfree(data); 387 kfree(data);
387 388
@@ -396,10 +397,10 @@ static int __devexit davinci_mdio_remove(struct platform_device *pdev)
396 if (data->bus) 397 if (data->bus)
397 mdiobus_free(data->bus); 398 mdiobus_free(data->bus);
398 399
399 if (data->clk) { 400 if (data->clk)
400 clk_disable(data->clk);
401 clk_put(data->clk); 401 clk_put(data->clk);
402 } 402 pm_runtime_put_sync(&pdev->dev);
403 pm_runtime_disable(&pdev->dev);
403 404
404 dev_set_drvdata(dev, NULL); 405 dev_set_drvdata(dev, NULL);
405 406
@@ -421,8 +422,7 @@ static int davinci_mdio_suspend(struct device *dev)
421 __raw_writel(ctrl, &data->regs->control); 422 __raw_writel(ctrl, &data->regs->control);
422 wait_for_idle(data); 423 wait_for_idle(data);
423 424
424 if (data->clk) 425 pm_runtime_put_sync(data->dev);
425 clk_disable(data->clk);
426 426
427 data->suspended = true; 427 data->suspended = true;
428 spin_unlock(&data->lock); 428 spin_unlock(&data->lock);
@@ -436,8 +436,7 @@ static int davinci_mdio_resume(struct device *dev)
436 u32 ctrl; 436 u32 ctrl;
437 437
438 spin_lock(&data->lock); 438 spin_lock(&data->lock);
439 if (data->clk) 439 pm_runtime_put_sync(data->dev);
440 clk_enable(data->clk);
441 440
442 /* restart the scan state machine */ 441 /* restart the scan state machine */
443 ctrl = __raw_readl(&data->regs->control); 442 ctrl = __raw_readl(&data->regs->control);
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 83b4b388ad49..7f500288f6b3 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -1844,7 +1844,7 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
1844 memcpy(dev->dev_addr, mac, 6); 1844 memcpy(dev->dev_addr, mac, 6);
1845 dev->addr_len = 6; 1845 dev->addr_len = 6;
1846 } else { 1846 } else {
1847 random_ether_addr(dev->dev_addr); 1847 eth_random_addr(dev->dev_addr);
1848 } 1848 }
1849 1849
1850 /* Register the network device. */ 1850 /* Register the network device. */
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 6199f6b387b6..c1ebfe9efcb3 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -114,7 +114,8 @@ spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
114 out_be32(card->regs + reg, value); 114 out_be32(card->regs + reg, value);
115} 115}
116 116
117/** spider_net_write_phy - write to phy register 117/**
118 * spider_net_write_phy - write to phy register
118 * @netdev: adapter to be written to 119 * @netdev: adapter to be written to
119 * @mii_id: id of MII 120 * @mii_id: id of MII
120 * @reg: PHY register 121 * @reg: PHY register
@@ -137,7 +138,8 @@ spider_net_write_phy(struct net_device *netdev, int mii_id,
137 spider_net_write_reg(card, SPIDER_NET_GPCWOPCMD, writevalue); 138 spider_net_write_reg(card, SPIDER_NET_GPCWOPCMD, writevalue);
138} 139}
139 140
140/** spider_net_read_phy - read from phy register 141/**
142 * spider_net_read_phy - read from phy register
141 * @netdev: network device to be read from 143 * @netdev: network device to be read from
142 * @mii_id: id of MII 144 * @mii_id: id of MII
143 * @reg: PHY register 145 * @reg: PHY register
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index ea3e0a21ba74..a46c19859683 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -486,7 +486,7 @@ static void __devinit velocity_get_options(struct velocity_opt *opts, int index,
486 velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname); 486 velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
487 velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname); 487 velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
488 velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname); 488 velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
489 velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname); 489 velocity_set_int_opt(&opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
490 opts->numrx = (opts->numrx & ~3); 490 opts->numrx = (opts->numrx & ~3);
491} 491}
492 492
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index a75e9ef5a4ce..a5826a3111a6 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -637,7 +637,7 @@ static int __devinit w5100_hw_probe(struct platform_device *pdev)
637 if (data && is_valid_ether_addr(data->mac_addr)) { 637 if (data && is_valid_ether_addr(data->mac_addr)) {
638 memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN); 638 memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
639 } else { 639 } else {
640 random_ether_addr(ndev->dev_addr); 640 eth_random_addr(ndev->dev_addr);
641 ndev->addr_assign_type |= NET_ADDR_RANDOM; 641 ndev->addr_assign_type |= NET_ADDR_RANDOM;
642 } 642 }
643 643
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 3306a20ec211..bdd8891c215a 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -557,7 +557,7 @@ static int __devinit w5300_hw_probe(struct platform_device *pdev)
557 if (data && is_valid_ether_addr(data->mac_addr)) { 557 if (data && is_valid_ether_addr(data->mac_addr)) {
558 memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN); 558 memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
559 } else { 559 } else {
560 random_ether_addr(ndev->dev_addr); 560 eth_random_addr(ndev->dev_addr);
561 ndev->addr_assign_type |= NET_ADDR_RANDOM; 561 ndev->addr_assign_type |= NET_ADDR_RANDOM;
562 } 562 }
563 563
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 1eaf7128afee..f8e351880119 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -197,7 +197,7 @@ static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
197#endif 197#endif
198 198
199/** 199/**
200 * * temac_dma_bd_release - Release buffer descriptor rings 200 * temac_dma_bd_release - Release buffer descriptor rings
201 */ 201 */
202static void temac_dma_bd_release(struct net_device *ndev) 202static void temac_dma_bd_release(struct net_device *ndev)
203{ 203{
@@ -768,7 +768,6 @@ static void ll_temac_recv(struct net_device *ndev)
768 DMA_FROM_DEVICE); 768 DMA_FROM_DEVICE);
769 769
770 skb_put(skb, length); 770 skb_put(skb, length);
771 skb->dev = ndev;
772 skb->protocol = eth_type_trans(skb, ndev); 771 skb->protocol = eth_type_trans(skb, ndev);
773 skb_checksum_none_assert(skb); 772 skb_checksum_none_assert(skb);
774 773
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 9c365e192a31..0793299bd39e 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -312,7 +312,7 @@ static void axienet_set_mac_address(struct net_device *ndev, void *address)
312 if (address) 312 if (address)
313 memcpy(ndev->dev_addr, address, ETH_ALEN); 313 memcpy(ndev->dev_addr, address, ETH_ALEN);
314 if (!is_valid_ether_addr(ndev->dev_addr)) 314 if (!is_valid_ether_addr(ndev->dev_addr))
315 random_ether_addr(ndev->dev_addr); 315 eth_random_addr(ndev->dev_addr);
316 316
317 /* Set up unicast MAC address filter set its mac address */ 317 /* Set up unicast MAC address filter set its mac address */
318 axienet_iow(lp, XAE_UAW0_OFFSET, 318 axienet_iow(lp, XAE_UAW0_OFFSET,
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 4ad80f771099..6695a1dadf4e 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -2962,7 +2962,7 @@ static int dfx_rcv_init(DFX_board_t *bp, int get_buffers)
2962 bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP | 2962 bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
2963 ((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN)); 2963 ((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
2964 bp->descr_block_virt->rcv_data[i+j].long_1 = (u32) (bp->rcv_block_phys + (i * PI_RCV_DATA_K_SIZE_MAX)); 2964 bp->descr_block_virt->rcv_data[i+j].long_1 = (u32) (bp->rcv_block_phys + (i * PI_RCV_DATA_K_SIZE_MAX));
2965 bp->p_rcv_buff_va[i+j] = (char *) (bp->rcv_block_virt + (i * PI_RCV_DATA_K_SIZE_MAX)); 2965 bp->p_rcv_buff_va[i+j] = (bp->rcv_block_virt + (i * PI_RCV_DATA_K_SIZE_MAX));
2966 } 2966 }
2967#endif 2967#endif
2968 } 2968 }
@@ -3030,7 +3030,7 @@ static void dfx_rcv_queue_process(
3030#ifdef DYNAMIC_BUFFERS 3030#ifdef DYNAMIC_BUFFERS
3031 p_buff = (char *) (((struct sk_buff *)bp->p_rcv_buff_va[entry])->data); 3031 p_buff = (char *) (((struct sk_buff *)bp->p_rcv_buff_va[entry])->data);
3032#else 3032#else
3033 p_buff = (char *) bp->p_rcv_buff_va[entry]; 3033 p_buff = bp->p_rcv_buff_va[entry];
3034#endif 3034#endif
3035 memcpy(&descr, p_buff + RCV_BUFF_K_DESCR, sizeof(u32)); 3035 memcpy(&descr, p_buff + RCV_BUFF_K_DESCR, sizeof(u32));
3036 3036
diff --git a/drivers/net/fddi/skfp/pmf.c b/drivers/net/fddi/skfp/pmf.c
index 9ac4665d7411..24d8566cfd8b 100644
--- a/drivers/net/fddi/skfp/pmf.c
+++ b/drivers/net/fddi/skfp/pmf.c
@@ -1242,7 +1242,7 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
1242 if (len < 8) 1242 if (len < 8)
1243 goto len_error ; 1243 goto len_error ;
1244 if (set) 1244 if (set)
1245 memcpy((char *) to,(char *) from+2,6) ; 1245 memcpy(to,from+2,6) ;
1246 to += 8 ; 1246 to += 8 ;
1247 from += 8 ; 1247 from += 8 ;
1248 len -= 8 ; 1248 len -= 8 ;
@@ -1251,7 +1251,7 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
1251 if (len < 4) 1251 if (len < 4)
1252 goto len_error ; 1252 goto len_error ;
1253 if (set) 1253 if (set)
1254 memcpy((char *) to,(char *) from,4) ; 1254 memcpy(to,from,4) ;
1255 to += 4 ; 1255 to += 4 ;
1256 from += 4 ; 1256 from += 4 ;
1257 len -= 4 ; 1257 len -= 4 ;
@@ -1260,7 +1260,7 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
1260 if (len < 8) 1260 if (len < 8)
1261 goto len_error ; 1261 goto len_error ;
1262 if (set) 1262 if (set)
1263 memcpy((char *) to,(char *) from,8) ; 1263 memcpy(to,from,8) ;
1264 to += 8 ; 1264 to += 8 ;
1265 from += 8 ; 1265 from += 8 ;
1266 len -= 8 ; 1266 len -= 8 ;
@@ -1269,7 +1269,7 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
1269 if (len < 32) 1269 if (len < 32)
1270 goto len_error ; 1270 goto len_error ;
1271 if (set) 1271 if (set)
1272 memcpy((char *) to,(char *) from,32) ; 1272 memcpy(to,from,32) ;
1273 to += 32 ; 1273 to += 32 ;
1274 from += 32 ; 1274 from += 32 ;
1275 len -= 32 ; 1275 len -= 32 ;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index aed1a6105b24..2c0894a92abd 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -485,7 +485,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
485 485
486 return; 486 return;
487 default: 487 default:
488 count = kiss_esc(p, (unsigned char *)ax->xbuff, len); 488 count = kiss_esc(p, ax->xbuff, len);
489 } 489 }
490 } else { 490 } else {
491 unsigned short crc; 491 unsigned short crc;
@@ -497,7 +497,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
497 case CRC_MODE_SMACK: 497 case CRC_MODE_SMACK:
498 *p |= 0x80; 498 *p |= 0x80;
499 crc = swab16(crc16(0, p, len)); 499 crc = swab16(crc16(0, p, len));
500 count = kiss_esc_crc(p, (unsigned char *)ax->xbuff, crc, len+2); 500 count = kiss_esc_crc(p, ax->xbuff, crc, len+2);
501 break; 501 break;
502 case CRC_MODE_FLEX_TEST: 502 case CRC_MODE_FLEX_TEST:
503 ax->crcmode = CRC_MODE_NONE; 503 ax->crcmode = CRC_MODE_NONE;
@@ -506,11 +506,11 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
506 case CRC_MODE_FLEX: 506 case CRC_MODE_FLEX:
507 *p |= 0x20; 507 *p |= 0x20;
508 crc = calc_crc_flex(p, len); 508 crc = calc_crc_flex(p, len);
509 count = kiss_esc_crc(p, (unsigned char *)ax->xbuff, crc, len+2); 509 count = kiss_esc_crc(p, ax->xbuff, crc, len+2);
510 break; 510 break;
511 511
512 default: 512 default:
513 count = kiss_esc(p, (unsigned char *)ax->xbuff, len); 513 count = kiss_esc(p, ax->xbuff, len);
514 } 514 }
515 } 515 }
516 spin_unlock_bh(&ax->buflock); 516 spin_unlock_bh(&ax->buflock);
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 2857ab078aac..95ceb3593043 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -131,6 +131,7 @@ int rndis_filter_send(struct hv_device *dev,
131 struct hv_netvsc_packet *pkt); 131 struct hv_netvsc_packet *pkt);
132 132
133int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter); 133int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter);
134int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac);
134 135
135 136
136#define NVSP_INVALID_PROTOCOL_VERSION ((u32)0xFFFFFFFF) 137#define NVSP_INVALID_PROTOCOL_VERSION ((u32)0xFFFFFFFF)
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 0c569831db5a..6cee2917eb02 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -614,7 +614,7 @@ retry_send_cmplt:
614static void netvsc_receive_completion(void *context) 614static void netvsc_receive_completion(void *context)
615{ 615{
616 struct hv_netvsc_packet *packet = context; 616 struct hv_netvsc_packet *packet = context;
617 struct hv_device *device = (struct hv_device *)packet->device; 617 struct hv_device *device = packet->device;
618 struct netvsc_device *net_device; 618 struct netvsc_device *net_device;
619 u64 transaction_id = 0; 619 u64 transaction_id = 0;
620 bool fsend_receive_comp = false; 620 bool fsend_receive_comp = false;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 8f8ed3320425..8e23c084c4a7 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -341,6 +341,34 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
341 return 0; 341 return 0;
342} 342}
343 343
344
345static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
346{
347 struct net_device_context *ndevctx = netdev_priv(ndev);
348 struct hv_device *hdev = ndevctx->device_ctx;
349 struct sockaddr *addr = p;
350 char save_adr[14];
351 unsigned char save_aatype;
352 int err;
353
354 memcpy(save_adr, ndev->dev_addr, ETH_ALEN);
355 save_aatype = ndev->addr_assign_type;
356
357 err = eth_mac_addr(ndev, p);
358 if (err != 0)
359 return err;
360
361 err = rndis_filter_set_device_mac(hdev, addr->sa_data);
362 if (err != 0) {
363 /* roll back to saved MAC */
364 memcpy(ndev->dev_addr, save_adr, ETH_ALEN);
365 ndev->addr_assign_type = save_aatype;
366 }
367
368 return err;
369}
370
371
344static const struct ethtool_ops ethtool_ops = { 372static const struct ethtool_ops ethtool_ops = {
345 .get_drvinfo = netvsc_get_drvinfo, 373 .get_drvinfo = netvsc_get_drvinfo,
346 .get_link = ethtool_op_get_link, 374 .get_link = ethtool_op_get_link,
@@ -353,7 +381,7 @@ static const struct net_device_ops device_ops = {
353 .ndo_set_rx_mode = netvsc_set_multicast_list, 381 .ndo_set_rx_mode = netvsc_set_multicast_list,
354 .ndo_change_mtu = netvsc_change_mtu, 382 .ndo_change_mtu = netvsc_change_mtu,
355 .ndo_validate_addr = eth_validate_addr, 383 .ndo_validate_addr = eth_validate_addr,
356 .ndo_set_mac_address = eth_mac_addr, 384 .ndo_set_mac_address = netvsc_set_mac_addr,
357}; 385};
358 386
359/* 387/*
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 981ebb115637..fbf539468205 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -27,6 +27,7 @@
27#include <linux/if_ether.h> 27#include <linux/if_ether.h>
28#include <linux/netdevice.h> 28#include <linux/netdevice.h>
29#include <linux/if_vlan.h> 29#include <linux/if_vlan.h>
30#include <linux/nls.h>
30 31
31#include "hyperv_net.h" 32#include "hyperv_net.h"
32 33
@@ -47,6 +48,7 @@ struct rndis_request {
47 struct hv_page_buffer buf; 48 struct hv_page_buffer buf;
48 /* FIXME: We assumed a fixed size request here. */ 49 /* FIXME: We assumed a fixed size request here. */
49 struct rndis_message request_msg; 50 struct rndis_message request_msg;
51 u8 ext[100];
50}; 52};
51 53
52static void rndis_filter_send_completion(void *ctx); 54static void rndis_filter_send_completion(void *ctx);
@@ -511,6 +513,83 @@ static int rndis_filter_query_device_mac(struct rndis_device *dev)
511 dev->hw_mac_adr, &size); 513 dev->hw_mac_adr, &size);
512} 514}
513 515
516#define NWADR_STR "NetworkAddress"
517#define NWADR_STRLEN 14
518
519int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac)
520{
521 struct netvsc_device *nvdev = hv_get_drvdata(hdev);
522 struct rndis_device *rdev = nvdev->extension;
523 struct net_device *ndev = nvdev->ndev;
524 struct rndis_request *request;
525 struct rndis_set_request *set;
526 struct rndis_config_parameter_info *cpi;
527 wchar_t *cfg_nwadr, *cfg_mac;
528 struct rndis_set_complete *set_complete;
529 char macstr[2*ETH_ALEN+1];
530 u32 extlen = sizeof(struct rndis_config_parameter_info) +
531 2*NWADR_STRLEN + 4*ETH_ALEN;
532 int ret, t;
533
534 request = get_rndis_request(rdev, RNDIS_MSG_SET,
535 RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
536 if (!request)
537 return -ENOMEM;
538
539 set = &request->request_msg.msg.set_req;
540 set->oid = RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER;
541 set->info_buflen = extlen;
542 set->info_buf_offset = sizeof(struct rndis_set_request);
543 set->dev_vc_handle = 0;
544
545 cpi = (struct rndis_config_parameter_info *)((ulong)set +
546 set->info_buf_offset);
547 cpi->parameter_name_offset =
548 sizeof(struct rndis_config_parameter_info);
549 /* Multiply by 2 because host needs 2 bytes (utf16) for each char */
550 cpi->parameter_name_length = 2*NWADR_STRLEN;
551 cpi->parameter_type = RNDIS_CONFIG_PARAM_TYPE_STRING;
552 cpi->parameter_value_offset =
553 cpi->parameter_name_offset + cpi->parameter_name_length;
554 /* Multiply by 4 because each MAC byte displayed as 2 utf16 chars */
555 cpi->parameter_value_length = 4*ETH_ALEN;
556
557 cfg_nwadr = (wchar_t *)((ulong)cpi + cpi->parameter_name_offset);
558 cfg_mac = (wchar_t *)((ulong)cpi + cpi->parameter_value_offset);
559 ret = utf8s_to_utf16s(NWADR_STR, NWADR_STRLEN, UTF16_HOST_ENDIAN,
560 cfg_nwadr, NWADR_STRLEN);
561 if (ret < 0)
562 goto cleanup;
563 snprintf(macstr, 2*ETH_ALEN+1, "%pm", mac);
564 ret = utf8s_to_utf16s(macstr, 2*ETH_ALEN, UTF16_HOST_ENDIAN,
565 cfg_mac, 2*ETH_ALEN);
566 if (ret < 0)
567 goto cleanup;
568
569 ret = rndis_filter_send_request(rdev, request);
570 if (ret != 0)
571 goto cleanup;
572
573 t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
574 if (t == 0) {
575 netdev_err(ndev, "timeout before we got a set response...\n");
576 /*
577 * can't put_rndis_request, since we may still receive a
578 * send-completion.
579 */
580 return -EBUSY;
581 } else {
582 set_complete = &request->response_msg.msg.set_complete;
583 if (set_complete->status != RNDIS_STATUS_SUCCESS)
584 ret = -EINVAL;
585 }
586
587cleanup:
588 put_rndis_request(rdev, request);
589 return ret;
590}
591
592
514static int rndis_filter_query_device_link_status(struct rndis_device *dev) 593static int rndis_filter_query_device_link_status(struct rndis_device *dev)
515{ 594{
516 u32 size = sizeof(u32); 595 u32 size = sizeof(u32);
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index dcc80d652b78..84872043b5c6 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -1017,7 +1017,7 @@ static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
1017{ 1017{
1018 1018
1019 int iobase; 1019 int iobase;
1020 struct ali_ircc_cb *self = (struct ali_ircc_cb *) priv; 1020 struct ali_ircc_cb *self = priv;
1021 struct net_device *dev; 1021 struct net_device *dev;
1022 1022
1023 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ ); 1023 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
@@ -1052,7 +1052,7 @@ static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
1052 */ 1052 */
1053static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed) 1053static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
1054{ 1054{
1055 struct ali_ircc_cb *self = (struct ali_ircc_cb *) priv; 1055 struct ali_ircc_cb *self = priv;
1056 unsigned long flags; 1056 unsigned long flags;
1057 int iobase; 1057 int iobase;
1058 int fcr; /* FIFO control reg */ 1058 int fcr; /* FIFO control reg */
@@ -1121,7 +1121,7 @@ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
1121static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed) 1121static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
1122{ 1122{
1123 1123
1124 struct ali_ircc_cb *self = (struct ali_ircc_cb *) priv; 1124 struct ali_ircc_cb *self = priv;
1125 int iobase,dongle_id; 1125 int iobase,dongle_id;
1126 int tmp = 0; 1126 int tmp = 0;
1127 1127
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index fc503aa5288e..e09417df8f39 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -794,7 +794,7 @@ static int __devinit au1k_irda_net_init(struct net_device *dev)
794 794
795 /* allocate the data buffers */ 795 /* allocate the data buffers */
796 aup->db[0].vaddr = 796 aup->db[0].vaddr =
797 (void *)dma_alloc(MAX_BUF_SIZE * 2 * NUM_IR_DESC, &temp); 797 dma_alloc(MAX_BUF_SIZE * 2 * NUM_IR_DESC, &temp);
798 if (!aup->db[0].vaddr) 798 if (!aup->db[0].vaddr)
799 goto out3; 799 goto out3;
800 800
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 2ee56de7b0ca..0737bd4d1669 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -847,13 +847,12 @@ static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
847 const struct iovec *iv, unsigned long len, 847 const struct iovec *iv, unsigned long len,
848 int noblock) 848 int noblock)
849{ 849{
850 DECLARE_WAITQUEUE(wait, current); 850 DEFINE_WAIT(wait);
851 struct sk_buff *skb; 851 struct sk_buff *skb;
852 ssize_t ret = 0; 852 ssize_t ret = 0;
853 853
854 add_wait_queue(sk_sleep(&q->sk), &wait);
855 while (len) { 854 while (len) {
856 current->state = TASK_INTERRUPTIBLE; 855 prepare_to_wait(sk_sleep(&q->sk), &wait, TASK_INTERRUPTIBLE);
857 856
858 /* Read frames from the queue */ 857 /* Read frames from the queue */
859 skb = skb_dequeue(&q->sk.sk_receive_queue); 858 skb = skb_dequeue(&q->sk.sk_receive_queue);
@@ -875,8 +874,7 @@ static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
875 break; 874 break;
876 } 875 }
877 876
878 current->state = TASK_RUNNING; 877 finish_wait(sk_sleep(&q->sk), &wait);
879 remove_wait_queue(sk_sleep(&q->sk), &wait);
880 return ret; 878 return ret;
881} 879}
882 880
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 944cdfb80fe4..3090dc65a6f1 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -67,6 +67,11 @@ config BCM63XX_PHY
67 ---help--- 67 ---help---
68 Currently supports the 6348 and 6358 PHYs. 68 Currently supports the 6348 and 6358 PHYs.
69 69
70config BCM87XX_PHY
71 tristate "Driver for Broadcom BCM8706 and BCM8727 PHYs"
72 help
73 Currently supports the BCM8706 and BCM8727 10G Ethernet PHYs.
74
70config ICPLUS_PHY 75config ICPLUS_PHY
71 tristate "Drivers for ICPlus PHYs" 76 tristate "Drivers for ICPlus PHYs"
72 ---help--- 77 ---help---
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index f51af688ef8b..6d2dc6c94f2e 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_SMSC_PHY) += smsc.o
12obj-$(CONFIG_VITESSE_PHY) += vitesse.o 12obj-$(CONFIG_VITESSE_PHY) += vitesse.o
13obj-$(CONFIG_BROADCOM_PHY) += broadcom.o 13obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
14obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o 14obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o
15obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o
15obj-$(CONFIG_ICPLUS_PHY) += icplus.o 16obj-$(CONFIG_ICPLUS_PHY) += icplus.o
16obj-$(CONFIG_REALTEK_PHY) += realtek.o 17obj-$(CONFIG_REALTEK_PHY) += realtek.o
17obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o 18obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c
index cfabd5fe5372..a3fb5ceb6487 100644
--- a/drivers/net/phy/amd.c
+++ b/drivers/net/phy/amd.c
@@ -77,13 +77,7 @@ static struct phy_driver am79c_driver = {
77 77
78static int __init am79c_init(void) 78static int __init am79c_init(void)
79{ 79{
80 int ret; 80 return phy_driver_register(&am79c_driver);
81
82 ret = phy_driver_register(&am79c_driver);
83 if (ret)
84 return ret;
85
86 return 0;
87} 81}
88 82
89static void __exit am79c_exit(void) 83static void __exit am79c_exit(void)
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index cd802eb25fd2..84c7a39b1c65 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -71,7 +71,8 @@ static int bcm63xx_config_intr(struct phy_device *phydev)
71 return err; 71 return err;
72} 72}
73 73
74static struct phy_driver bcm63xx_1_driver = { 74static struct phy_driver bcm63xx_driver[] = {
75{
75 .phy_id = 0x00406000, 76 .phy_id = 0x00406000,
76 .phy_id_mask = 0xfffffc00, 77 .phy_id_mask = 0xfffffc00,
77 .name = "Broadcom BCM63XX (1)", 78 .name = "Broadcom BCM63XX (1)",
@@ -84,10 +85,8 @@ static struct phy_driver bcm63xx_1_driver = {
84 .ack_interrupt = bcm63xx_ack_interrupt, 85 .ack_interrupt = bcm63xx_ack_interrupt,
85 .config_intr = bcm63xx_config_intr, 86 .config_intr = bcm63xx_config_intr,
86 .driver = { .owner = THIS_MODULE }, 87 .driver = { .owner = THIS_MODULE },
87}; 88}, {
88 89 /* same phy as above, with just a different OUI */
89/* same phy as above, with just a different OUI */
90static struct phy_driver bcm63xx_2_driver = {
91 .phy_id = 0x002bdc00, 90 .phy_id = 0x002bdc00,
92 .phy_id_mask = 0xfffffc00, 91 .phy_id_mask = 0xfffffc00,
93 .name = "Broadcom BCM63XX (2)", 92 .name = "Broadcom BCM63XX (2)",
@@ -99,30 +98,18 @@ static struct phy_driver bcm63xx_2_driver = {
99 .ack_interrupt = bcm63xx_ack_interrupt, 98 .ack_interrupt = bcm63xx_ack_interrupt,
100 .config_intr = bcm63xx_config_intr, 99 .config_intr = bcm63xx_config_intr,
101 .driver = { .owner = THIS_MODULE }, 100 .driver = { .owner = THIS_MODULE },
102}; 101} };
103 102
104static int __init bcm63xx_phy_init(void) 103static int __init bcm63xx_phy_init(void)
105{ 104{
106 int ret; 105 return phy_drivers_register(bcm63xx_driver,
107 106 ARRAY_SIZE(bcm63xx_driver));
108 ret = phy_driver_register(&bcm63xx_1_driver);
109 if (ret)
110 goto out_63xx_1;
111 ret = phy_driver_register(&bcm63xx_2_driver);
112 if (ret)
113 goto out_63xx_2;
114 return ret;
115
116out_63xx_2:
117 phy_driver_unregister(&bcm63xx_1_driver);
118out_63xx_1:
119 return ret;
120} 107}
121 108
122static void __exit bcm63xx_phy_exit(void) 109static void __exit bcm63xx_phy_exit(void)
123{ 110{
124 phy_driver_unregister(&bcm63xx_1_driver); 111 phy_drivers_unregister(bcm63xx_driver,
125 phy_driver_unregister(&bcm63xx_2_driver); 112 ARRAY_SIZE(bcm63xx_driver));
126} 113}
127 114
128module_init(bcm63xx_phy_init); 115module_init(bcm63xx_phy_init);
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
new file mode 100644
index 000000000000..2346b38b9837
--- /dev/null
+++ b/drivers/net/phy/bcm87xx.c
@@ -0,0 +1,231 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2011 - 2012 Cavium, Inc.
7 */
8
9#include <linux/module.h>
10#include <linux/phy.h>
11#include <linux/of.h>
12
13#define PHY_ID_BCM8706 0x0143bdc1
14#define PHY_ID_BCM8727 0x0143bff0
15
16#define BCM87XX_PMD_RX_SIGNAL_DETECT (MII_ADDR_C45 | 0x1000a)
17#define BCM87XX_10GBASER_PCS_STATUS (MII_ADDR_C45 | 0x30020)
18#define BCM87XX_XGXS_LANE_STATUS (MII_ADDR_C45 | 0x40018)
19
20#define BCM87XX_LASI_CONTROL (MII_ADDR_C45 | 0x39002)
21#define BCM87XX_LASI_STATUS (MII_ADDR_C45 | 0x39005)
22
23#if IS_ENABLED(CONFIG_OF_MDIO)
24/* Set and/or override some configuration registers based on the
25 * broadcom,c45-reg-init property stored in the of_node for the phydev.
26 *
27 * broadcom,c45-reg-init = <devid reg mask value>,...;
28 *
29 * There may be one or more sets of <devid reg mask value>:
30 *
31 * devid: which sub-device to use.
32 * reg: the register.
33 * mask: if non-zero, ANDed with existing register value.
34 * value: ORed with the masked value and written to the regiser.
35 *
36 */
37static int bcm87xx_of_reg_init(struct phy_device *phydev)
38{
39 const __be32 *paddr;
40 const __be32 *paddr_end;
41 int len, ret;
42
43 if (!phydev->dev.of_node)
44 return 0;
45
46 paddr = of_get_property(phydev->dev.of_node,
47 "broadcom,c45-reg-init", &len);
48 if (!paddr)
49 return 0;
50
51 paddr_end = paddr + (len /= sizeof(*paddr));
52
53 ret = 0;
54
55 while (paddr + 3 < paddr_end) {
56 u16 devid = be32_to_cpup(paddr++);
57 u16 reg = be32_to_cpup(paddr++);
58 u16 mask = be32_to_cpup(paddr++);
59 u16 val_bits = be32_to_cpup(paddr++);
60 int val;
61 u32 regnum = MII_ADDR_C45 | (devid << 16) | reg;
62 val = 0;
63 if (mask) {
64 val = phy_read(phydev, regnum);
65 if (val < 0) {
66 ret = val;
67 goto err;
68 }
69 val &= mask;
70 }
71 val |= val_bits;
72
73 ret = phy_write(phydev, regnum, val);
74 if (ret < 0)
75 goto err;
76 }
77err:
78 return ret;
79}
80#else
81static int bcm87xx_of_reg_init(struct phy_device *phydev)
82{
83 return 0;
84}
85#endif /* CONFIG_OF_MDIO */
86
87static int bcm87xx_config_init(struct phy_device *phydev)
88{
89 phydev->supported = SUPPORTED_10000baseR_FEC;
90 phydev->advertising = ADVERTISED_10000baseR_FEC;
91 phydev->state = PHY_NOLINK;
92 phydev->autoneg = AUTONEG_DISABLE;
93
94 bcm87xx_of_reg_init(phydev);
95
96 return 0;
97}
98
99static int bcm87xx_config_aneg(struct phy_device *phydev)
100{
101 return -EINVAL;
102}
103
104static int bcm87xx_read_status(struct phy_device *phydev)
105{
106 int rx_signal_detect;
107 int pcs_status;
108 int xgxs_lane_status;
109
110 rx_signal_detect = phy_read(phydev, BCM87XX_PMD_RX_SIGNAL_DETECT);
111 if (rx_signal_detect < 0)
112 return rx_signal_detect;
113
114 if ((rx_signal_detect & 1) == 0)
115 goto no_link;
116
117 pcs_status = phy_read(phydev, BCM87XX_10GBASER_PCS_STATUS);
118 if (pcs_status < 0)
119 return pcs_status;
120
121 if ((pcs_status & 1) == 0)
122 goto no_link;
123
124 xgxs_lane_status = phy_read(phydev, BCM87XX_XGXS_LANE_STATUS);
125 if (xgxs_lane_status < 0)
126 return xgxs_lane_status;
127
128 if ((xgxs_lane_status & 0x1000) == 0)
129 goto no_link;
130
131 phydev->speed = 10000;
132 phydev->link = 1;
133 phydev->duplex = 1;
134 return 0;
135
136no_link:
137 phydev->link = 0;
138 return 0;
139}
140
141static int bcm87xx_config_intr(struct phy_device *phydev)
142{
143 int reg, err;
144
145 reg = phy_read(phydev, BCM87XX_LASI_CONTROL);
146
147 if (reg < 0)
148 return reg;
149
150 if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
151 reg |= 1;
152 else
153 reg &= ~1;
154
155 err = phy_write(phydev, BCM87XX_LASI_CONTROL, reg);
156 return err;
157}
158
159static int bcm87xx_did_interrupt(struct phy_device *phydev)
160{
161 int reg;
162
163 reg = phy_read(phydev, BCM87XX_LASI_STATUS);
164
165 if (reg < 0) {
166 dev_err(&phydev->dev,
167 "Error: Read of BCM87XX_LASI_STATUS failed: %d\n", reg);
168 return 0;
169 }
170 return (reg & 1) != 0;
171}
172
173static int bcm87xx_ack_interrupt(struct phy_device *phydev)
174{
175 /* Reading the LASI status clears it. */
176 bcm87xx_did_interrupt(phydev);
177 return 0;
178}
179
180static int bcm8706_match_phy_device(struct phy_device *phydev)
181{
182 return phydev->c45_ids.device_ids[4] == PHY_ID_BCM8706;
183}
184
185static int bcm8727_match_phy_device(struct phy_device *phydev)
186{
187 return phydev->c45_ids.device_ids[4] == PHY_ID_BCM8727;
188}
189
190static struct phy_driver bcm87xx_driver[] = {
191{
192 .phy_id = PHY_ID_BCM8706,
193 .phy_id_mask = 0xffffffff,
194 .name = "Broadcom BCM8706",
195 .flags = PHY_HAS_INTERRUPT,
196 .config_init = bcm87xx_config_init,
197 .config_aneg = bcm87xx_config_aneg,
198 .read_status = bcm87xx_read_status,
199 .ack_interrupt = bcm87xx_ack_interrupt,
200 .config_intr = bcm87xx_config_intr,
201 .did_interrupt = bcm87xx_did_interrupt,
202 .match_phy_device = bcm8706_match_phy_device,
203 .driver = { .owner = THIS_MODULE },
204}, {
205 .phy_id = PHY_ID_BCM8727,
206 .phy_id_mask = 0xffffffff,
207 .name = "Broadcom BCM8727",
208 .flags = PHY_HAS_INTERRUPT,
209 .config_init = bcm87xx_config_init,
210 .config_aneg = bcm87xx_config_aneg,
211 .read_status = bcm87xx_read_status,
212 .ack_interrupt = bcm87xx_ack_interrupt,
213 .config_intr = bcm87xx_config_intr,
214 .did_interrupt = bcm87xx_did_interrupt,
215 .match_phy_device = bcm8727_match_phy_device,
216 .driver = { .owner = THIS_MODULE },
217} };
218
219static int __init bcm87xx_init(void)
220{
221 return phy_drivers_register(bcm87xx_driver,
222 ARRAY_SIZE(bcm87xx_driver));
223}
224module_init(bcm87xx_init);
225
226static void __exit bcm87xx_exit(void)
227{
228 phy_drivers_unregister(bcm87xx_driver,
229 ARRAY_SIZE(bcm87xx_driver));
230}
231module_exit(bcm87xx_exit);
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 60338ff63092..f8c90ea75108 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -682,7 +682,8 @@ static int brcm_fet_config_intr(struct phy_device *phydev)
682 return err; 682 return err;
683} 683}
684 684
685static struct phy_driver bcm5411_driver = { 685static struct phy_driver broadcom_drivers[] = {
686{
686 .phy_id = PHY_ID_BCM5411, 687 .phy_id = PHY_ID_BCM5411,
687 .phy_id_mask = 0xfffffff0, 688 .phy_id_mask = 0xfffffff0,
688 .name = "Broadcom BCM5411", 689 .name = "Broadcom BCM5411",
@@ -695,9 +696,7 @@ static struct phy_driver bcm5411_driver = {
695 .ack_interrupt = bcm54xx_ack_interrupt, 696 .ack_interrupt = bcm54xx_ack_interrupt,
696 .config_intr = bcm54xx_config_intr, 697 .config_intr = bcm54xx_config_intr,
697 .driver = { .owner = THIS_MODULE }, 698 .driver = { .owner = THIS_MODULE },
698}; 699}, {
699
700static struct phy_driver bcm5421_driver = {
701 .phy_id = PHY_ID_BCM5421, 700 .phy_id = PHY_ID_BCM5421,
702 .phy_id_mask = 0xfffffff0, 701 .phy_id_mask = 0xfffffff0,
703 .name = "Broadcom BCM5421", 702 .name = "Broadcom BCM5421",
@@ -710,9 +709,7 @@ static struct phy_driver bcm5421_driver = {
710 .ack_interrupt = bcm54xx_ack_interrupt, 709 .ack_interrupt = bcm54xx_ack_interrupt,
711 .config_intr = bcm54xx_config_intr, 710 .config_intr = bcm54xx_config_intr,
712 .driver = { .owner = THIS_MODULE }, 711 .driver = { .owner = THIS_MODULE },
713}; 712}, {
714
715static struct phy_driver bcm5461_driver = {
716 .phy_id = PHY_ID_BCM5461, 713 .phy_id = PHY_ID_BCM5461,
717 .phy_id_mask = 0xfffffff0, 714 .phy_id_mask = 0xfffffff0,
718 .name = "Broadcom BCM5461", 715 .name = "Broadcom BCM5461",
@@ -725,9 +722,7 @@ static struct phy_driver bcm5461_driver = {
725 .ack_interrupt = bcm54xx_ack_interrupt, 722 .ack_interrupt = bcm54xx_ack_interrupt,
726 .config_intr = bcm54xx_config_intr, 723 .config_intr = bcm54xx_config_intr,
727 .driver = { .owner = THIS_MODULE }, 724 .driver = { .owner = THIS_MODULE },
728}; 725}, {
729
730static struct phy_driver bcm5464_driver = {
731 .phy_id = PHY_ID_BCM5464, 726 .phy_id = PHY_ID_BCM5464,
732 .phy_id_mask = 0xfffffff0, 727 .phy_id_mask = 0xfffffff0,
733 .name = "Broadcom BCM5464", 728 .name = "Broadcom BCM5464",
@@ -740,9 +735,7 @@ static struct phy_driver bcm5464_driver = {
740 .ack_interrupt = bcm54xx_ack_interrupt, 735 .ack_interrupt = bcm54xx_ack_interrupt,
741 .config_intr = bcm54xx_config_intr, 736 .config_intr = bcm54xx_config_intr,
742 .driver = { .owner = THIS_MODULE }, 737 .driver = { .owner = THIS_MODULE },
743}; 738}, {
744
745static struct phy_driver bcm5481_driver = {
746 .phy_id = PHY_ID_BCM5481, 739 .phy_id = PHY_ID_BCM5481,
747 .phy_id_mask = 0xfffffff0, 740 .phy_id_mask = 0xfffffff0,
748 .name = "Broadcom BCM5481", 741 .name = "Broadcom BCM5481",
@@ -755,9 +748,7 @@ static struct phy_driver bcm5481_driver = {
755 .ack_interrupt = bcm54xx_ack_interrupt, 748 .ack_interrupt = bcm54xx_ack_interrupt,
756 .config_intr = bcm54xx_config_intr, 749 .config_intr = bcm54xx_config_intr,
757 .driver = { .owner = THIS_MODULE }, 750 .driver = { .owner = THIS_MODULE },
758}; 751}, {
759
760static struct phy_driver bcm5482_driver = {
761 .phy_id = PHY_ID_BCM5482, 752 .phy_id = PHY_ID_BCM5482,
762 .phy_id_mask = 0xfffffff0, 753 .phy_id_mask = 0xfffffff0,
763 .name = "Broadcom BCM5482", 754 .name = "Broadcom BCM5482",
@@ -770,9 +761,7 @@ static struct phy_driver bcm5482_driver = {
770 .ack_interrupt = bcm54xx_ack_interrupt, 761 .ack_interrupt = bcm54xx_ack_interrupt,
771 .config_intr = bcm54xx_config_intr, 762 .config_intr = bcm54xx_config_intr,
772 .driver = { .owner = THIS_MODULE }, 763 .driver = { .owner = THIS_MODULE },
773}; 764}, {
774
775static struct phy_driver bcm50610_driver = {
776 .phy_id = PHY_ID_BCM50610, 765 .phy_id = PHY_ID_BCM50610,
777 .phy_id_mask = 0xfffffff0, 766 .phy_id_mask = 0xfffffff0,
778 .name = "Broadcom BCM50610", 767 .name = "Broadcom BCM50610",
@@ -785,9 +774,7 @@ static struct phy_driver bcm50610_driver = {
785 .ack_interrupt = bcm54xx_ack_interrupt, 774 .ack_interrupt = bcm54xx_ack_interrupt,
786 .config_intr = bcm54xx_config_intr, 775 .config_intr = bcm54xx_config_intr,
787 .driver = { .owner = THIS_MODULE }, 776 .driver = { .owner = THIS_MODULE },
788}; 777}, {
789
790static struct phy_driver bcm50610m_driver = {
791 .phy_id = PHY_ID_BCM50610M, 778 .phy_id = PHY_ID_BCM50610M,
792 .phy_id_mask = 0xfffffff0, 779 .phy_id_mask = 0xfffffff0,
793 .name = "Broadcom BCM50610M", 780 .name = "Broadcom BCM50610M",
@@ -800,9 +787,7 @@ static struct phy_driver bcm50610m_driver = {
800 .ack_interrupt = bcm54xx_ack_interrupt, 787 .ack_interrupt = bcm54xx_ack_interrupt,
801 .config_intr = bcm54xx_config_intr, 788 .config_intr = bcm54xx_config_intr,
802 .driver = { .owner = THIS_MODULE }, 789 .driver = { .owner = THIS_MODULE },
803}; 790}, {
804
805static struct phy_driver bcm57780_driver = {
806 .phy_id = PHY_ID_BCM57780, 791 .phy_id = PHY_ID_BCM57780,
807 .phy_id_mask = 0xfffffff0, 792 .phy_id_mask = 0xfffffff0,
808 .name = "Broadcom BCM57780", 793 .name = "Broadcom BCM57780",
@@ -815,9 +800,7 @@ static struct phy_driver bcm57780_driver = {
815 .ack_interrupt = bcm54xx_ack_interrupt, 800 .ack_interrupt = bcm54xx_ack_interrupt,
816 .config_intr = bcm54xx_config_intr, 801 .config_intr = bcm54xx_config_intr,
817 .driver = { .owner = THIS_MODULE }, 802 .driver = { .owner = THIS_MODULE },
818}; 803}, {
819
820static struct phy_driver bcmac131_driver = {
821 .phy_id = PHY_ID_BCMAC131, 804 .phy_id = PHY_ID_BCMAC131,
822 .phy_id_mask = 0xfffffff0, 805 .phy_id_mask = 0xfffffff0,
823 .name = "Broadcom BCMAC131", 806 .name = "Broadcom BCMAC131",
@@ -830,9 +813,7 @@ static struct phy_driver bcmac131_driver = {
830 .ack_interrupt = brcm_fet_ack_interrupt, 813 .ack_interrupt = brcm_fet_ack_interrupt,
831 .config_intr = brcm_fet_config_intr, 814 .config_intr = brcm_fet_config_intr,
832 .driver = { .owner = THIS_MODULE }, 815 .driver = { .owner = THIS_MODULE },
833}; 816}, {
834
835static struct phy_driver bcm5241_driver = {
836 .phy_id = PHY_ID_BCM5241, 817 .phy_id = PHY_ID_BCM5241,
837 .phy_id_mask = 0xfffffff0, 818 .phy_id_mask = 0xfffffff0,
838 .name = "Broadcom BCM5241", 819 .name = "Broadcom BCM5241",
@@ -845,84 +826,18 @@ static struct phy_driver bcm5241_driver = {
845 .ack_interrupt = brcm_fet_ack_interrupt, 826 .ack_interrupt = brcm_fet_ack_interrupt,
846 .config_intr = brcm_fet_config_intr, 827 .config_intr = brcm_fet_config_intr,
847 .driver = { .owner = THIS_MODULE }, 828 .driver = { .owner = THIS_MODULE },
848}; 829} };
849 830
850static int __init broadcom_init(void) 831static int __init broadcom_init(void)
851{ 832{
852 int ret; 833 return phy_drivers_register(broadcom_drivers,
853 834 ARRAY_SIZE(broadcom_drivers));
854 ret = phy_driver_register(&bcm5411_driver);
855 if (ret)
856 goto out_5411;
857 ret = phy_driver_register(&bcm5421_driver);
858 if (ret)
859 goto out_5421;
860 ret = phy_driver_register(&bcm5461_driver);
861 if (ret)
862 goto out_5461;
863 ret = phy_driver_register(&bcm5464_driver);
864 if (ret)
865 goto out_5464;
866 ret = phy_driver_register(&bcm5481_driver);
867 if (ret)
868 goto out_5481;
869 ret = phy_driver_register(&bcm5482_driver);
870 if (ret)
871 goto out_5482;
872 ret = phy_driver_register(&bcm50610_driver);
873 if (ret)
874 goto out_50610;
875 ret = phy_driver_register(&bcm50610m_driver);
876 if (ret)
877 goto out_50610m;
878 ret = phy_driver_register(&bcm57780_driver);
879 if (ret)
880 goto out_57780;
881 ret = phy_driver_register(&bcmac131_driver);
882 if (ret)
883 goto out_ac131;
884 ret = phy_driver_register(&bcm5241_driver);
885 if (ret)
886 goto out_5241;
887 return ret;
888
889out_5241:
890 phy_driver_unregister(&bcmac131_driver);
891out_ac131:
892 phy_driver_unregister(&bcm57780_driver);
893out_57780:
894 phy_driver_unregister(&bcm50610m_driver);
895out_50610m:
896 phy_driver_unregister(&bcm50610_driver);
897out_50610:
898 phy_driver_unregister(&bcm5482_driver);
899out_5482:
900 phy_driver_unregister(&bcm5481_driver);
901out_5481:
902 phy_driver_unregister(&bcm5464_driver);
903out_5464:
904 phy_driver_unregister(&bcm5461_driver);
905out_5461:
906 phy_driver_unregister(&bcm5421_driver);
907out_5421:
908 phy_driver_unregister(&bcm5411_driver);
909out_5411:
910 return ret;
911} 835}
912 836
913static void __exit broadcom_exit(void) 837static void __exit broadcom_exit(void)
914{ 838{
915 phy_driver_unregister(&bcm5241_driver); 839 phy_drivers_unregister(broadcom_drivers,
916 phy_driver_unregister(&bcmac131_driver); 840 ARRAY_SIZE(broadcom_drivers));
917 phy_driver_unregister(&bcm57780_driver);
918 phy_driver_unregister(&bcm50610m_driver);
919 phy_driver_unregister(&bcm50610_driver);
920 phy_driver_unregister(&bcm5482_driver);
921 phy_driver_unregister(&bcm5481_driver);
922 phy_driver_unregister(&bcm5464_driver);
923 phy_driver_unregister(&bcm5461_driver);
924 phy_driver_unregister(&bcm5421_driver);
925 phy_driver_unregister(&bcm5411_driver);
926} 841}
927 842
928module_init(broadcom_init); 843module_init(broadcom_init);
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index d28173161c21..db472ffb6e89 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -102,7 +102,8 @@ static int cis820x_config_intr(struct phy_device *phydev)
102} 102}
103 103
104/* Cicada 8201, a.k.a Vitesse VSC8201 */ 104/* Cicada 8201, a.k.a Vitesse VSC8201 */
105static struct phy_driver cis8201_driver = { 105static struct phy_driver cis820x_driver[] = {
106{
106 .phy_id = 0x000fc410, 107 .phy_id = 0x000fc410,
107 .name = "Cicada Cis8201", 108 .name = "Cicada Cis8201",
108 .phy_id_mask = 0x000ffff0, 109 .phy_id_mask = 0x000ffff0,
@@ -113,11 +114,8 @@ static struct phy_driver cis8201_driver = {
113 .read_status = &genphy_read_status, 114 .read_status = &genphy_read_status,
114 .ack_interrupt = &cis820x_ack_interrupt, 115 .ack_interrupt = &cis820x_ack_interrupt,
115 .config_intr = &cis820x_config_intr, 116 .config_intr = &cis820x_config_intr,
116 .driver = { .owner = THIS_MODULE,}, 117 .driver = { .owner = THIS_MODULE,},
117}; 118}, {
118
119/* Cicada 8204 */
120static struct phy_driver cis8204_driver = {
121 .phy_id = 0x000fc440, 119 .phy_id = 0x000fc440,
122 .name = "Cicada Cis8204", 120 .name = "Cicada Cis8204",
123 .phy_id_mask = 0x000fffc0, 121 .phy_id_mask = 0x000fffc0,
@@ -128,32 +126,19 @@ static struct phy_driver cis8204_driver = {
128 .read_status = &genphy_read_status, 126 .read_status = &genphy_read_status,
129 .ack_interrupt = &cis820x_ack_interrupt, 127 .ack_interrupt = &cis820x_ack_interrupt,
130 .config_intr = &cis820x_config_intr, 128 .config_intr = &cis820x_config_intr,
131 .driver = { .owner = THIS_MODULE,}, 129 .driver = { .owner = THIS_MODULE,},
132}; 130} };
133 131
134static int __init cicada_init(void) 132static int __init cicada_init(void)
135{ 133{
136 int ret; 134 return phy_drivers_register(cis820x_driver,
137 135 ARRAY_SIZE(cis820x_driver));
138 ret = phy_driver_register(&cis8204_driver);
139 if (ret)
140 goto err1;
141
142 ret = phy_driver_register(&cis8201_driver);
143 if (ret)
144 goto err2;
145 return 0;
146
147err2:
148 phy_driver_unregister(&cis8204_driver);
149err1:
150 return ret;
151} 136}
152 137
153static void __exit cicada_exit(void) 138static void __exit cicada_exit(void)
154{ 139{
155 phy_driver_unregister(&cis8204_driver); 140 phy_drivers_unregister(cis820x_driver,
156 phy_driver_unregister(&cis8201_driver); 141 ARRAY_SIZE(cis820x_driver));
157} 142}
158 143
159module_init(cicada_init); 144module_init(cicada_init);
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 5f59cc064778..81c7bc010dd8 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -144,7 +144,8 @@ static int dm9161_ack_interrupt(struct phy_device *phydev)
144 return (err < 0) ? err : 0; 144 return (err < 0) ? err : 0;
145} 145}
146 146
147static struct phy_driver dm9161e_driver = { 147static struct phy_driver dm91xx_driver[] = {
148{
148 .phy_id = 0x0181b880, 149 .phy_id = 0x0181b880,
149 .name = "Davicom DM9161E", 150 .name = "Davicom DM9161E",
150 .phy_id_mask = 0x0ffffff0, 151 .phy_id_mask = 0x0ffffff0,
@@ -153,9 +154,7 @@ static struct phy_driver dm9161e_driver = {
153 .config_aneg = dm9161_config_aneg, 154 .config_aneg = dm9161_config_aneg,
154 .read_status = genphy_read_status, 155 .read_status = genphy_read_status,
155 .driver = { .owner = THIS_MODULE,}, 156 .driver = { .owner = THIS_MODULE,},
156}; 157}, {
157
158static struct phy_driver dm9161a_driver = {
159 .phy_id = 0x0181b8a0, 158 .phy_id = 0x0181b8a0,
160 .name = "Davicom DM9161A", 159 .name = "Davicom DM9161A",
161 .phy_id_mask = 0x0ffffff0, 160 .phy_id_mask = 0x0ffffff0,
@@ -164,9 +163,7 @@ static struct phy_driver dm9161a_driver = {
164 .config_aneg = dm9161_config_aneg, 163 .config_aneg = dm9161_config_aneg,
165 .read_status = genphy_read_status, 164 .read_status = genphy_read_status,
166 .driver = { .owner = THIS_MODULE,}, 165 .driver = { .owner = THIS_MODULE,},
167}; 166}, {
168
169static struct phy_driver dm9131_driver = {
170 .phy_id = 0x00181b80, 167 .phy_id = 0x00181b80,
171 .name = "Davicom DM9131", 168 .name = "Davicom DM9131",
172 .phy_id_mask = 0x0ffffff0, 169 .phy_id_mask = 0x0ffffff0,
@@ -177,38 +174,18 @@ static struct phy_driver dm9131_driver = {
177 .ack_interrupt = dm9161_ack_interrupt, 174 .ack_interrupt = dm9161_ack_interrupt,
178 .config_intr = dm9161_config_intr, 175 .config_intr = dm9161_config_intr,
179 .driver = { .owner = THIS_MODULE,}, 176 .driver = { .owner = THIS_MODULE,},
180}; 177} };
181 178
182static int __init davicom_init(void) 179static int __init davicom_init(void)
183{ 180{
184 int ret; 181 return phy_drivers_register(dm91xx_driver,
185 182 ARRAY_SIZE(dm91xx_driver));
186 ret = phy_driver_register(&dm9161e_driver);
187 if (ret)
188 goto err1;
189
190 ret = phy_driver_register(&dm9161a_driver);
191 if (ret)
192 goto err2;
193
194 ret = phy_driver_register(&dm9131_driver);
195 if (ret)
196 goto err3;
197 return 0;
198
199 err3:
200 phy_driver_unregister(&dm9161a_driver);
201 err2:
202 phy_driver_unregister(&dm9161e_driver);
203 err1:
204 return ret;
205} 183}
206 184
207static void __exit davicom_exit(void) 185static void __exit davicom_exit(void)
208{ 186{
209 phy_driver_unregister(&dm9161e_driver); 187 phy_drivers_unregister(dm91xx_driver,
210 phy_driver_unregister(&dm9161a_driver); 188 ARRAY_SIZE(dm91xx_driver));
211 phy_driver_unregister(&dm9131_driver);
212} 189}
213 190
214module_init(davicom_init); 191module_init(davicom_init);
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 940b29022d0c..b0da0226661f 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -17,6 +17,9 @@
17 * along with this program; if not, write to the Free Software 17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */ 19 */
20
21#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
20#include <linux/ethtool.h> 23#include <linux/ethtool.h>
21#include <linux/kernel.h> 24#include <linux/kernel.h>
22#include <linux/list.h> 25#include <linux/list.h>
@@ -453,16 +456,16 @@ static void enable_status_frames(struct phy_device *phydev, bool on)
453 ext_write(0, phydev, PAGE6, PSF_CFG1, ver); 456 ext_write(0, phydev, PAGE6, PSF_CFG1, ver);
454 457
455 if (!phydev->attached_dev) { 458 if (!phydev->attached_dev) {
456 pr_warning("dp83640: expected to find an attached netdevice\n"); 459 pr_warn("expected to find an attached netdevice\n");
457 return; 460 return;
458 } 461 }
459 462
460 if (on) { 463 if (on) {
461 if (dev_mc_add(phydev->attached_dev, status_frame_dst)) 464 if (dev_mc_add(phydev->attached_dev, status_frame_dst))
462 pr_warning("dp83640: failed to add mc address\n"); 465 pr_warn("failed to add mc address\n");
463 } else { 466 } else {
464 if (dev_mc_del(phydev->attached_dev, status_frame_dst)) 467 if (dev_mc_del(phydev->attached_dev, status_frame_dst))
465 pr_warning("dp83640: failed to delete mc address\n"); 468 pr_warn("failed to delete mc address\n");
466 } 469 }
467} 470}
468 471
@@ -582,9 +585,9 @@ static void recalibrate(struct dp83640_clock *clock)
582 * read out and correct offsets 585 * read out and correct offsets
583 */ 586 */
584 val = ext_read(master, PAGE4, PTP_STS); 587 val = ext_read(master, PAGE4, PTP_STS);
585 pr_info("master PTP_STS 0x%04hx", val); 588 pr_info("master PTP_STS 0x%04hx\n", val);
586 val = ext_read(master, PAGE4, PTP_ESTS); 589 val = ext_read(master, PAGE4, PTP_ESTS);
587 pr_info("master PTP_ESTS 0x%04hx", val); 590 pr_info("master PTP_ESTS 0x%04hx\n", val);
588 event_ts.ns_lo = ext_read(master, PAGE4, PTP_EDATA); 591 event_ts.ns_lo = ext_read(master, PAGE4, PTP_EDATA);
589 event_ts.ns_hi = ext_read(master, PAGE4, PTP_EDATA); 592 event_ts.ns_hi = ext_read(master, PAGE4, PTP_EDATA);
590 event_ts.sec_lo = ext_read(master, PAGE4, PTP_EDATA); 593 event_ts.sec_lo = ext_read(master, PAGE4, PTP_EDATA);
@@ -594,9 +597,9 @@ static void recalibrate(struct dp83640_clock *clock)
594 list_for_each(this, &clock->phylist) { 597 list_for_each(this, &clock->phylist) {
595 tmp = list_entry(this, struct dp83640_private, list); 598 tmp = list_entry(this, struct dp83640_private, list);
596 val = ext_read(tmp->phydev, PAGE4, PTP_STS); 599 val = ext_read(tmp->phydev, PAGE4, PTP_STS);
597 pr_info("slave PTP_STS 0x%04hx", val); 600 pr_info("slave PTP_STS 0x%04hx\n", val);
598 val = ext_read(tmp->phydev, PAGE4, PTP_ESTS); 601 val = ext_read(tmp->phydev, PAGE4, PTP_ESTS);
599 pr_info("slave PTP_ESTS 0x%04hx", val); 602 pr_info("slave PTP_ESTS 0x%04hx\n", val);
600 event_ts.ns_lo = ext_read(tmp->phydev, PAGE4, PTP_EDATA); 603 event_ts.ns_lo = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
601 event_ts.ns_hi = ext_read(tmp->phydev, PAGE4, PTP_EDATA); 604 event_ts.ns_hi = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
602 event_ts.sec_lo = ext_read(tmp->phydev, PAGE4, PTP_EDATA); 605 event_ts.sec_lo = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
@@ -686,7 +689,7 @@ static void decode_rxts(struct dp83640_private *dp83640,
686 prune_rx_ts(dp83640); 689 prune_rx_ts(dp83640);
687 690
688 if (list_empty(&dp83640->rxpool)) { 691 if (list_empty(&dp83640->rxpool)) {
689 pr_debug("dp83640: rx timestamp pool is empty\n"); 692 pr_debug("rx timestamp pool is empty\n");
690 goto out; 693 goto out;
691 } 694 }
692 rxts = list_first_entry(&dp83640->rxpool, struct rxts, list); 695 rxts = list_first_entry(&dp83640->rxpool, struct rxts, list);
@@ -709,7 +712,7 @@ static void decode_txts(struct dp83640_private *dp83640,
709 skb = skb_dequeue(&dp83640->tx_queue); 712 skb = skb_dequeue(&dp83640->tx_queue);
710 713
711 if (!skb) { 714 if (!skb) {
712 pr_debug("dp83640: have timestamp but tx_queue empty\n"); 715 pr_debug("have timestamp but tx_queue empty\n");
713 return; 716 return;
714 } 717 }
715 ns = phy2txts(phy_txts); 718 ns = phy2txts(phy_txts);
@@ -847,7 +850,7 @@ static void dp83640_free_clocks(void)
847 list_for_each_safe(this, next, &phyter_clocks) { 850 list_for_each_safe(this, next, &phyter_clocks) {
848 clock = list_entry(this, struct dp83640_clock, list); 851 clock = list_entry(this, struct dp83640_clock, list);
849 if (!list_empty(&clock->phylist)) { 852 if (!list_empty(&clock->phylist)) {
850 pr_warning("phy list non-empty while unloading"); 853 pr_warn("phy list non-empty while unloading\n");
851 BUG(); 854 BUG();
852 } 855 }
853 list_del(&clock->list); 856 list_del(&clock->list);
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c
index 633680d0828e..ba55adfc7aae 100644
--- a/drivers/net/phy/fixed.c
+++ b/drivers/net/phy/fixed.c
@@ -70,7 +70,7 @@ static int fixed_phy_update_regs(struct fixed_phy *fp)
70 lpa |= LPA_10FULL; 70 lpa |= LPA_10FULL;
71 break; 71 break;
72 default: 72 default:
73 printk(KERN_WARNING "fixed phy: unknown speed\n"); 73 pr_warn("fixed phy: unknown speed\n");
74 return -EINVAL; 74 return -EINVAL;
75 } 75 }
76 } else { 76 } else {
@@ -90,7 +90,7 @@ static int fixed_phy_update_regs(struct fixed_phy *fp)
90 lpa |= LPA_10HALF; 90 lpa |= LPA_10HALF;
91 break; 91 break;
92 default: 92 default:
93 printk(KERN_WARNING "fixed phy: unknown speed\n"); 93 pr_warn("fixed phy: unknown speed\n");
94 return -EINVAL; 94 return -EINVAL;
95 } 95 }
96 } 96 }
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 47f8e8939266..d5199cb4caec 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -202,7 +202,8 @@ static int ip101a_g_ack_interrupt(struct phy_device *phydev)
202 return 0; 202 return 0;
203} 203}
204 204
205static struct phy_driver ip175c_driver = { 205static struct phy_driver icplus_driver[] = {
206{
206 .phy_id = 0x02430d80, 207 .phy_id = 0x02430d80,
207 .name = "ICPlus IP175C", 208 .name = "ICPlus IP175C",
208 .phy_id_mask = 0x0ffffff0, 209 .phy_id_mask = 0x0ffffff0,
@@ -213,9 +214,7 @@ static struct phy_driver ip175c_driver = {
213 .suspend = genphy_suspend, 214 .suspend = genphy_suspend,
214 .resume = genphy_resume, 215 .resume = genphy_resume,
215 .driver = { .owner = THIS_MODULE,}, 216 .driver = { .owner = THIS_MODULE,},
216}; 217}, {
217
218static struct phy_driver ip1001_driver = {
219 .phy_id = 0x02430d90, 218 .phy_id = 0x02430d90,
220 .name = "ICPlus IP1001", 219 .name = "ICPlus IP1001",
221 .phy_id_mask = 0x0ffffff0, 220 .phy_id_mask = 0x0ffffff0,
@@ -227,9 +226,7 @@ static struct phy_driver ip1001_driver = {
227 .suspend = genphy_suspend, 226 .suspend = genphy_suspend,
228 .resume = genphy_resume, 227 .resume = genphy_resume,
229 .driver = { .owner = THIS_MODULE,}, 228 .driver = { .owner = THIS_MODULE,},
230}; 229}, {
231
232static struct phy_driver ip101a_g_driver = {
233 .phy_id = 0x02430c54, 230 .phy_id = 0x02430c54,
234 .name = "ICPlus IP101A/G", 231 .name = "ICPlus IP101A/G",
235 .phy_id_mask = 0x0ffffff0, 232 .phy_id_mask = 0x0ffffff0,
@@ -243,28 +240,18 @@ static struct phy_driver ip101a_g_driver = {
243 .suspend = genphy_suspend, 240 .suspend = genphy_suspend,
244 .resume = genphy_resume, 241 .resume = genphy_resume,
245 .driver = { .owner = THIS_MODULE,}, 242 .driver = { .owner = THIS_MODULE,},
246}; 243} };
247 244
248static int __init icplus_init(void) 245static int __init icplus_init(void)
249{ 246{
250 int ret = 0; 247 return phy_drivers_register(icplus_driver,
251 248 ARRAY_SIZE(icplus_driver));
252 ret = phy_driver_register(&ip1001_driver);
253 if (ret < 0)
254 return -ENODEV;
255
256 ret = phy_driver_register(&ip101a_g_driver);
257 if (ret < 0)
258 return -ENODEV;
259
260 return phy_driver_register(&ip175c_driver);
261} 249}
262 250
263static void __exit icplus_exit(void) 251static void __exit icplus_exit(void)
264{ 252{
265 phy_driver_unregister(&ip1001_driver); 253 phy_drivers_unregister(icplus_driver,
266 phy_driver_unregister(&ip101a_g_driver); 254 ARRAY_SIZE(icplus_driver));
267 phy_driver_unregister(&ip175c_driver);
268} 255}
269 256
270module_init(icplus_init); 257module_init(icplus_init);
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index 6f6e8b616a62..6d1e3fcc43e2 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -149,7 +149,8 @@ static int lxt973_config_aneg(struct phy_device *phydev)
149 return phydev->priv ? 0 : genphy_config_aneg(phydev); 149 return phydev->priv ? 0 : genphy_config_aneg(phydev);
150} 150}
151 151
152static struct phy_driver lxt970_driver = { 152static struct phy_driver lxt97x_driver[] = {
153{
153 .phy_id = 0x78100000, 154 .phy_id = 0x78100000,
154 .name = "LXT970", 155 .name = "LXT970",
155 .phy_id_mask = 0xfffffff0, 156 .phy_id_mask = 0xfffffff0,
@@ -160,10 +161,8 @@ static struct phy_driver lxt970_driver = {
160 .read_status = genphy_read_status, 161 .read_status = genphy_read_status,
161 .ack_interrupt = lxt970_ack_interrupt, 162 .ack_interrupt = lxt970_ack_interrupt,
162 .config_intr = lxt970_config_intr, 163 .config_intr = lxt970_config_intr,
163 .driver = { .owner = THIS_MODULE,}, 164 .driver = { .owner = THIS_MODULE,},
164}; 165}, {
165
166static struct phy_driver lxt971_driver = {
167 .phy_id = 0x001378e0, 166 .phy_id = 0x001378e0,
168 .name = "LXT971", 167 .name = "LXT971",
169 .phy_id_mask = 0xfffffff0, 168 .phy_id_mask = 0xfffffff0,
@@ -173,10 +172,8 @@ static struct phy_driver lxt971_driver = {
173 .read_status = genphy_read_status, 172 .read_status = genphy_read_status,
174 .ack_interrupt = lxt971_ack_interrupt, 173 .ack_interrupt = lxt971_ack_interrupt,
175 .config_intr = lxt971_config_intr, 174 .config_intr = lxt971_config_intr,
176 .driver = { .owner = THIS_MODULE,}, 175 .driver = { .owner = THIS_MODULE,},
177}; 176}, {
178
179static struct phy_driver lxt973_driver = {
180 .phy_id = 0x00137a10, 177 .phy_id = 0x00137a10,
181 .name = "LXT973", 178 .name = "LXT973",
182 .phy_id_mask = 0xfffffff0, 179 .phy_id_mask = 0xfffffff0,
@@ -185,39 +182,19 @@ static struct phy_driver lxt973_driver = {
185 .probe = lxt973_probe, 182 .probe = lxt973_probe,
186 .config_aneg = lxt973_config_aneg, 183 .config_aneg = lxt973_config_aneg,
187 .read_status = genphy_read_status, 184 .read_status = genphy_read_status,
188 .driver = { .owner = THIS_MODULE,}, 185 .driver = { .owner = THIS_MODULE,},
189}; 186} };
190 187
191static int __init lxt_init(void) 188static int __init lxt_init(void)
192{ 189{
193 int ret; 190 return phy_drivers_register(lxt97x_driver,
194 191 ARRAY_SIZE(lxt97x_driver));
195 ret = phy_driver_register(&lxt970_driver);
196 if (ret)
197 goto err1;
198
199 ret = phy_driver_register(&lxt971_driver);
200 if (ret)
201 goto err2;
202
203 ret = phy_driver_register(&lxt973_driver);
204 if (ret)
205 goto err3;
206 return 0;
207
208 err3:
209 phy_driver_unregister(&lxt971_driver);
210 err2:
211 phy_driver_unregister(&lxt970_driver);
212 err1:
213 return ret;
214} 192}
215 193
216static void __exit lxt_exit(void) 194static void __exit lxt_exit(void)
217{ 195{
218 phy_driver_unregister(&lxt970_driver); 196 phy_drivers_unregister(lxt97x_driver,
219 phy_driver_unregister(&lxt971_driver); 197 ARRAY_SIZE(lxt97x_driver));
220 phy_driver_unregister(&lxt973_driver);
221} 198}
222 199
223module_init(lxt_init); 200module_init(lxt_init);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 418928d644bf..5d2a3f215887 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -826,28 +826,14 @@ static struct phy_driver marvell_drivers[] = {
826 826
827static int __init marvell_init(void) 827static int __init marvell_init(void)
828{ 828{
829 int ret; 829 return phy_drivers_register(marvell_drivers,
830 int i; 830 ARRAY_SIZE(marvell_drivers));
831
832 for (i = 0; i < ARRAY_SIZE(marvell_drivers); i++) {
833 ret = phy_driver_register(&marvell_drivers[i]);
834
835 if (ret) {
836 while (i-- > 0)
837 phy_driver_unregister(&marvell_drivers[i]);
838 return ret;
839 }
840 }
841
842 return 0;
843} 831}
844 832
845static void __exit marvell_exit(void) 833static void __exit marvell_exit(void)
846{ 834{
847 int i; 835 phy_drivers_unregister(marvell_drivers,
848 836 ARRAY_SIZE(marvell_drivers));
849 for (i = 0; i < ARRAY_SIZE(marvell_drivers); i++)
850 phy_driver_unregister(&marvell_drivers[i]);
851} 837}
852 838
853module_init(marvell_init); 839module_init(marvell_init);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 5061608f408c..170eb411ab5d 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -13,6 +13,9 @@
13 * option) any later version. 13 * option) any later version.
14 * 14 *
15 */ 15 */
16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
16#include <linux/kernel.h> 19#include <linux/kernel.h>
17#include <linux/string.h> 20#include <linux/string.h>
18#include <linux/errno.h> 21#include <linux/errno.h>
@@ -22,6 +25,7 @@
22#include <linux/init.h> 25#include <linux/init.h>
23#include <linux/delay.h> 26#include <linux/delay.h>
24#include <linux/device.h> 27#include <linux/device.h>
28#include <linux/of_device.h>
25#include <linux/netdevice.h> 29#include <linux/netdevice.h>
26#include <linux/etherdevice.h> 30#include <linux/etherdevice.h>
27#include <linux/skbuff.h> 31#include <linux/skbuff.h>
@@ -148,7 +152,7 @@ int mdiobus_register(struct mii_bus *bus)
148 152
149 err = device_register(&bus->dev); 153 err = device_register(&bus->dev);
150 if (err) { 154 if (err) {
151 printk(KERN_ERR "mii_bus %s failed to register\n", bus->id); 155 pr_err("mii_bus %s failed to register\n", bus->id);
152 return -EINVAL; 156 return -EINVAL;
153 } 157 }
154 158
@@ -229,7 +233,7 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
229 struct phy_device *phydev; 233 struct phy_device *phydev;
230 int err; 234 int err;
231 235
232 phydev = get_phy_device(bus, addr); 236 phydev = get_phy_device(bus, addr, false);
233 if (IS_ERR(phydev) || phydev == NULL) 237 if (IS_ERR(phydev) || phydev == NULL)
234 return phydev; 238 return phydev;
235 239
@@ -305,6 +309,12 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
305 struct phy_device *phydev = to_phy_device(dev); 309 struct phy_device *phydev = to_phy_device(dev);
306 struct phy_driver *phydrv = to_phy_driver(drv); 310 struct phy_driver *phydrv = to_phy_driver(drv);
307 311
312 if (of_driver_match_device(dev, drv))
313 return 1;
314
315 if (phydrv->match_phy_device)
316 return phydrv->match_phy_device(phydev);
317
308 return ((phydrv->phy_id & phydrv->phy_id_mask) == 318 return ((phydrv->phy_id & phydrv->phy_id_mask) ==
309 (phydev->phy_id & phydrv->phy_id_mask)); 319 (phydev->phy_id & phydrv->phy_id_mask));
310} 320}
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 9d6c80c8a0cf..cf287e0eb408 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -114,7 +114,8 @@ static int ks8051_config_init(struct phy_device *phydev)
114 return 0; 114 return 0;
115} 115}
116 116
117static struct phy_driver ks8737_driver = { 117static struct phy_driver ksphy_driver[] = {
118{
118 .phy_id = PHY_ID_KS8737, 119 .phy_id = PHY_ID_KS8737,
119 .phy_id_mask = 0x00fffff0, 120 .phy_id_mask = 0x00fffff0,
120 .name = "Micrel KS8737", 121 .name = "Micrel KS8737",
@@ -126,9 +127,7 @@ static struct phy_driver ks8737_driver = {
126 .ack_interrupt = kszphy_ack_interrupt, 127 .ack_interrupt = kszphy_ack_interrupt,
127 .config_intr = ks8737_config_intr, 128 .config_intr = ks8737_config_intr,
128 .driver = { .owner = THIS_MODULE,}, 129 .driver = { .owner = THIS_MODULE,},
129}; 130}, {
130
131static struct phy_driver ks8041_driver = {
132 .phy_id = PHY_ID_KS8041, 131 .phy_id = PHY_ID_KS8041,
133 .phy_id_mask = 0x00fffff0, 132 .phy_id_mask = 0x00fffff0,
134 .name = "Micrel KS8041", 133 .name = "Micrel KS8041",
@@ -141,9 +140,7 @@ static struct phy_driver ks8041_driver = {
141 .ack_interrupt = kszphy_ack_interrupt, 140 .ack_interrupt = kszphy_ack_interrupt,
142 .config_intr = kszphy_config_intr, 141 .config_intr = kszphy_config_intr,
143 .driver = { .owner = THIS_MODULE,}, 142 .driver = { .owner = THIS_MODULE,},
144}; 143}, {
145
146static struct phy_driver ks8051_driver = {
147 .phy_id = PHY_ID_KS8051, 144 .phy_id = PHY_ID_KS8051,
148 .phy_id_mask = 0x00fffff0, 145 .phy_id_mask = 0x00fffff0,
149 .name = "Micrel KS8051", 146 .name = "Micrel KS8051",
@@ -156,9 +153,7 @@ static struct phy_driver ks8051_driver = {
156 .ack_interrupt = kszphy_ack_interrupt, 153 .ack_interrupt = kszphy_ack_interrupt,
157 .config_intr = kszphy_config_intr, 154 .config_intr = kszphy_config_intr,
158 .driver = { .owner = THIS_MODULE,}, 155 .driver = { .owner = THIS_MODULE,},
159}; 156}, {
160
161static struct phy_driver ks8001_driver = {
162 .phy_id = PHY_ID_KS8001, 157 .phy_id = PHY_ID_KS8001,
163 .name = "Micrel KS8001 or KS8721", 158 .name = "Micrel KS8001 or KS8721",
164 .phy_id_mask = 0x00ffffff, 159 .phy_id_mask = 0x00ffffff,
@@ -170,9 +165,7 @@ static struct phy_driver ks8001_driver = {
170 .ack_interrupt = kszphy_ack_interrupt, 165 .ack_interrupt = kszphy_ack_interrupt,
171 .config_intr = kszphy_config_intr, 166 .config_intr = kszphy_config_intr,
172 .driver = { .owner = THIS_MODULE,}, 167 .driver = { .owner = THIS_MODULE,},
173}; 168}, {
174
175static struct phy_driver ksz9021_driver = {
176 .phy_id = PHY_ID_KSZ9021, 169 .phy_id = PHY_ID_KSZ9021,
177 .phy_id_mask = 0x000ffffe, 170 .phy_id_mask = 0x000ffffe,
178 .name = "Micrel KSZ9021 Gigabit PHY", 171 .name = "Micrel KSZ9021 Gigabit PHY",
@@ -185,51 +178,18 @@ static struct phy_driver ksz9021_driver = {
185 .ack_interrupt = kszphy_ack_interrupt, 178 .ack_interrupt = kszphy_ack_interrupt,
186 .config_intr = ksz9021_config_intr, 179 .config_intr = ksz9021_config_intr,
187 .driver = { .owner = THIS_MODULE, }, 180 .driver = { .owner = THIS_MODULE, },
188}; 181} };
189 182
190static int __init ksphy_init(void) 183static int __init ksphy_init(void)
191{ 184{
192 int ret; 185 return phy_drivers_register(ksphy_driver,
193 186 ARRAY_SIZE(ksphy_driver));
194 ret = phy_driver_register(&ks8001_driver);
195 if (ret)
196 goto err1;
197
198 ret = phy_driver_register(&ksz9021_driver);
199 if (ret)
200 goto err2;
201
202 ret = phy_driver_register(&ks8737_driver);
203 if (ret)
204 goto err3;
205 ret = phy_driver_register(&ks8041_driver);
206 if (ret)
207 goto err4;
208 ret = phy_driver_register(&ks8051_driver);
209 if (ret)
210 goto err5;
211
212 return 0;
213
214err5:
215 phy_driver_unregister(&ks8041_driver);
216err4:
217 phy_driver_unregister(&ks8737_driver);
218err3:
219 phy_driver_unregister(&ksz9021_driver);
220err2:
221 phy_driver_unregister(&ks8001_driver);
222err1:
223 return ret;
224} 187}
225 188
226static void __exit ksphy_exit(void) 189static void __exit ksphy_exit(void)
227{ 190{
228 phy_driver_unregister(&ks8001_driver); 191 phy_drivers_unregister(ksphy_driver,
229 phy_driver_unregister(&ks8737_driver); 192 ARRAY_SIZE(ksphy_driver));
230 phy_driver_unregister(&ksz9021_driver);
231 phy_driver_unregister(&ks8041_driver);
232 phy_driver_unregister(&ks8051_driver);
233} 193}
234 194
235module_init(ksphy_init); 195module_init(ksphy_init);
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 04bb8fcc0cb5..9a5f234d95b0 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -15,6 +15,8 @@
15 * 15 *
16 */ 16 */
17 17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
18#include <linux/kernel.h> 20#include <linux/kernel.h>
19#include <linux/module.h> 21#include <linux/module.h>
20#include <linux/mii.h> 22#include <linux/mii.h>
@@ -22,6 +24,8 @@
22#include <linux/phy.h> 24#include <linux/phy.h>
23#include <linux/netdevice.h> 25#include <linux/netdevice.h>
24 26
27#define DEBUG
28
25/* DP83865 phy identifier values */ 29/* DP83865 phy identifier values */
26#define DP83865_PHY_ID 0x20005c7a 30#define DP83865_PHY_ID 0x20005c7a
27 31
@@ -112,8 +116,8 @@ static void ns_10_base_t_hdx_loopack(struct phy_device *phydev, int disable)
112 ns_exp_write(phydev, 0x1c0, 116 ns_exp_write(phydev, 0x1c0,
113 ns_exp_read(phydev, 0x1c0) & 0xfffe); 117 ns_exp_read(phydev, 0x1c0) & 0xfffe);
114 118
115 printk(KERN_DEBUG "DP83865 PHY: 10BASE-T HDX loopback %s\n", 119 pr_debug("10BASE-T HDX loopback %s\n",
116 (ns_exp_read(phydev, 0x1c0) & 0x0001) ? "off" : "on"); 120 (ns_exp_read(phydev, 0x1c0) & 0x0001) ? "off" : "on");
117} 121}
118 122
119static int ns_config_init(struct phy_device *phydev) 123static int ns_config_init(struct phy_device *phydev)
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 3cbda0851f83..7ca2ff97c368 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -15,6 +15,9 @@
15 * option) any later version. 15 * option) any later version.
16 * 16 *
17 */ 17 */
18
19#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
18#include <linux/kernel.h> 21#include <linux/kernel.h>
19#include <linux/string.h> 22#include <linux/string.h>
20#include <linux/errno.h> 23#include <linux/errno.h>
@@ -32,6 +35,7 @@
32#include <linux/phy.h> 35#include <linux/phy.h>
33#include <linux/timer.h> 36#include <linux/timer.h>
34#include <linux/workqueue.h> 37#include <linux/workqueue.h>
38#include <linux/mdio.h>
35 39
36#include <linux/atomic.h> 40#include <linux/atomic.h>
37#include <asm/io.h> 41#include <asm/io.h>
@@ -44,18 +48,16 @@
44 */ 48 */
45void phy_print_status(struct phy_device *phydev) 49void phy_print_status(struct phy_device *phydev)
46{ 50{
47 pr_info("PHY: %s - Link is %s", dev_name(&phydev->dev),
48 phydev->link ? "Up" : "Down");
49 if (phydev->link) 51 if (phydev->link)
50 printk(KERN_CONT " - %d/%s", phydev->speed, 52 pr_info("%s - Link is Up - %d/%s\n",
51 DUPLEX_FULL == phydev->duplex ? 53 dev_name(&phydev->dev),
52 "Full" : "Half"); 54 phydev->speed,
53 55 DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
54 printk(KERN_CONT "\n"); 56 else
57 pr_info("%s - Link is Down\n", dev_name(&phydev->dev));
55} 58}
56EXPORT_SYMBOL(phy_print_status); 59EXPORT_SYMBOL(phy_print_status);
57 60
58
59/** 61/**
60 * phy_clear_interrupt - Ack the phy device's interrupt 62 * phy_clear_interrupt - Ack the phy device's interrupt
61 * @phydev: the phy_device struct 63 * @phydev: the phy_device struct
@@ -482,9 +484,8 @@ static void phy_force_reduction(struct phy_device *phydev)
482 phydev->speed = settings[idx].speed; 484 phydev->speed = settings[idx].speed;
483 phydev->duplex = settings[idx].duplex; 485 phydev->duplex = settings[idx].duplex;
484 486
485 pr_info("Trying %d/%s\n", phydev->speed, 487 pr_info("Trying %d/%s\n",
486 DUPLEX_FULL == phydev->duplex ? 488 phydev->speed, DUPLEX_FULL == phydev->duplex ? "FULL" : "HALF");
487 "FULL" : "HALF");
488} 489}
489 490
490 491
@@ -598,9 +599,8 @@ int phy_start_interrupts(struct phy_device *phydev)
598 IRQF_SHARED, 599 IRQF_SHARED,
599 "phy_interrupt", 600 "phy_interrupt",
600 phydev) < 0) { 601 phydev) < 0) {
601 printk(KERN_WARNING "%s: Can't get IRQ %d (PHY)\n", 602 pr_warn("%s: Can't get IRQ %d (PHY)\n",
602 phydev->bus->name, 603 phydev->bus->name, phydev->irq);
603 phydev->irq);
604 phydev->irq = PHY_POLL; 604 phydev->irq = PHY_POLL;
605 return 0; 605 return 0;
606 } 606 }
@@ -838,10 +838,10 @@ void phy_state_machine(struct work_struct *work)
838 838
839 phydev->autoneg = AUTONEG_DISABLE; 839 phydev->autoneg = AUTONEG_DISABLE;
840 840
841 pr_info("Trying %d/%s\n", phydev->speed, 841 pr_info("Trying %d/%s\n",
842 DUPLEX_FULL == 842 phydev->speed,
843 phydev->duplex ? 843 DUPLEX_FULL == phydev->duplex ?
844 "FULL" : "HALF"); 844 "FULL" : "HALF");
845 } 845 }
846 break; 846 break;
847 case PHY_NOLINK: 847 case PHY_NOLINK:
@@ -968,3 +968,283 @@ void phy_state_machine(struct work_struct *work)
968 968
969 schedule_delayed_work(&phydev->state_queue, PHY_STATE_TIME * HZ); 969 schedule_delayed_work(&phydev->state_queue, PHY_STATE_TIME * HZ);
970} 970}
971
972static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad,
973 int addr)
974{
975 /* Write the desired MMD Devad */
976 bus->write(bus, addr, MII_MMD_CTRL, devad);
977
978 /* Write the desired MMD register address */
979 bus->write(bus, addr, MII_MMD_DATA, prtad);
980
981 /* Select the Function : DATA with no post increment */
982 bus->write(bus, addr, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
983}
984
985/**
986 * phy_read_mmd_indirect - reads data from the MMD registers
987 * @bus: the target MII bus
988 * @prtad: MMD Address
989 * @devad: MMD DEVAD
990 * @addr: PHY address on the MII bus
991 *
992 * Description: it reads data from the MMD registers (clause 22 to access to
993 * clause 45) of the specified phy address.
994 * To read these register we have:
995 * 1) Write reg 13 // DEVAD
996 * 2) Write reg 14 // MMD Address
997 * 3) Write reg 13 // MMD Data Command for MMD DEVAD
998 * 3) Read reg 14 // Read MMD data
999 */
1000static int phy_read_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
1001 int addr)
1002{
1003 u32 ret;
1004
1005 mmd_phy_indirect(bus, prtad, devad, addr);
1006
1007 /* Read the content of the MMD's selected register */
1008 ret = bus->read(bus, addr, MII_MMD_DATA);
1009
1010 return ret;
1011}
1012
1013/**
1014 * phy_write_mmd_indirect - writes data to the MMD registers
1015 * @bus: the target MII bus
1016 * @prtad: MMD Address
1017 * @devad: MMD DEVAD
1018 * @addr: PHY address on the MII bus
1019 * @data: data to write in the MMD register
1020 *
1021 * Description: Write data from the MMD registers of the specified
1022 * phy address.
1023 * To write these register we have:
1024 * 1) Write reg 13 // DEVAD
1025 * 2) Write reg 14 // MMD Address
1026 * 3) Write reg 13 // MMD Data Command for MMD DEVAD
1027 * 3) Write reg 14 // Write MMD data
1028 */
1029static void phy_write_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
1030 int addr, u32 data)
1031{
1032 mmd_phy_indirect(bus, prtad, devad, addr);
1033
1034 /* Write the data into MMD's selected register */
1035 bus->write(bus, addr, MII_MMD_DATA, data);
1036}
1037
1038static u32 phy_eee_to_adv(u16 eee_adv)
1039{
1040 u32 adv = 0;
1041
1042 if (eee_adv & MDIO_EEE_100TX)
1043 adv |= ADVERTISED_100baseT_Full;
1044 if (eee_adv & MDIO_EEE_1000T)
1045 adv |= ADVERTISED_1000baseT_Full;
1046 if (eee_adv & MDIO_EEE_10GT)
1047 adv |= ADVERTISED_10000baseT_Full;
1048 if (eee_adv & MDIO_EEE_1000KX)
1049 adv |= ADVERTISED_1000baseKX_Full;
1050 if (eee_adv & MDIO_EEE_10GKX4)
1051 adv |= ADVERTISED_10000baseKX4_Full;
1052 if (eee_adv & MDIO_EEE_10GKR)
1053 adv |= ADVERTISED_10000baseKR_Full;
1054
1055 return adv;
1056}
1057
1058static u32 phy_eee_to_supported(u16 eee_caported)
1059{
1060 u32 supported = 0;
1061
1062 if (eee_caported & MDIO_EEE_100TX)
1063 supported |= SUPPORTED_100baseT_Full;
1064 if (eee_caported & MDIO_EEE_1000T)
1065 supported |= SUPPORTED_1000baseT_Full;
1066 if (eee_caported & MDIO_EEE_10GT)
1067 supported |= SUPPORTED_10000baseT_Full;
1068 if (eee_caported & MDIO_EEE_1000KX)
1069 supported |= SUPPORTED_1000baseKX_Full;
1070 if (eee_caported & MDIO_EEE_10GKX4)
1071 supported |= SUPPORTED_10000baseKX4_Full;
1072 if (eee_caported & MDIO_EEE_10GKR)
1073 supported |= SUPPORTED_10000baseKR_Full;
1074
1075 return supported;
1076}
1077
1078static u16 phy_adv_to_eee(u32 adv)
1079{
1080 u16 reg = 0;
1081
1082 if (adv & ADVERTISED_100baseT_Full)
1083 reg |= MDIO_EEE_100TX;
1084 if (adv & ADVERTISED_1000baseT_Full)
1085 reg |= MDIO_EEE_1000T;
1086 if (adv & ADVERTISED_10000baseT_Full)
1087 reg |= MDIO_EEE_10GT;
1088 if (adv & ADVERTISED_1000baseKX_Full)
1089 reg |= MDIO_EEE_1000KX;
1090 if (adv & ADVERTISED_10000baseKX4_Full)
1091 reg |= MDIO_EEE_10GKX4;
1092 if (adv & ADVERTISED_10000baseKR_Full)
1093 reg |= MDIO_EEE_10GKR;
1094
1095 return reg;
1096}
1097
1098/**
1099 * phy_init_eee - init and check the EEE feature
1100 * @phydev: target phy_device struct
1101 * @clk_stop_enable: PHY may stop the clock during LPI
1102 *
1103 * Description: it checks if the Energy-Efficient Ethernet (EEE)
1104 * is supported by looking at the MMD registers 3.20 and 7.60/61
1105 * and it programs the MMD register 3.0 setting the "Clock stop enable"
1106 * bit if required.
1107 */
1108int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
1109{
1110 int ret = -EPROTONOSUPPORT;
1111
1112 /* According to 802.3az,the EEE is supported only in full duplex-mode.
1113 * Also EEE feature is active when core is operating with MII, GMII
1114 * or RGMII.
1115 */
1116 if ((phydev->duplex == DUPLEX_FULL) &&
1117 ((phydev->interface == PHY_INTERFACE_MODE_MII) ||
1118 (phydev->interface == PHY_INTERFACE_MODE_GMII) ||
1119 (phydev->interface == PHY_INTERFACE_MODE_RGMII))) {
1120 int eee_lp, eee_cap, eee_adv;
1121 u32 lp, cap, adv;
1122 int idx, status;
1123
1124 /* Read phy status to properly get the right settings */
1125 status = phy_read_status(phydev);
1126 if (status)
1127 return status;
1128
1129 /* First check if the EEE ability is supported */
1130 eee_cap = phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_ABLE,
1131 MDIO_MMD_PCS, phydev->addr);
1132 if (eee_cap < 0)
1133 return eee_cap;
1134
1135 cap = phy_eee_to_supported(eee_cap);
1136 if (!cap)
1137 goto eee_exit;
1138
1139 /* Check which link settings negotiated and verify it in
1140 * the EEE advertising registers.
1141 */
1142 eee_lp = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE,
1143 MDIO_MMD_AN, phydev->addr);
1144 if (eee_lp < 0)
1145 return eee_lp;
1146
1147 eee_adv = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV,
1148 MDIO_MMD_AN, phydev->addr);
1149 if (eee_adv < 0)
1150 return eee_adv;
1151
1152 adv = phy_eee_to_adv(eee_adv);
1153 lp = phy_eee_to_adv(eee_lp);
1154 idx = phy_find_setting(phydev->speed, phydev->duplex);
1155 if ((lp & adv & settings[idx].setting))
1156 goto eee_exit;
1157
1158 if (clk_stop_enable) {
1159 /* Configure the PHY to stop receiving xMII
1160 * clock while it is signaling LPI.
1161 */
1162 int val = phy_read_mmd_indirect(phydev->bus, MDIO_CTRL1,
1163 MDIO_MMD_PCS,
1164 phydev->addr);
1165 if (val < 0)
1166 return val;
1167
1168 val |= MDIO_PCS_CTRL1_CLKSTOP_EN;
1169 phy_write_mmd_indirect(phydev->bus, MDIO_CTRL1,
1170 MDIO_MMD_PCS, phydev->addr, val);
1171 }
1172
1173 ret = 0; /* EEE supported */
1174 }
1175
1176eee_exit:
1177 return ret;
1178}
1179EXPORT_SYMBOL(phy_init_eee);
1180
1181/**
1182 * phy_get_eee_err - report the EEE wake error count
1183 * @phydev: target phy_device struct
1184 *
1185 * Description: it is to report the number of time where the PHY
1186 * failed to complete its normal wake sequence.
1187 */
1188int phy_get_eee_err(struct phy_device *phydev)
1189{
1190 return phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_WK_ERR,
1191 MDIO_MMD_PCS, phydev->addr);
1192
1193}
1194EXPORT_SYMBOL(phy_get_eee_err);
1195
1196/**
1197 * phy_ethtool_get_eee - get EEE supported and status
1198 * @phydev: target phy_device struct
1199 * @data: ethtool_eee data
1200 *
1201 * Description: it reportes the Supported/Advertisement/LP Advertisement
1202 * capabilities.
1203 */
1204int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
1205{
1206 int val;
1207
1208 /* Get Supported EEE */
1209 val = phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_ABLE,
1210 MDIO_MMD_PCS, phydev->addr);
1211 if (val < 0)
1212 return val;
1213 data->supported = phy_eee_to_supported(val);
1214
1215 /* Get advertisement EEE */
1216 val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV,
1217 MDIO_MMD_AN, phydev->addr);
1218 if (val < 0)
1219 return val;
1220 data->advertised = phy_eee_to_adv(val);
1221
1222 /* Get LP advertisement EEE */
1223 val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE,
1224 MDIO_MMD_AN, phydev->addr);
1225 if (val < 0)
1226 return val;
1227 data->lp_advertised = phy_eee_to_adv(val);
1228
1229 return 0;
1230}
1231EXPORT_SYMBOL(phy_ethtool_get_eee);
1232
1233/**
1234 * phy_ethtool_set_eee - set EEE supported and status
1235 * @phydev: target phy_device struct
1236 * @data: ethtool_eee data
1237 *
1238 * Description: it is to program the Advertisement EEE register.
1239 */
1240int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
1241{
1242 int val;
1243
1244 val = phy_adv_to_eee(data->advertised);
1245 phy_write_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
1246 phydev->addr, val);
1247
1248 return 0;
1249}
1250EXPORT_SYMBOL(phy_ethtool_set_eee);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index de86a5582224..8af46e88a181 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -14,6 +14,9 @@
14 * option) any later version. 14 * option) any later version.
15 * 15 *
16 */ 16 */
17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
17#include <linux/kernel.h> 20#include <linux/kernel.h>
18#include <linux/string.h> 21#include <linux/string.h>
19#include <linux/errno.h> 22#include <linux/errno.h>
@@ -149,8 +152,8 @@ int phy_scan_fixups(struct phy_device *phydev)
149} 152}
150EXPORT_SYMBOL(phy_scan_fixups); 153EXPORT_SYMBOL(phy_scan_fixups);
151 154
152static struct phy_device* phy_device_create(struct mii_bus *bus, 155struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
153 int addr, int phy_id) 156 bool is_c45, struct phy_c45_device_ids *c45_ids)
154{ 157{
155 struct phy_device *dev; 158 struct phy_device *dev;
156 159
@@ -171,8 +174,11 @@ static struct phy_device* phy_device_create(struct mii_bus *bus,
171 174
172 dev->autoneg = AUTONEG_ENABLE; 175 dev->autoneg = AUTONEG_ENABLE;
173 176
177 dev->is_c45 = is_c45;
174 dev->addr = addr; 178 dev->addr = addr;
175 dev->phy_id = phy_id; 179 dev->phy_id = phy_id;
180 if (c45_ids)
181 dev->c45_ids = *c45_ids;
176 dev->bus = bus; 182 dev->bus = bus;
177 dev->dev.parent = bus->parent; 183 dev->dev.parent = bus->parent;
178 dev->dev.bus = &mdio_bus_type; 184 dev->dev.bus = &mdio_bus_type;
@@ -197,20 +203,99 @@ static struct phy_device* phy_device_create(struct mii_bus *bus,
197 203
198 return dev; 204 return dev;
199} 205}
206EXPORT_SYMBOL(phy_device_create);
207
208/**
209 * get_phy_c45_ids - reads the specified addr for its 802.3-c45 IDs.
210 * @bus: the target MII bus
211 * @addr: PHY address on the MII bus
212 * @phy_id: where to store the ID retrieved.
213 * @c45_ids: where to store the c45 ID information.
214 *
215 * If the PHY devices-in-package appears to be valid, it and the
216 * corresponding identifiers are stored in @c45_ids, zero is stored
217 * in @phy_id. Otherwise 0xffffffff is stored in @phy_id. Returns
218 * zero on success.
219 *
220 */
221static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
222 struct phy_c45_device_ids *c45_ids) {
223 int phy_reg;
224 int i, reg_addr;
225 const int num_ids = ARRAY_SIZE(c45_ids->device_ids);
226
227 /* Find first non-zero Devices In package. Device
228 * zero is reserved, so don't probe it.
229 */
230 for (i = 1;
231 i < num_ids && c45_ids->devices_in_package == 0;
232 i++) {
233 reg_addr = MII_ADDR_C45 | i << 16 | 6;
234 phy_reg = mdiobus_read(bus, addr, reg_addr);
235 if (phy_reg < 0)
236 return -EIO;
237 c45_ids->devices_in_package = (phy_reg & 0xffff) << 16;
238
239 reg_addr = MII_ADDR_C45 | i << 16 | 5;
240 phy_reg = mdiobus_read(bus, addr, reg_addr);
241 if (phy_reg < 0)
242 return -EIO;
243 c45_ids->devices_in_package |= (phy_reg & 0xffff);
244
245 /* If mostly Fs, there is no device there,
246 * let's get out of here.
247 */
248 if ((c45_ids->devices_in_package & 0x1fffffff) == 0x1fffffff) {
249 *phy_id = 0xffffffff;
250 return 0;
251 }
252 }
253
254 /* Now probe Device Identifiers for each device present. */
255 for (i = 1; i < num_ids; i++) {
256 if (!(c45_ids->devices_in_package & (1 << i)))
257 continue;
258
259 reg_addr = MII_ADDR_C45 | i << 16 | MII_PHYSID1;
260 phy_reg = mdiobus_read(bus, addr, reg_addr);
261 if (phy_reg < 0)
262 return -EIO;
263 c45_ids->device_ids[i] = (phy_reg & 0xffff) << 16;
264
265 reg_addr = MII_ADDR_C45 | i << 16 | MII_PHYSID2;
266 phy_reg = mdiobus_read(bus, addr, reg_addr);
267 if (phy_reg < 0)
268 return -EIO;
269 c45_ids->device_ids[i] |= (phy_reg & 0xffff);
270 }
271 *phy_id = 0;
272 return 0;
273}
200 274
201/** 275/**
202 * get_phy_id - reads the specified addr for its ID. 276 * get_phy_id - reads the specified addr for its ID.
203 * @bus: the target MII bus 277 * @bus: the target MII bus
204 * @addr: PHY address on the MII bus 278 * @addr: PHY address on the MII bus
205 * @phy_id: where to store the ID retrieved. 279 * @phy_id: where to store the ID retrieved.
280 * @is_c45: If true the PHY uses the 802.3 clause 45 protocol
281 * @c45_ids: where to store the c45 ID information.
282 *
283 * Description: In the case of a 802.3-c22 PHY, reads the ID registers
284 * of the PHY at @addr on the @bus, stores it in @phy_id and returns
285 * zero on success.
286 *
287 * In the case of a 802.3-c45 PHY, get_phy_c45_ids() is invoked, and
288 * its return value is in turn returned.
206 * 289 *
207 * Description: Reads the ID registers of the PHY at @addr on the
208 * @bus, stores it in @phy_id and returns zero on success.
209 */ 290 */
210static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id) 291static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
292 bool is_c45, struct phy_c45_device_ids *c45_ids)
211{ 293{
212 int phy_reg; 294 int phy_reg;
213 295
296 if (is_c45)
297 return get_phy_c45_ids(bus, addr, phy_id, c45_ids);
298
214 /* Grab the bits from PHYIR1, and put them 299 /* Grab the bits from PHYIR1, and put them
215 * in the upper half */ 300 * in the upper half */
216 phy_reg = mdiobus_read(bus, addr, MII_PHYSID1); 301 phy_reg = mdiobus_read(bus, addr, MII_PHYSID1);
@@ -235,17 +320,19 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id)
235 * get_phy_device - reads the specified PHY device and returns its @phy_device struct 320 * get_phy_device - reads the specified PHY device and returns its @phy_device struct
236 * @bus: the target MII bus 321 * @bus: the target MII bus
237 * @addr: PHY address on the MII bus 322 * @addr: PHY address on the MII bus
323 * @is_c45: If true the PHY uses the 802.3 clause 45 protocol
238 * 324 *
239 * Description: Reads the ID registers of the PHY at @addr on the 325 * Description: Reads the ID registers of the PHY at @addr on the
240 * @bus, then allocates and returns the phy_device to represent it. 326 * @bus, then allocates and returns the phy_device to represent it.
241 */ 327 */
242struct phy_device * get_phy_device(struct mii_bus *bus, int addr) 328struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
243{ 329{
330 struct phy_c45_device_ids c45_ids = {0};
244 struct phy_device *dev = NULL; 331 struct phy_device *dev = NULL;
245 u32 phy_id; 332 u32 phy_id = 0;
246 int r; 333 int r;
247 334
248 r = get_phy_id(bus, addr, &phy_id); 335 r = get_phy_id(bus, addr, &phy_id, is_c45, &c45_ids);
249 if (r) 336 if (r)
250 return ERR_PTR(r); 337 return ERR_PTR(r);
251 338
@@ -253,7 +340,7 @@ struct phy_device * get_phy_device(struct mii_bus *bus, int addr)
253 if ((phy_id & 0x1fffffff) == 0x1fffffff) 340 if ((phy_id & 0x1fffffff) == 0x1fffffff)
254 return NULL; 341 return NULL;
255 342
256 dev = phy_device_create(bus, addr, phy_id); 343 dev = phy_device_create(bus, addr, phy_id, is_c45, &c45_ids);
257 344
258 return dev; 345 return dev;
259} 346}
@@ -446,6 +533,11 @@ static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
446 /* Assume that if there is no driver, that it doesn't 533 /* Assume that if there is no driver, that it doesn't
447 * exist, and we should use the genphy driver. */ 534 * exist, and we should use the genphy driver. */
448 if (NULL == d->driver) { 535 if (NULL == d->driver) {
536 if (phydev->is_c45) {
537 pr_err("No driver for phy %x\n", phydev->phy_id);
538 return -ENODEV;
539 }
540
449 d->driver = &genphy_driver.driver; 541 d->driver = &genphy_driver.driver;
450 542
451 err = d->driver->probe(d); 543 err = d->driver->probe(d);
@@ -975,8 +1067,8 @@ int phy_driver_register(struct phy_driver *new_driver)
975 retval = driver_register(&new_driver->driver); 1067 retval = driver_register(&new_driver->driver);
976 1068
977 if (retval) { 1069 if (retval) {
978 printk(KERN_ERR "%s: Error %d in registering driver\n", 1070 pr_err("%s: Error %d in registering driver\n",
979 new_driver->name, retval); 1071 new_driver->name, retval);
980 1072
981 return retval; 1073 return retval;
982 } 1074 }
@@ -987,12 +1079,37 @@ int phy_driver_register(struct phy_driver *new_driver)
987} 1079}
988EXPORT_SYMBOL(phy_driver_register); 1080EXPORT_SYMBOL(phy_driver_register);
989 1081
1082int phy_drivers_register(struct phy_driver *new_driver, int n)
1083{
1084 int i, ret = 0;
1085
1086 for (i = 0; i < n; i++) {
1087 ret = phy_driver_register(new_driver + i);
1088 if (ret) {
1089 while (i-- > 0)
1090 phy_driver_unregister(new_driver + i);
1091 break;
1092 }
1093 }
1094 return ret;
1095}
1096EXPORT_SYMBOL(phy_drivers_register);
1097
990void phy_driver_unregister(struct phy_driver *drv) 1098void phy_driver_unregister(struct phy_driver *drv)
991{ 1099{
992 driver_unregister(&drv->driver); 1100 driver_unregister(&drv->driver);
993} 1101}
994EXPORT_SYMBOL(phy_driver_unregister); 1102EXPORT_SYMBOL(phy_driver_unregister);
995 1103
1104void phy_drivers_unregister(struct phy_driver *drv, int n)
1105{
1106 int i;
1107 for (i = 0; i < n; i++) {
1108 phy_driver_unregister(drv + i);
1109 }
1110}
1111EXPORT_SYMBOL(phy_drivers_unregister);
1112
996static struct phy_driver genphy_driver = { 1113static struct phy_driver genphy_driver = {
997 .phy_id = 0xffffffff, 1114 .phy_id = 0xffffffff,
998 .phy_id_mask = 0xffffffff, 1115 .phy_id_mask = 0xffffffff,
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index f414ffb5b728..72f93470ea35 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -65,11 +65,7 @@ static struct phy_driver rtl821x_driver = {
65 65
66static int __init realtek_init(void) 66static int __init realtek_init(void)
67{ 67{
68 int ret; 68 return phy_driver_register(&rtl821x_driver);
69
70 ret = phy_driver_register(&rtl821x_driver);
71
72 return ret;
73} 69}
74 70
75static void __exit realtek_exit(void) 71static void __exit realtek_exit(void)
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index fc3e7e96c88c..c6b06d311fee 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -61,7 +61,8 @@ static int lan911x_config_init(struct phy_device *phydev)
61 return smsc_phy_ack_interrupt(phydev); 61 return smsc_phy_ack_interrupt(phydev);
62} 62}
63 63
64static struct phy_driver lan83c185_driver = { 64static struct phy_driver smsc_phy_driver[] = {
65{
65 .phy_id = 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */ 66 .phy_id = 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */
66 .phy_id_mask = 0xfffffff0, 67 .phy_id_mask = 0xfffffff0,
67 .name = "SMSC LAN83C185", 68 .name = "SMSC LAN83C185",
@@ -83,9 +84,7 @@ static struct phy_driver lan83c185_driver = {
83 .resume = genphy_resume, 84 .resume = genphy_resume,
84 85
85 .driver = { .owner = THIS_MODULE, } 86 .driver = { .owner = THIS_MODULE, }
86}; 87}, {
87
88static struct phy_driver lan8187_driver = {
89 .phy_id = 0x0007c0b0, /* OUI=0x00800f, Model#=0x0b */ 88 .phy_id = 0x0007c0b0, /* OUI=0x00800f, Model#=0x0b */
90 .phy_id_mask = 0xfffffff0, 89 .phy_id_mask = 0xfffffff0,
91 .name = "SMSC LAN8187", 90 .name = "SMSC LAN8187",
@@ -107,9 +106,7 @@ static struct phy_driver lan8187_driver = {
107 .resume = genphy_resume, 106 .resume = genphy_resume,
108 107
109 .driver = { .owner = THIS_MODULE, } 108 .driver = { .owner = THIS_MODULE, }
110}; 109}, {
111
112static struct phy_driver lan8700_driver = {
113 .phy_id = 0x0007c0c0, /* OUI=0x00800f, Model#=0x0c */ 110 .phy_id = 0x0007c0c0, /* OUI=0x00800f, Model#=0x0c */
114 .phy_id_mask = 0xfffffff0, 111 .phy_id_mask = 0xfffffff0,
115 .name = "SMSC LAN8700", 112 .name = "SMSC LAN8700",
@@ -131,9 +128,7 @@ static struct phy_driver lan8700_driver = {
131 .resume = genphy_resume, 128 .resume = genphy_resume,
132 129
133 .driver = { .owner = THIS_MODULE, } 130 .driver = { .owner = THIS_MODULE, }
134}; 131}, {
135
136static struct phy_driver lan911x_int_driver = {
137 .phy_id = 0x0007c0d0, /* OUI=0x00800f, Model#=0x0d */ 132 .phy_id = 0x0007c0d0, /* OUI=0x00800f, Model#=0x0d */
138 .phy_id_mask = 0xfffffff0, 133 .phy_id_mask = 0xfffffff0,
139 .name = "SMSC LAN911x Internal PHY", 134 .name = "SMSC LAN911x Internal PHY",
@@ -155,9 +150,7 @@ static struct phy_driver lan911x_int_driver = {
155 .resume = genphy_resume, 150 .resume = genphy_resume,
156 151
157 .driver = { .owner = THIS_MODULE, } 152 .driver = { .owner = THIS_MODULE, }
158}; 153}, {
159
160static struct phy_driver lan8710_driver = {
161 .phy_id = 0x0007c0f0, /* OUI=0x00800f, Model#=0x0f */ 154 .phy_id = 0x0007c0f0, /* OUI=0x00800f, Model#=0x0f */
162 .phy_id_mask = 0xfffffff0, 155 .phy_id_mask = 0xfffffff0,
163 .name = "SMSC LAN8710/LAN8720", 156 .name = "SMSC LAN8710/LAN8720",
@@ -179,53 +172,18 @@ static struct phy_driver lan8710_driver = {
179 .resume = genphy_resume, 172 .resume = genphy_resume,
180 173
181 .driver = { .owner = THIS_MODULE, } 174 .driver = { .owner = THIS_MODULE, }
182}; 175} };
183 176
184static int __init smsc_init(void) 177static int __init smsc_init(void)
185{ 178{
186 int ret; 179 return phy_drivers_register(smsc_phy_driver,
187 180 ARRAY_SIZE(smsc_phy_driver));
188 ret = phy_driver_register (&lan83c185_driver);
189 if (ret)
190 goto err1;
191
192 ret = phy_driver_register (&lan8187_driver);
193 if (ret)
194 goto err2;
195
196 ret = phy_driver_register (&lan8700_driver);
197 if (ret)
198 goto err3;
199
200 ret = phy_driver_register (&lan911x_int_driver);
201 if (ret)
202 goto err4;
203
204 ret = phy_driver_register (&lan8710_driver);
205 if (ret)
206 goto err5;
207
208 return 0;
209
210err5:
211 phy_driver_unregister (&lan911x_int_driver);
212err4:
213 phy_driver_unregister (&lan8700_driver);
214err3:
215 phy_driver_unregister (&lan8187_driver);
216err2:
217 phy_driver_unregister (&lan83c185_driver);
218err1:
219 return ret;
220} 181}
221 182
222static void __exit smsc_exit(void) 183static void __exit smsc_exit(void)
223{ 184{
224 phy_driver_unregister (&lan8710_driver); 185 return phy_drivers_unregister(smsc_phy_driver,
225 phy_driver_unregister (&lan911x_int_driver); 186 ARRAY_SIZE(smsc_phy_driver));
226 phy_driver_unregister (&lan8700_driver);
227 phy_driver_unregister (&lan8187_driver);
228 phy_driver_unregister (&lan83c185_driver);
229} 187}
230 188
231MODULE_DESCRIPTION("SMSC PHY driver"); 189MODULE_DESCRIPTION("SMSC PHY driver");
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 4eb98bc52a0a..1c3abce78b6a 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -11,6 +11,8 @@
11 * by the Free Software Foundation. 11 * by the Free Software Foundation.
12 */ 12 */
13 13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
14#include <linux/types.h> 16#include <linux/types.h>
15#include <linux/kernel.h> 17#include <linux/kernel.h>
16#include <linux/init.h> 18#include <linux/init.h>
@@ -356,7 +358,7 @@ static struct spi_driver ks8995_driver = {
356 358
357static int __init ks8995_init(void) 359static int __init ks8995_init(void)
358{ 360{
359 printk(KERN_INFO DRV_DESC " version " DRV_VERSION"\n"); 361 pr_info(DRV_DESC " version " DRV_VERSION "\n");
360 362
361 return spi_register_driver(&ks8995_driver); 363 return spi_register_driver(&ks8995_driver);
362} 364}
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index 187a2fa814f2..5e1eb138916f 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -81,7 +81,8 @@ static int ste10Xp_ack_interrupt(struct phy_device *phydev)
81 return 0; 81 return 0;
82} 82}
83 83
84static struct phy_driver ste101p_pdriver = { 84static struct phy_driver ste10xp_pdriver[] = {
85{
85 .phy_id = STE101P_PHY_ID, 86 .phy_id = STE101P_PHY_ID,
86 .phy_id_mask = 0xfffffff0, 87 .phy_id_mask = 0xfffffff0,
87 .name = "STe101p", 88 .name = "STe101p",
@@ -95,9 +96,7 @@ static struct phy_driver ste101p_pdriver = {
95 .suspend = genphy_suspend, 96 .suspend = genphy_suspend,
96 .resume = genphy_resume, 97 .resume = genphy_resume,
97 .driver = {.owner = THIS_MODULE,} 98 .driver = {.owner = THIS_MODULE,}
98}; 99}, {
99
100static struct phy_driver ste100p_pdriver = {
101 .phy_id = STE100P_PHY_ID, 100 .phy_id = STE100P_PHY_ID,
102 .phy_id_mask = 0xffffffff, 101 .phy_id_mask = 0xffffffff,
103 .name = "STe100p", 102 .name = "STe100p",
@@ -111,22 +110,18 @@ static struct phy_driver ste100p_pdriver = {
111 .suspend = genphy_suspend, 110 .suspend = genphy_suspend,
112 .resume = genphy_resume, 111 .resume = genphy_resume,
113 .driver = {.owner = THIS_MODULE,} 112 .driver = {.owner = THIS_MODULE,}
114}; 113} };
115 114
116static int __init ste10Xp_init(void) 115static int __init ste10Xp_init(void)
117{ 116{
118 int retval; 117 return phy_drivers_register(ste10xp_pdriver,
119 118 ARRAY_SIZE(ste10xp_pdriver));
120 retval = phy_driver_register(&ste100p_pdriver);
121 if (retval < 0)
122 return retval;
123 return phy_driver_register(&ste101p_pdriver);
124} 119}
125 120
126static void __exit ste10Xp_exit(void) 121static void __exit ste10Xp_exit(void)
127{ 122{
128 phy_driver_unregister(&ste100p_pdriver); 123 phy_drivers_unregister(ste10xp_pdriver,
129 phy_driver_unregister(&ste101p_pdriver); 124 ARRAY_SIZE(ste10xp_pdriver));
130} 125}
131 126
132module_init(ste10Xp_init); 127module_init(ste10Xp_init);
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 0ec8e09cc2ac..2585c383e623 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -138,21 +138,6 @@ static int vsc82xx_config_intr(struct phy_device *phydev)
138 return err; 138 return err;
139} 139}
140 140
141/* Vitesse 824x */
142static struct phy_driver vsc8244_driver = {
143 .phy_id = PHY_ID_VSC8244,
144 .name = "Vitesse VSC8244",
145 .phy_id_mask = 0x000fffc0,
146 .features = PHY_GBIT_FEATURES,
147 .flags = PHY_HAS_INTERRUPT,
148 .config_init = &vsc824x_config_init,
149 .config_aneg = &genphy_config_aneg,
150 .read_status = &genphy_read_status,
151 .ack_interrupt = &vsc824x_ack_interrupt,
152 .config_intr = &vsc82xx_config_intr,
153 .driver = { .owner = THIS_MODULE,},
154};
155
156static int vsc8221_config_init(struct phy_device *phydev) 141static int vsc8221_config_init(struct phy_device *phydev)
157{ 142{
158 int err; 143 int err;
@@ -165,8 +150,22 @@ static int vsc8221_config_init(struct phy_device *phydev)
165 Options are 802.3Z SerDes or SGMII */ 150 Options are 802.3Z SerDes or SGMII */
166} 151}
167 152
168/* Vitesse 8221 */ 153/* Vitesse 824x */
169static struct phy_driver vsc8221_driver = { 154static struct phy_driver vsc82xx_driver[] = {
155{
156 .phy_id = PHY_ID_VSC8244,
157 .name = "Vitesse VSC8244",
158 .phy_id_mask = 0x000fffc0,
159 .features = PHY_GBIT_FEATURES,
160 .flags = PHY_HAS_INTERRUPT,
161 .config_init = &vsc824x_config_init,
162 .config_aneg = &genphy_config_aneg,
163 .read_status = &genphy_read_status,
164 .ack_interrupt = &vsc824x_ack_interrupt,
165 .config_intr = &vsc82xx_config_intr,
166 .driver = { .owner = THIS_MODULE,},
167}, {
168 /* Vitesse 8221 */
170 .phy_id = PHY_ID_VSC8221, 169 .phy_id = PHY_ID_VSC8221,
171 .phy_id_mask = 0x000ffff0, 170 .phy_id_mask = 0x000ffff0,
172 .name = "Vitesse VSC8221", 171 .name = "Vitesse VSC8221",
@@ -177,26 +176,19 @@ static struct phy_driver vsc8221_driver = {
177 .read_status = &genphy_read_status, 176 .read_status = &genphy_read_status,
178 .ack_interrupt = &vsc824x_ack_interrupt, 177 .ack_interrupt = &vsc824x_ack_interrupt,
179 .config_intr = &vsc82xx_config_intr, 178 .config_intr = &vsc82xx_config_intr,
180 .driver = { .owner = THIS_MODULE,}, 179 .driver = { .owner = THIS_MODULE,},
181}; 180} };
182 181
183static int __init vsc82xx_init(void) 182static int __init vsc82xx_init(void)
184{ 183{
185 int err; 184 return phy_drivers_register(vsc82xx_driver,
186 185 ARRAY_SIZE(vsc82xx_driver));
187 err = phy_driver_register(&vsc8244_driver);
188 if (err < 0)
189 return err;
190 err = phy_driver_register(&vsc8221_driver);
191 if (err < 0)
192 phy_driver_unregister(&vsc8244_driver);
193 return err;
194} 186}
195 187
196static void __exit vsc82xx_exit(void) 188static void __exit vsc82xx_exit(void)
197{ 189{
198 phy_driver_unregister(&vsc8244_driver); 190 return phy_drivers_unregister(vsc82xx_driver,
199 phy_driver_unregister(&vsc8221_driver); 191 ARRAY_SIZE(vsc82xx_driver));
200} 192}
201 193
202module_init(vsc82xx_init); 194module_init(vsc82xx_init);
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index d4c9db3da22a..a34d6bf5e43b 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -390,10 +390,10 @@ static void sl_encaps(struct slip *sl, unsigned char *icp, int len)
390#endif 390#endif
391#ifdef CONFIG_SLIP_MODE_SLIP6 391#ifdef CONFIG_SLIP_MODE_SLIP6
392 if (sl->mode & SL_MODE_SLIP6) 392 if (sl->mode & SL_MODE_SLIP6)
393 count = slip_esc6(p, (unsigned char *) sl->xbuff, len); 393 count = slip_esc6(p, sl->xbuff, len);
394 else 394 else
395#endif 395#endif
396 count = slip_esc(p, (unsigned char *) sl->xbuff, len); 396 count = slip_esc(p, sl->xbuff, len);
397 397
398 /* Order of next two lines is *very* important. 398 /* Order of next two lines is *very* important.
399 * When we are sending a little amount of data, 399 * When we are sending a little amount of data,
diff --git a/drivers/net/team/Kconfig b/drivers/net/team/Kconfig
index 89024d5fc33a..6a7260b03a1e 100644
--- a/drivers/net/team/Kconfig
+++ b/drivers/net/team/Kconfig
@@ -15,6 +15,17 @@ menuconfig NET_TEAM
15 15
16if NET_TEAM 16if NET_TEAM
17 17
18config NET_TEAM_MODE_BROADCAST
19 tristate "Broadcast mode support"
20 depends on NET_TEAM
21 ---help---
22 Basic mode where packets are transmitted always by all suitable ports.
23
24 All added ports are setup to have team's mac address.
25
26 To compile this team mode as a module, choose M here: the module
27 will be called team_mode_broadcast.
28
18config NET_TEAM_MODE_ROUNDROBIN 29config NET_TEAM_MODE_ROUNDROBIN
19 tristate "Round-robin mode support" 30 tristate "Round-robin mode support"
20 depends on NET_TEAM 31 depends on NET_TEAM
@@ -22,7 +33,7 @@ config NET_TEAM_MODE_ROUNDROBIN
22 Basic mode where port used for transmitting packets is selected in 33 Basic mode where port used for transmitting packets is selected in
23 round-robin fashion using packet counter. 34 round-robin fashion using packet counter.
24 35
25 All added ports are setup to have bond's mac address. 36 All added ports are setup to have team's mac address.
26 37
27 To compile this team mode as a module, choose M here: the module 38 To compile this team mode as a module, choose M here: the module
28 will be called team_mode_roundrobin. 39 will be called team_mode_roundrobin.
diff --git a/drivers/net/team/Makefile b/drivers/net/team/Makefile
index fb9f4c1c51ff..975763014e5a 100644
--- a/drivers/net/team/Makefile
+++ b/drivers/net/team/Makefile
@@ -3,6 +3,7 @@
3# 3#
4 4
5obj-$(CONFIG_NET_TEAM) += team.o 5obj-$(CONFIG_NET_TEAM) += team.o
6obj-$(CONFIG_NET_TEAM_MODE_BROADCAST) += team_mode_broadcast.o
6obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o 7obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o
7obj-$(CONFIG_NET_TEAM_MODE_ACTIVEBACKUP) += team_mode_activebackup.o 8obj-$(CONFIG_NET_TEAM_MODE_ACTIVEBACKUP) += team_mode_activebackup.o
8obj-$(CONFIG_NET_TEAM_MODE_LOADBALANCE) += team_mode_loadbalance.o 9obj-$(CONFIG_NET_TEAM_MODE_LOADBALANCE) += team_mode_loadbalance.o
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index c61ae35a53ce..813e1319095f 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * net/drivers/team/team.c - Network team device driver 2 * drivers/net/team/team.c - Network team device driver
3 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com> 3 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
@@ -18,6 +18,7 @@
18#include <linux/ctype.h> 18#include <linux/ctype.h>
19#include <linux/notifier.h> 19#include <linux/notifier.h>
20#include <linux/netdevice.h> 20#include <linux/netdevice.h>
21#include <linux/netpoll.h>
21#include <linux/if_vlan.h> 22#include <linux/if_vlan.h>
22#include <linux/if_arp.h> 23#include <linux/if_arp.h>
23#include <linux/socket.h> 24#include <linux/socket.h>
@@ -82,14 +83,16 @@ static void team_refresh_port_linkup(struct team_port *port)
82 port->state.linkup; 83 port->state.linkup;
83} 84}
84 85
86
85/******************* 87/*******************
86 * Options handling 88 * Options handling
87 *******************/ 89 *******************/
88 90
89struct team_option_inst { /* One for each option instance */ 91struct team_option_inst { /* One for each option instance */
90 struct list_head list; 92 struct list_head list;
93 struct list_head tmp_list;
91 struct team_option *option; 94 struct team_option *option;
92 struct team_port *port; /* != NULL if per-port */ 95 struct team_option_inst_info info;
93 bool changed; 96 bool changed;
94 bool removed; 97 bool removed;
95}; 98};
@@ -106,22 +109,6 @@ static struct team_option *__team_find_option(struct team *team,
106 return NULL; 109 return NULL;
107} 110}
108 111
109static int __team_option_inst_add(struct team *team, struct team_option *option,
110 struct team_port *port)
111{
112 struct team_option_inst *opt_inst;
113
114 opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
115 if (!opt_inst)
116 return -ENOMEM;
117 opt_inst->option = option;
118 opt_inst->port = port;
119 opt_inst->changed = true;
120 opt_inst->removed = false;
121 list_add_tail(&opt_inst->list, &team->option_inst_list);
122 return 0;
123}
124
125static void __team_option_inst_del(struct team_option_inst *opt_inst) 112static void __team_option_inst_del(struct team_option_inst *opt_inst)
126{ 113{
127 list_del(&opt_inst->list); 114 list_del(&opt_inst->list);
@@ -139,14 +126,49 @@ static void __team_option_inst_del_option(struct team *team,
139 } 126 }
140} 127}
141 128
129static int __team_option_inst_add(struct team *team, struct team_option *option,
130 struct team_port *port)
131{
132 struct team_option_inst *opt_inst;
133 unsigned int array_size;
134 unsigned int i;
135 int err;
136
137 array_size = option->array_size;
138 if (!array_size)
139 array_size = 1; /* No array but still need one instance */
140
141 for (i = 0; i < array_size; i++) {
142 opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
143 if (!opt_inst)
144 return -ENOMEM;
145 opt_inst->option = option;
146 opt_inst->info.port = port;
147 opt_inst->info.array_index = i;
148 opt_inst->changed = true;
149 opt_inst->removed = false;
150 list_add_tail(&opt_inst->list, &team->option_inst_list);
151 if (option->init) {
152 err = option->init(team, &opt_inst->info);
153 if (err)
154 return err;
155 }
156
157 }
158 return 0;
159}
160
142static int __team_option_inst_add_option(struct team *team, 161static int __team_option_inst_add_option(struct team *team,
143 struct team_option *option) 162 struct team_option *option)
144{ 163{
145 struct team_port *port; 164 struct team_port *port;
146 int err; 165 int err;
147 166
148 if (!option->per_port) 167 if (!option->per_port) {
149 return __team_option_inst_add(team, option, 0); 168 err = __team_option_inst_add(team, option, NULL);
169 if (err)
170 goto inst_del_option;
171 }
150 172
151 list_for_each_entry(port, &team->port_list, list) { 173 list_for_each_entry(port, &team->port_list, list) {
152 err = __team_option_inst_add(team, option, port); 174 err = __team_option_inst_add(team, option, port);
@@ -180,7 +202,7 @@ static void __team_option_inst_del_port(struct team *team,
180 202
181 list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) { 203 list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
182 if (opt_inst->option->per_port && 204 if (opt_inst->option->per_port &&
183 opt_inst->port == port) 205 opt_inst->info.port == port)
184 __team_option_inst_del(opt_inst); 206 __team_option_inst_del(opt_inst);
185 } 207 }
186} 208}
@@ -211,7 +233,7 @@ static void __team_option_inst_mark_removed_port(struct team *team,
211 struct team_option_inst *opt_inst; 233 struct team_option_inst *opt_inst;
212 234
213 list_for_each_entry(opt_inst, &team->option_inst_list, list) { 235 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
214 if (opt_inst->port == port) { 236 if (opt_inst->info.port == port) {
215 opt_inst->changed = true; 237 opt_inst->changed = true;
216 opt_inst->removed = true; 238 opt_inst->removed = true;
217 } 239 }
@@ -324,28 +346,12 @@ void team_options_unregister(struct team *team,
324} 346}
325EXPORT_SYMBOL(team_options_unregister); 347EXPORT_SYMBOL(team_options_unregister);
326 348
327static int team_option_port_add(struct team *team, struct team_port *port)
328{
329 int err;
330
331 err = __team_option_inst_add_port(team, port);
332 if (err)
333 return err;
334 __team_options_change_check(team);
335 return 0;
336}
337
338static void team_option_port_del(struct team *team, struct team_port *port)
339{
340 __team_option_inst_mark_removed_port(team, port);
341 __team_options_change_check(team);
342 __team_option_inst_del_port(team, port);
343}
344
345static int team_option_get(struct team *team, 349static int team_option_get(struct team *team,
346 struct team_option_inst *opt_inst, 350 struct team_option_inst *opt_inst,
347 struct team_gsetter_ctx *ctx) 351 struct team_gsetter_ctx *ctx)
348{ 352{
353 if (!opt_inst->option->getter)
354 return -EOPNOTSUPP;
349 return opt_inst->option->getter(team, ctx); 355 return opt_inst->option->getter(team, ctx);
350} 356}
351 357
@@ -353,16 +359,26 @@ static int team_option_set(struct team *team,
353 struct team_option_inst *opt_inst, 359 struct team_option_inst *opt_inst,
354 struct team_gsetter_ctx *ctx) 360 struct team_gsetter_ctx *ctx)
355{ 361{
356 int err; 362 if (!opt_inst->option->setter)
363 return -EOPNOTSUPP;
364 return opt_inst->option->setter(team, ctx);
365}
357 366
358 err = opt_inst->option->setter(team, ctx); 367void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info)
359 if (err) 368{
360 return err; 369 struct team_option_inst *opt_inst;
361 370
371 opt_inst = container_of(opt_inst_info, struct team_option_inst, info);
362 opt_inst->changed = true; 372 opt_inst->changed = true;
373}
374EXPORT_SYMBOL(team_option_inst_set_change);
375
376void team_options_change_check(struct team *team)
377{
363 __team_options_change_check(team); 378 __team_options_change_check(team);
364 return err;
365} 379}
380EXPORT_SYMBOL(team_options_change_check);
381
366 382
367/**************** 383/****************
368 * Mode handling 384 * Mode handling
@@ -371,13 +387,18 @@ static int team_option_set(struct team *team,
371static LIST_HEAD(mode_list); 387static LIST_HEAD(mode_list);
372static DEFINE_SPINLOCK(mode_list_lock); 388static DEFINE_SPINLOCK(mode_list_lock);
373 389
374static struct team_mode *__find_mode(const char *kind) 390struct team_mode_item {
391 struct list_head list;
392 const struct team_mode *mode;
393};
394
395static struct team_mode_item *__find_mode(const char *kind)
375{ 396{
376 struct team_mode *mode; 397 struct team_mode_item *mitem;
377 398
378 list_for_each_entry(mode, &mode_list, list) { 399 list_for_each_entry(mitem, &mode_list, list) {
379 if (strcmp(mode->kind, kind) == 0) 400 if (strcmp(mitem->mode->kind, kind) == 0)
380 return mode; 401 return mitem;
381 } 402 }
382 return NULL; 403 return NULL;
383} 404}
@@ -392,49 +413,65 @@ static bool is_good_mode_name(const char *name)
392 return true; 413 return true;
393} 414}
394 415
395int team_mode_register(struct team_mode *mode) 416int team_mode_register(const struct team_mode *mode)
396{ 417{
397 int err = 0; 418 int err = 0;
419 struct team_mode_item *mitem;
398 420
399 if (!is_good_mode_name(mode->kind) || 421 if (!is_good_mode_name(mode->kind) ||
400 mode->priv_size > TEAM_MODE_PRIV_SIZE) 422 mode->priv_size > TEAM_MODE_PRIV_SIZE)
401 return -EINVAL; 423 return -EINVAL;
424
425 mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
426 if (!mitem)
427 return -ENOMEM;
428
402 spin_lock(&mode_list_lock); 429 spin_lock(&mode_list_lock);
403 if (__find_mode(mode->kind)) { 430 if (__find_mode(mode->kind)) {
404 err = -EEXIST; 431 err = -EEXIST;
432 kfree(mitem);
405 goto unlock; 433 goto unlock;
406 } 434 }
407 list_add_tail(&mode->list, &mode_list); 435 mitem->mode = mode;
436 list_add_tail(&mitem->list, &mode_list);
408unlock: 437unlock:
409 spin_unlock(&mode_list_lock); 438 spin_unlock(&mode_list_lock);
410 return err; 439 return err;
411} 440}
412EXPORT_SYMBOL(team_mode_register); 441EXPORT_SYMBOL(team_mode_register);
413 442
414int team_mode_unregister(struct team_mode *mode) 443void team_mode_unregister(const struct team_mode *mode)
415{ 444{
445 struct team_mode_item *mitem;
446
416 spin_lock(&mode_list_lock); 447 spin_lock(&mode_list_lock);
417 list_del_init(&mode->list); 448 mitem = __find_mode(mode->kind);
449 if (mitem) {
450 list_del_init(&mitem->list);
451 kfree(mitem);
452 }
418 spin_unlock(&mode_list_lock); 453 spin_unlock(&mode_list_lock);
419 return 0;
420} 454}
421EXPORT_SYMBOL(team_mode_unregister); 455EXPORT_SYMBOL(team_mode_unregister);
422 456
423static struct team_mode *team_mode_get(const char *kind) 457static const struct team_mode *team_mode_get(const char *kind)
424{ 458{
425 struct team_mode *mode; 459 struct team_mode_item *mitem;
460 const struct team_mode *mode = NULL;
426 461
427 spin_lock(&mode_list_lock); 462 spin_lock(&mode_list_lock);
428 mode = __find_mode(kind); 463 mitem = __find_mode(kind);
429 if (!mode) { 464 if (!mitem) {
430 spin_unlock(&mode_list_lock); 465 spin_unlock(&mode_list_lock);
431 request_module("team-mode-%s", kind); 466 request_module("team-mode-%s", kind);
432 spin_lock(&mode_list_lock); 467 spin_lock(&mode_list_lock);
433 mode = __find_mode(kind); 468 mitem = __find_mode(kind);
434 } 469 }
435 if (mode) 470 if (mitem) {
471 mode = mitem->mode;
436 if (!try_module_get(mode->owner)) 472 if (!try_module_get(mode->owner))
437 mode = NULL; 473 mode = NULL;
474 }
438 475
439 spin_unlock(&mode_list_lock); 476 spin_unlock(&mode_list_lock);
440 return mode; 477 return mode;
@@ -458,26 +495,45 @@ rx_handler_result_t team_dummy_receive(struct team *team,
458 return RX_HANDLER_ANOTHER; 495 return RX_HANDLER_ANOTHER;
459} 496}
460 497
461static void team_adjust_ops(struct team *team) 498static const struct team_mode __team_no_mode = {
499 .kind = "*NOMODE*",
500};
501
502static bool team_is_mode_set(struct team *team)
503{
504 return team->mode != &__team_no_mode;
505}
506
507static void team_set_no_mode(struct team *team)
508{
509 team->mode = &__team_no_mode;
510}
511
512static void __team_adjust_ops(struct team *team, int en_port_count)
462{ 513{
463 /* 514 /*
464 * To avoid checks in rx/tx skb paths, ensure here that non-null and 515 * To avoid checks in rx/tx skb paths, ensure here that non-null and
465 * correct ops are always set. 516 * correct ops are always set.
466 */ 517 */
467 518
468 if (list_empty(&team->port_list) || 519 if (!en_port_count || !team_is_mode_set(team) ||
469 !team->mode || !team->mode->ops->transmit) 520 !team->mode->ops->transmit)
470 team->ops.transmit = team_dummy_transmit; 521 team->ops.transmit = team_dummy_transmit;
471 else 522 else
472 team->ops.transmit = team->mode->ops->transmit; 523 team->ops.transmit = team->mode->ops->transmit;
473 524
474 if (list_empty(&team->port_list) || 525 if (!en_port_count || !team_is_mode_set(team) ||
475 !team->mode || !team->mode->ops->receive) 526 !team->mode->ops->receive)
476 team->ops.receive = team_dummy_receive; 527 team->ops.receive = team_dummy_receive;
477 else 528 else
478 team->ops.receive = team->mode->ops->receive; 529 team->ops.receive = team->mode->ops->receive;
479} 530}
480 531
532static void team_adjust_ops(struct team *team)
533{
534 __team_adjust_ops(team, team->en_port_count);
535}
536
481/* 537/*
482 * We can benefit from the fact that it's ensured no port is present 538 * We can benefit from the fact that it's ensured no port is present
483 * at the time of mode change. Therefore no packets are in fly so there's no 539 * at the time of mode change. Therefore no packets are in fly so there's no
@@ -487,7 +543,7 @@ static int __team_change_mode(struct team *team,
487 const struct team_mode *new_mode) 543 const struct team_mode *new_mode)
488{ 544{
489 /* Check if mode was previously set and do cleanup if so */ 545 /* Check if mode was previously set and do cleanup if so */
490 if (team->mode) { 546 if (team_is_mode_set(team)) {
491 void (*exit_op)(struct team *team) = team->ops.exit; 547 void (*exit_op)(struct team *team) = team->ops.exit;
492 548
493 /* Clear ops area so no callback is called any longer */ 549 /* Clear ops area so no callback is called any longer */
@@ -497,7 +553,7 @@ static int __team_change_mode(struct team *team,
497 if (exit_op) 553 if (exit_op)
498 exit_op(team); 554 exit_op(team);
499 team_mode_put(team->mode); 555 team_mode_put(team->mode);
500 team->mode = NULL; 556 team_set_no_mode(team);
501 /* zero private data area */ 557 /* zero private data area */
502 memset(&team->mode_priv, 0, 558 memset(&team->mode_priv, 0,
503 sizeof(struct team) - offsetof(struct team, mode_priv)); 559 sizeof(struct team) - offsetof(struct team, mode_priv));
@@ -523,7 +579,7 @@ static int __team_change_mode(struct team *team,
523 579
524static int team_change_mode(struct team *team, const char *kind) 580static int team_change_mode(struct team *team, const char *kind)
525{ 581{
526 struct team_mode *new_mode; 582 const struct team_mode *new_mode;
527 struct net_device *dev = team->dev; 583 struct net_device *dev = team->dev;
528 int err; 584 int err;
529 585
@@ -532,7 +588,7 @@ static int team_change_mode(struct team *team, const char *kind)
532 return -EBUSY; 588 return -EBUSY;
533 } 589 }
534 590
535 if (team->mode && strcmp(team->mode->kind, kind) == 0) { 591 if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
536 netdev_err(dev, "Unable to change to the same mode the team is in\n"); 592 netdev_err(dev, "Unable to change to the same mode the team is in\n");
537 return -EINVAL; 593 return -EINVAL;
538 } 594 }
@@ -559,8 +615,6 @@ static int team_change_mode(struct team *team, const char *kind)
559 * Rx path frame handler 615 * Rx path frame handler
560 ************************/ 616 ************************/
561 617
562static bool team_port_enabled(struct team_port *port);
563
564/* note: already called with rcu_read_lock */ 618/* note: already called with rcu_read_lock */
565static rx_handler_result_t team_handle_frame(struct sk_buff **pskb) 619static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
566{ 620{
@@ -618,11 +672,6 @@ static bool team_port_find(const struct team *team,
618 return false; 672 return false;
619} 673}
620 674
621static bool team_port_enabled(struct team_port *port)
622{
623 return port->index != -1;
624}
625
626/* 675/*
627 * Enable/disable port by adding to enabled port hashlist and setting 676 * Enable/disable port by adding to enabled port hashlist and setting
628 * port->index (Might be racy so reader could see incorrect ifindex when 677 * port->index (Might be racy so reader could see incorrect ifindex when
@@ -637,6 +686,9 @@ static void team_port_enable(struct team *team,
637 port->index = team->en_port_count++; 686 port->index = team->en_port_count++;
638 hlist_add_head_rcu(&port->hlist, 687 hlist_add_head_rcu(&port->hlist,
639 team_port_index_hash(team, port->index)); 688 team_port_index_hash(team, port->index));
689 team_adjust_ops(team);
690 if (team->ops.port_enabled)
691 team->ops.port_enabled(team, port);
640} 692}
641 693
642static void __reconstruct_port_hlist(struct team *team, int rm_index) 694static void __reconstruct_port_hlist(struct team *team, int rm_index)
@@ -656,14 +708,20 @@ static void __reconstruct_port_hlist(struct team *team, int rm_index)
656static void team_port_disable(struct team *team, 708static void team_port_disable(struct team *team,
657 struct team_port *port) 709 struct team_port *port)
658{ 710{
659 int rm_index = port->index;
660
661 if (!team_port_enabled(port)) 711 if (!team_port_enabled(port))
662 return; 712 return;
713 if (team->ops.port_disabled)
714 team->ops.port_disabled(team, port);
663 hlist_del_rcu(&port->hlist); 715 hlist_del_rcu(&port->hlist);
664 __reconstruct_port_hlist(team, rm_index); 716 __reconstruct_port_hlist(team, port->index);
665 team->en_port_count--;
666 port->index = -1; 717 port->index = -1;
718 __team_adjust_ops(team, team->en_port_count - 1);
719 /*
720 * Wait until readers see adjusted ops. This ensures that
721 * readers never see team->en_port_count == 0
722 */
723 synchronize_rcu();
724 team->en_port_count--;
667} 725}
668 726
669#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \ 727#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
@@ -675,12 +733,14 @@ static void __team_compute_features(struct team *team)
675 struct team_port *port; 733 struct team_port *port;
676 u32 vlan_features = TEAM_VLAN_FEATURES; 734 u32 vlan_features = TEAM_VLAN_FEATURES;
677 unsigned short max_hard_header_len = ETH_HLEN; 735 unsigned short max_hard_header_len = ETH_HLEN;
736 unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
678 737
679 list_for_each_entry(port, &team->port_list, list) { 738 list_for_each_entry(port, &team->port_list, list) {
680 vlan_features = netdev_increment_features(vlan_features, 739 vlan_features = netdev_increment_features(vlan_features,
681 port->dev->vlan_features, 740 port->dev->vlan_features,
682 TEAM_VLAN_FEATURES); 741 TEAM_VLAN_FEATURES);
683 742
743 dst_release_flag &= port->dev->priv_flags;
684 if (port->dev->hard_header_len > max_hard_header_len) 744 if (port->dev->hard_header_len > max_hard_header_len)
685 max_hard_header_len = port->dev->hard_header_len; 745 max_hard_header_len = port->dev->hard_header_len;
686 } 746 }
@@ -688,6 +748,9 @@ static void __team_compute_features(struct team *team)
688 team->dev->vlan_features = vlan_features; 748 team->dev->vlan_features = vlan_features;
689 team->dev->hard_header_len = max_hard_header_len; 749 team->dev->hard_header_len = max_hard_header_len;
690 750
751 flags = team->dev->priv_flags & ~IFF_XMIT_DST_RELEASE;
752 team->dev->priv_flags = flags | dst_release_flag;
753
691 netdev_change_features(team->dev); 754 netdev_change_features(team->dev);
692} 755}
693 756
@@ -730,6 +793,58 @@ static void team_port_leave(struct team *team, struct team_port *port)
730 dev_put(team->dev); 793 dev_put(team->dev);
731} 794}
732 795
796#ifdef CONFIG_NET_POLL_CONTROLLER
797static int team_port_enable_netpoll(struct team *team, struct team_port *port)
798{
799 struct netpoll *np;
800 int err;
801
802 np = kzalloc(sizeof(*np), GFP_KERNEL);
803 if (!np)
804 return -ENOMEM;
805
806 err = __netpoll_setup(np, port->dev);
807 if (err) {
808 kfree(np);
809 return err;
810 }
811 port->np = np;
812 return err;
813}
814
815static void team_port_disable_netpoll(struct team_port *port)
816{
817 struct netpoll *np = port->np;
818
819 if (!np)
820 return;
821 port->np = NULL;
822
823 /* Wait for transmitting packets to finish before freeing. */
824 synchronize_rcu_bh();
825 __netpoll_cleanup(np);
826 kfree(np);
827}
828
829static struct netpoll_info *team_netpoll_info(struct team *team)
830{
831 return team->dev->npinfo;
832}
833
834#else
835static int team_port_enable_netpoll(struct team *team, struct team_port *port)
836{
837 return 0;
838}
839static void team_port_disable_netpoll(struct team_port *port)
840{
841}
842static struct netpoll_info *team_netpoll_info(struct team *team)
843{
844 return NULL;
845}
846#endif
847
733static void __team_port_change_check(struct team_port *port, bool linkup); 848static void __team_port_change_check(struct team_port *port, bool linkup);
734 849
735static int team_port_add(struct team *team, struct net_device *port_dev) 850static int team_port_add(struct team *team, struct net_device *port_dev)
@@ -758,7 +873,8 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
758 return -EBUSY; 873 return -EBUSY;
759 } 874 }
760 875
761 port = kzalloc(sizeof(struct team_port), GFP_KERNEL); 876 port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
877 GFP_KERNEL);
762 if (!port) 878 if (!port)
763 return -ENOMEM; 879 return -ENOMEM;
764 880
@@ -795,6 +911,15 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
795 goto err_vids_add; 911 goto err_vids_add;
796 } 912 }
797 913
914 if (team_netpoll_info(team)) {
915 err = team_port_enable_netpoll(team, port);
916 if (err) {
917 netdev_err(dev, "Failed to enable netpoll on device %s\n",
918 portname);
919 goto err_enable_netpoll;
920 }
921 }
922
798 err = netdev_set_master(port_dev, dev); 923 err = netdev_set_master(port_dev, dev);
799 if (err) { 924 if (err) {
800 netdev_err(dev, "Device %s failed to set master\n", portname); 925 netdev_err(dev, "Device %s failed to set master\n", portname);
@@ -809,7 +934,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
809 goto err_handler_register; 934 goto err_handler_register;
810 } 935 }
811 936
812 err = team_option_port_add(team, port); 937 err = __team_option_inst_add_port(team, port);
813 if (err) { 938 if (err) {
814 netdev_err(dev, "Device %s failed to add per-port options\n", 939 netdev_err(dev, "Device %s failed to add per-port options\n",
815 portname); 940 portname);
@@ -819,9 +944,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
819 port->index = -1; 944 port->index = -1;
820 team_port_enable(team, port); 945 team_port_enable(team, port);
821 list_add_tail_rcu(&port->list, &team->port_list); 946 list_add_tail_rcu(&port->list, &team->port_list);
822 team_adjust_ops(team);
823 __team_compute_features(team); 947 __team_compute_features(team);
824 __team_port_change_check(port, !!netif_carrier_ok(port_dev)); 948 __team_port_change_check(port, !!netif_carrier_ok(port_dev));
949 __team_options_change_check(team);
825 950
826 netdev_info(dev, "Port device %s added\n", portname); 951 netdev_info(dev, "Port device %s added\n", portname);
827 952
@@ -834,6 +959,9 @@ err_handler_register:
834 netdev_set_master(port_dev, NULL); 959 netdev_set_master(port_dev, NULL);
835 960
836err_set_master: 961err_set_master:
962 team_port_disable_netpoll(port);
963
964err_enable_netpoll:
837 vlan_vids_del_by_dev(port_dev, dev); 965 vlan_vids_del_by_dev(port_dev, dev);
838 966
839err_vids_add: 967err_vids_add:
@@ -865,14 +993,16 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
865 return -ENOENT; 993 return -ENOENT;
866 } 994 }
867 995
996 __team_option_inst_mark_removed_port(team, port);
997 __team_options_change_check(team);
998 __team_option_inst_del_port(team, port);
868 port->removed = true; 999 port->removed = true;
869 __team_port_change_check(port, false); 1000 __team_port_change_check(port, false);
870 team_port_disable(team, port); 1001 team_port_disable(team, port);
871 list_del_rcu(&port->list); 1002 list_del_rcu(&port->list);
872 team_adjust_ops(team);
873 team_option_port_del(team, port);
874 netdev_rx_handler_unregister(port_dev); 1003 netdev_rx_handler_unregister(port_dev);
875 netdev_set_master(port_dev, NULL); 1004 netdev_set_master(port_dev, NULL);
1005 team_port_disable_netpoll(port);
876 vlan_vids_del_by_dev(port_dev, dev); 1006 vlan_vids_del_by_dev(port_dev, dev);
877 dev_close(port_dev); 1007 dev_close(port_dev);
878 team_port_leave(team, port); 1008 team_port_leave(team, port);
@@ -891,11 +1021,9 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
891 * Net device ops 1021 * Net device ops
892 *****************/ 1022 *****************/
893 1023
894static const char team_no_mode_kind[] = "*NOMODE*";
895
896static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx) 1024static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
897{ 1025{
898 ctx->data.str_val = team->mode ? team->mode->kind : team_no_mode_kind; 1026 ctx->data.str_val = team->mode->kind;
899 return 0; 1027 return 0;
900} 1028}
901 1029
@@ -907,39 +1035,47 @@ static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
907static int team_port_en_option_get(struct team *team, 1035static int team_port_en_option_get(struct team *team,
908 struct team_gsetter_ctx *ctx) 1036 struct team_gsetter_ctx *ctx)
909{ 1037{
910 ctx->data.bool_val = team_port_enabled(ctx->port); 1038 struct team_port *port = ctx->info->port;
1039
1040 ctx->data.bool_val = team_port_enabled(port);
911 return 0; 1041 return 0;
912} 1042}
913 1043
914static int team_port_en_option_set(struct team *team, 1044static int team_port_en_option_set(struct team *team,
915 struct team_gsetter_ctx *ctx) 1045 struct team_gsetter_ctx *ctx)
916{ 1046{
1047 struct team_port *port = ctx->info->port;
1048
917 if (ctx->data.bool_val) 1049 if (ctx->data.bool_val)
918 team_port_enable(team, ctx->port); 1050 team_port_enable(team, port);
919 else 1051 else
920 team_port_disable(team, ctx->port); 1052 team_port_disable(team, port);
921 return 0; 1053 return 0;
922} 1054}
923 1055
924static int team_user_linkup_option_get(struct team *team, 1056static int team_user_linkup_option_get(struct team *team,
925 struct team_gsetter_ctx *ctx) 1057 struct team_gsetter_ctx *ctx)
926{ 1058{
927 ctx->data.bool_val = ctx->port->user.linkup; 1059 struct team_port *port = ctx->info->port;
1060
1061 ctx->data.bool_val = port->user.linkup;
928 return 0; 1062 return 0;
929} 1063}
930 1064
931static int team_user_linkup_option_set(struct team *team, 1065static int team_user_linkup_option_set(struct team *team,
932 struct team_gsetter_ctx *ctx) 1066 struct team_gsetter_ctx *ctx)
933{ 1067{
934 ctx->port->user.linkup = ctx->data.bool_val; 1068 struct team_port *port = ctx->info->port;
935 team_refresh_port_linkup(ctx->port); 1069
1070 port->user.linkup = ctx->data.bool_val;
1071 team_refresh_port_linkup(port);
936 return 0; 1072 return 0;
937} 1073}
938 1074
939static int team_user_linkup_en_option_get(struct team *team, 1075static int team_user_linkup_en_option_get(struct team *team,
940 struct team_gsetter_ctx *ctx) 1076 struct team_gsetter_ctx *ctx)
941{ 1077{
942 struct team_port *port = ctx->port; 1078 struct team_port *port = ctx->info->port;
943 1079
944 ctx->data.bool_val = port->user.linkup_enabled; 1080 ctx->data.bool_val = port->user.linkup_enabled;
945 return 0; 1081 return 0;
@@ -948,10 +1084,10 @@ static int team_user_linkup_en_option_get(struct team *team,
948static int team_user_linkup_en_option_set(struct team *team, 1084static int team_user_linkup_en_option_set(struct team *team,
949 struct team_gsetter_ctx *ctx) 1085 struct team_gsetter_ctx *ctx)
950{ 1086{
951 struct team_port *port = ctx->port; 1087 struct team_port *port = ctx->info->port;
952 1088
953 port->user.linkup_enabled = ctx->data.bool_val; 1089 port->user.linkup_enabled = ctx->data.bool_val;
954 team_refresh_port_linkup(ctx->port); 1090 team_refresh_port_linkup(port);
955 return 0; 1091 return 0;
956} 1092}
957 1093
@@ -993,6 +1129,7 @@ static int team_init(struct net_device *dev)
993 1129
994 team->dev = dev; 1130 team->dev = dev;
995 mutex_init(&team->lock); 1131 mutex_init(&team->lock);
1132 team_set_no_mode(team);
996 1133
997 team->pcpu_stats = alloc_percpu(struct team_pcpu_stats); 1134 team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
998 if (!team->pcpu_stats) 1135 if (!team->pcpu_stats)
@@ -1116,10 +1253,11 @@ static int team_set_mac_address(struct net_device *dev, void *p)
1116{ 1253{
1117 struct team *team = netdev_priv(dev); 1254 struct team *team = netdev_priv(dev);
1118 struct team_port *port; 1255 struct team_port *port;
1119 struct sockaddr *addr = p; 1256 int err;
1120 1257
1121 dev->addr_assign_type &= ~NET_ADDR_RANDOM; 1258 err = eth_mac_addr(dev, p);
1122 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 1259 if (err)
1260 return err;
1123 rcu_read_lock(); 1261 rcu_read_lock();
1124 list_for_each_entry_rcu(port, &team->port_list, list) 1262 list_for_each_entry_rcu(port, &team->port_list, list)
1125 if (team->ops.port_change_mac) 1263 if (team->ops.port_change_mac)
@@ -1240,6 +1378,48 @@ static int team_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
1240 return 0; 1378 return 0;
1241} 1379}
1242 1380
1381#ifdef CONFIG_NET_POLL_CONTROLLER
1382static void team_poll_controller(struct net_device *dev)
1383{
1384}
1385
1386static void __team_netpoll_cleanup(struct team *team)
1387{
1388 struct team_port *port;
1389
1390 list_for_each_entry(port, &team->port_list, list)
1391 team_port_disable_netpoll(port);
1392}
1393
1394static void team_netpoll_cleanup(struct net_device *dev)
1395{
1396 struct team *team = netdev_priv(dev);
1397
1398 mutex_lock(&team->lock);
1399 __team_netpoll_cleanup(team);
1400 mutex_unlock(&team->lock);
1401}
1402
1403static int team_netpoll_setup(struct net_device *dev,
1404 struct netpoll_info *npifo)
1405{
1406 struct team *team = netdev_priv(dev);
1407 struct team_port *port;
1408 int err;
1409
1410 mutex_lock(&team->lock);
1411 list_for_each_entry(port, &team->port_list, list) {
1412 err = team_port_enable_netpoll(team, port);
1413 if (err) {
1414 __team_netpoll_cleanup(team);
1415 break;
1416 }
1417 }
1418 mutex_unlock(&team->lock);
1419 return err;
1420}
1421#endif
1422
1243static int team_add_slave(struct net_device *dev, struct net_device *port_dev) 1423static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
1244{ 1424{
1245 struct team *team = netdev_priv(dev); 1425 struct team *team = netdev_priv(dev);
@@ -1296,6 +1476,11 @@ static const struct net_device_ops team_netdev_ops = {
1296 .ndo_get_stats64 = team_get_stats64, 1476 .ndo_get_stats64 = team_get_stats64,
1297 .ndo_vlan_rx_add_vid = team_vlan_rx_add_vid, 1477 .ndo_vlan_rx_add_vid = team_vlan_rx_add_vid,
1298 .ndo_vlan_rx_kill_vid = team_vlan_rx_kill_vid, 1478 .ndo_vlan_rx_kill_vid = team_vlan_rx_kill_vid,
1479#ifdef CONFIG_NET_POLL_CONTROLLER
1480 .ndo_poll_controller = team_poll_controller,
1481 .ndo_netpoll_setup = team_netpoll_setup,
1482 .ndo_netpoll_cleanup = team_netpoll_cleanup,
1483#endif
1299 .ndo_add_slave = team_add_slave, 1484 .ndo_add_slave = team_add_slave,
1300 .ndo_del_slave = team_del_slave, 1485 .ndo_del_slave = team_del_slave,
1301 .ndo_fix_features = team_fix_features, 1486 .ndo_fix_features = team_fix_features,
@@ -1321,7 +1506,7 @@ static void team_setup(struct net_device *dev)
1321 * bring us to promisc mode in case a unicast addr is added. 1506 * bring us to promisc mode in case a unicast addr is added.
1322 * Let this up to underlay drivers. 1507 * Let this up to underlay drivers.
1323 */ 1508 */
1324 dev->priv_flags |= IFF_UNICAST_FLT; 1509 dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
1325 1510
1326 dev->features |= NETIF_F_LLTX; 1511 dev->features |= NETIF_F_LLTX;
1327 dev->features |= NETIF_F_GRO; 1512 dev->features |= NETIF_F_GRO;
@@ -1404,7 +1589,7 @@ static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
1404 void *hdr; 1589 void *hdr;
1405 int err; 1590 int err;
1406 1591
1407 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 1592 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1408 if (!msg) 1593 if (!msg)
1409 return -ENOMEM; 1594 return -ENOMEM;
1410 1595
@@ -1466,7 +1651,7 @@ static int team_nl_send_generic(struct genl_info *info, struct team *team,
1466 struct sk_buff *skb; 1651 struct sk_buff *skb;
1467 int err; 1652 int err;
1468 1653
1469 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 1654 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1470 if (!skb) 1655 if (!skb)
1471 return -ENOMEM; 1656 return -ENOMEM;
1472 1657
@@ -1482,16 +1667,128 @@ err_fill:
1482 return err; 1667 return err;
1483} 1668}
1484 1669
1485static int team_nl_fill_options_get(struct sk_buff *skb, 1670typedef int team_nl_send_func_t(struct sk_buff *skb,
1486 u32 pid, u32 seq, int flags, 1671 struct team *team, u32 pid);
1487 struct team *team, bool fillall) 1672
1673static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 pid)
1674{
1675 return genlmsg_unicast(dev_net(team->dev), skb, pid);
1676}
1677
1678static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
1679 struct team_option_inst *opt_inst)
1680{
1681 struct nlattr *option_item;
1682 struct team_option *option = opt_inst->option;
1683 struct team_option_inst_info *opt_inst_info = &opt_inst->info;
1684 struct team_gsetter_ctx ctx;
1685 int err;
1686
1687 ctx.info = opt_inst_info;
1688 err = team_option_get(team, opt_inst, &ctx);
1689 if (err)
1690 return err;
1691
1692 option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
1693 if (!option_item)
1694 return -EMSGSIZE;
1695
1696 if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
1697 goto nest_cancel;
1698 if (opt_inst_info->port &&
1699 nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
1700 opt_inst_info->port->dev->ifindex))
1701 goto nest_cancel;
1702 if (opt_inst->option->array_size &&
1703 nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
1704 opt_inst_info->array_index))
1705 goto nest_cancel;
1706
1707 switch (option->type) {
1708 case TEAM_OPTION_TYPE_U32:
1709 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
1710 goto nest_cancel;
1711 if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
1712 goto nest_cancel;
1713 break;
1714 case TEAM_OPTION_TYPE_STRING:
1715 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
1716 goto nest_cancel;
1717 if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
1718 ctx.data.str_val))
1719 goto nest_cancel;
1720 break;
1721 case TEAM_OPTION_TYPE_BINARY:
1722 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
1723 goto nest_cancel;
1724 if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
1725 ctx.data.bin_val.ptr))
1726 goto nest_cancel;
1727 break;
1728 case TEAM_OPTION_TYPE_BOOL:
1729 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
1730 goto nest_cancel;
1731 if (ctx.data.bool_val &&
1732 nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
1733 goto nest_cancel;
1734 break;
1735 default:
1736 BUG();
1737 }
1738 if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
1739 goto nest_cancel;
1740 if (opt_inst->changed) {
1741 if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
1742 goto nest_cancel;
1743 opt_inst->changed = false;
1744 }
1745 nla_nest_end(skb, option_item);
1746 return 0;
1747
1748nest_cancel:
1749 nla_nest_cancel(skb, option_item);
1750 return -EMSGSIZE;
1751}
1752
1753static int __send_and_alloc_skb(struct sk_buff **pskb,
1754 struct team *team, u32 pid,
1755 team_nl_send_func_t *send_func)
1756{
1757 int err;
1758
1759 if (*pskb) {
1760 err = send_func(*pskb, team, pid);
1761 if (err)
1762 return err;
1763 }
1764 *pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
1765 if (!*pskb)
1766 return -ENOMEM;
1767 return 0;
1768}
1769
1770static int team_nl_send_options_get(struct team *team, u32 pid, u32 seq,
1771 int flags, team_nl_send_func_t *send_func,
1772 struct list_head *sel_opt_inst_list)
1488{ 1773{
1489 struct nlattr *option_list; 1774 struct nlattr *option_list;
1775 struct nlmsghdr *nlh;
1490 void *hdr; 1776 void *hdr;
1491 struct team_option_inst *opt_inst; 1777 struct team_option_inst *opt_inst;
1492 int err; 1778 int err;
1779 struct sk_buff *skb = NULL;
1780 bool incomplete;
1781 int i;
1493 1782
1494 hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags, 1783 opt_inst = list_first_entry(sel_opt_inst_list,
1784 struct team_option_inst, tmp_list);
1785
1786start_again:
1787 err = __send_and_alloc_skb(&skb, team, pid, send_func);
1788 if (err)
1789 return err;
1790
1791 hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags | NLM_F_MULTI,
1495 TEAM_CMD_OPTIONS_GET); 1792 TEAM_CMD_OPTIONS_GET);
1496 if (IS_ERR(hdr)) 1793 if (IS_ERR(hdr))
1497 return PTR_ERR(hdr); 1794 return PTR_ERR(hdr);
@@ -1500,122 +1797,80 @@ static int team_nl_fill_options_get(struct sk_buff *skb,
1500 goto nla_put_failure; 1797 goto nla_put_failure;
1501 option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION); 1798 option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
1502 if (!option_list) 1799 if (!option_list)
1503 return -EMSGSIZE; 1800 goto nla_put_failure;
1504
1505 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
1506 struct nlattr *option_item;
1507 struct team_option *option = opt_inst->option;
1508 struct team_gsetter_ctx ctx;
1509 1801
1510 /* Include only changed options if fill all mode is not on */ 1802 i = 0;
1511 if (!fillall && !opt_inst->changed) 1803 incomplete = false;
1512 continue; 1804 list_for_each_entry_from(opt_inst, sel_opt_inst_list, tmp_list) {
1513 option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION); 1805 err = team_nl_fill_one_option_get(skb, team, opt_inst);
1514 if (!option_item) 1806 if (err) {
1515 goto nla_put_failure; 1807 if (err == -EMSGSIZE) {
1516 if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name)) 1808 if (!i)
1517 goto nla_put_failure; 1809 goto errout;
1518 if (opt_inst->changed) { 1810 incomplete = true;
1519 if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED)) 1811 break;
1520 goto nla_put_failure; 1812 }
1521 opt_inst->changed = false; 1813 goto errout;
1522 }
1523 if (opt_inst->removed &&
1524 nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
1525 goto nla_put_failure;
1526 if (opt_inst->port &&
1527 nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
1528 opt_inst->port->dev->ifindex))
1529 goto nla_put_failure;
1530 ctx.port = opt_inst->port;
1531 switch (option->type) {
1532 case TEAM_OPTION_TYPE_U32:
1533 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
1534 goto nla_put_failure;
1535 err = team_option_get(team, opt_inst, &ctx);
1536 if (err)
1537 goto errout;
1538 if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA,
1539 ctx.data.u32_val))
1540 goto nla_put_failure;
1541 break;
1542 case TEAM_OPTION_TYPE_STRING:
1543 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
1544 goto nla_put_failure;
1545 err = team_option_get(team, opt_inst, &ctx);
1546 if (err)
1547 goto errout;
1548 if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
1549 ctx.data.str_val))
1550 goto nla_put_failure;
1551 break;
1552 case TEAM_OPTION_TYPE_BINARY:
1553 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
1554 goto nla_put_failure;
1555 err = team_option_get(team, opt_inst, &ctx);
1556 if (err)
1557 goto errout;
1558 if (nla_put(skb, TEAM_ATTR_OPTION_DATA,
1559 ctx.data.bin_val.len, ctx.data.bin_val.ptr))
1560 goto nla_put_failure;
1561 break;
1562 case TEAM_OPTION_TYPE_BOOL:
1563 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
1564 goto nla_put_failure;
1565 err = team_option_get(team, opt_inst, &ctx);
1566 if (err)
1567 goto errout;
1568 if (ctx.data.bool_val &&
1569 nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
1570 goto nla_put_failure;
1571 break;
1572 default:
1573 BUG();
1574 } 1814 }
1575 nla_nest_end(skb, option_item); 1815 i++;
1576 } 1816 }
1577 1817
1578 nla_nest_end(skb, option_list); 1818 nla_nest_end(skb, option_list);
1579 return genlmsg_end(skb, hdr); 1819 genlmsg_end(skb, hdr);
1820 if (incomplete)
1821 goto start_again;
1822
1823send_done:
1824 nlh = nlmsg_put(skb, pid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
1825 if (!nlh) {
1826 err = __send_and_alloc_skb(&skb, team, pid, send_func);
1827 if (err)
1828 goto errout;
1829 goto send_done;
1830 }
1831
1832 return send_func(skb, team, pid);
1580 1833
1581nla_put_failure: 1834nla_put_failure:
1582 err = -EMSGSIZE; 1835 err = -EMSGSIZE;
1583errout: 1836errout:
1584 genlmsg_cancel(skb, hdr); 1837 genlmsg_cancel(skb, hdr);
1838 nlmsg_free(skb);
1585 return err; 1839 return err;
1586} 1840}
1587 1841
1588static int team_nl_fill_options_get_all(struct sk_buff *skb,
1589 struct genl_info *info, int flags,
1590 struct team *team)
1591{
1592 return team_nl_fill_options_get(skb, info->snd_pid,
1593 info->snd_seq, NLM_F_ACK,
1594 team, true);
1595}
1596
1597static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info) 1842static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
1598{ 1843{
1599 struct team *team; 1844 struct team *team;
1845 struct team_option_inst *opt_inst;
1600 int err; 1846 int err;
1847 LIST_HEAD(sel_opt_inst_list);
1601 1848
1602 team = team_nl_team_get(info); 1849 team = team_nl_team_get(info);
1603 if (!team) 1850 if (!team)
1604 return -EINVAL; 1851 return -EINVAL;
1605 1852
1606 err = team_nl_send_generic(info, team, team_nl_fill_options_get_all); 1853 list_for_each_entry(opt_inst, &team->option_inst_list, list)
1854 list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
1855 err = team_nl_send_options_get(team, info->snd_pid, info->snd_seq,
1856 NLM_F_ACK, team_nl_send_unicast,
1857 &sel_opt_inst_list);
1607 1858
1608 team_nl_team_put(team); 1859 team_nl_team_put(team);
1609 1860
1610 return err; 1861 return err;
1611} 1862}
1612 1863
1864static int team_nl_send_event_options_get(struct team *team,
1865 struct list_head *sel_opt_inst_list);
1866
1613static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) 1867static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
1614{ 1868{
1615 struct team *team; 1869 struct team *team;
1616 int err = 0; 1870 int err = 0;
1617 int i; 1871 int i;
1618 struct nlattr *nl_option; 1872 struct nlattr *nl_option;
1873 LIST_HEAD(opt_inst_list);
1619 1874
1620 team = team_nl_team_get(info); 1875 team = team_nl_team_get(info);
1621 if (!team) 1876 if (!team)
@@ -1629,10 +1884,12 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
1629 1884
1630 nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) { 1885 nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
1631 struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1]; 1886 struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
1632 struct nlattr *attr_port_ifindex; 1887 struct nlattr *attr;
1633 struct nlattr *attr_data; 1888 struct nlattr *attr_data;
1634 enum team_option_type opt_type; 1889 enum team_option_type opt_type;
1635 int opt_port_ifindex = 0; /* != 0 for per-port options */ 1890 int opt_port_ifindex = 0; /* != 0 for per-port options */
1891 u32 opt_array_index = 0;
1892 bool opt_is_array = false;
1636 struct team_option_inst *opt_inst; 1893 struct team_option_inst *opt_inst;
1637 char *opt_name; 1894 char *opt_name;
1638 bool opt_found = false; 1895 bool opt_found = false;
@@ -1674,23 +1931,33 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
1674 } 1931 }
1675 1932
1676 opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]); 1933 opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
1677 attr_port_ifindex = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX]; 1934 attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
1678 if (attr_port_ifindex) 1935 if (attr)
1679 opt_port_ifindex = nla_get_u32(attr_port_ifindex); 1936 opt_port_ifindex = nla_get_u32(attr);
1937
1938 attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
1939 if (attr) {
1940 opt_is_array = true;
1941 opt_array_index = nla_get_u32(attr);
1942 }
1680 1943
1681 list_for_each_entry(opt_inst, &team->option_inst_list, list) { 1944 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
1682 struct team_option *option = opt_inst->option; 1945 struct team_option *option = opt_inst->option;
1683 struct team_gsetter_ctx ctx; 1946 struct team_gsetter_ctx ctx;
1947 struct team_option_inst_info *opt_inst_info;
1684 int tmp_ifindex; 1948 int tmp_ifindex;
1685 1949
1686 tmp_ifindex = opt_inst->port ? 1950 opt_inst_info = &opt_inst->info;
1687 opt_inst->port->dev->ifindex : 0; 1951 tmp_ifindex = opt_inst_info->port ?
1952 opt_inst_info->port->dev->ifindex : 0;
1688 if (option->type != opt_type || 1953 if (option->type != opt_type ||
1689 strcmp(option->name, opt_name) || 1954 strcmp(option->name, opt_name) ||
1690 tmp_ifindex != opt_port_ifindex) 1955 tmp_ifindex != opt_port_ifindex ||
1956 (option->array_size && !opt_is_array) ||
1957 opt_inst_info->array_index != opt_array_index)
1691 continue; 1958 continue;
1692 opt_found = true; 1959 opt_found = true;
1693 ctx.port = opt_inst->port; 1960 ctx.info = opt_inst_info;
1694 switch (opt_type) { 1961 switch (opt_type) {
1695 case TEAM_OPTION_TYPE_U32: 1962 case TEAM_OPTION_TYPE_U32:
1696 ctx.data.u32_val = nla_get_u32(attr_data); 1963 ctx.data.u32_val = nla_get_u32(attr_data);
@@ -1715,6 +1982,8 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
1715 err = team_option_set(team, opt_inst, &ctx); 1982 err = team_option_set(team, opt_inst, &ctx);
1716 if (err) 1983 if (err)
1717 goto team_put; 1984 goto team_put;
1985 opt_inst->changed = true;
1986 list_add(&opt_inst->tmp_list, &opt_inst_list);
1718 } 1987 }
1719 if (!opt_found) { 1988 if (!opt_found) {
1720 err = -ENOENT; 1989 err = -ENOENT;
@@ -1722,6 +1991,8 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
1722 } 1991 }
1723 } 1992 }
1724 1993
1994 err = team_nl_send_event_options_get(team, &opt_inst_list);
1995
1725team_put: 1996team_put:
1726 team_nl_team_put(team); 1997 team_nl_team_put(team);
1727 1998
@@ -1746,7 +2017,7 @@ static int team_nl_fill_port_list_get(struct sk_buff *skb,
1746 goto nla_put_failure; 2017 goto nla_put_failure;
1747 port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT); 2018 port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
1748 if (!port_list) 2019 if (!port_list)
1749 return -EMSGSIZE; 2020 goto nla_put_failure;
1750 2021
1751 list_for_each_entry(port, &team->port_list, list) { 2022 list_for_each_entry(port, &team->port_list, list) {
1752 struct nlattr *port_item; 2023 struct nlattr *port_item;
@@ -1838,27 +2109,18 @@ static struct genl_multicast_group team_change_event_mcgrp = {
1838 .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, 2109 .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME,
1839}; 2110};
1840 2111
1841static int team_nl_send_event_options_get(struct team *team) 2112static int team_nl_send_multicast(struct sk_buff *skb,
2113 struct team *team, u32 pid)
1842{ 2114{
1843 struct sk_buff *skb; 2115 return genlmsg_multicast_netns(dev_net(team->dev), skb, 0,
1844 int err; 2116 team_change_event_mcgrp.id, GFP_KERNEL);
1845 struct net *net = dev_net(team->dev); 2117}
1846
1847 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
1848 if (!skb)
1849 return -ENOMEM;
1850
1851 err = team_nl_fill_options_get(skb, 0, 0, 0, team, false);
1852 if (err < 0)
1853 goto err_fill;
1854
1855 err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
1856 GFP_KERNEL);
1857 return err;
1858 2118
1859err_fill: 2119static int team_nl_send_event_options_get(struct team *team,
1860 nlmsg_free(skb); 2120 struct list_head *sel_opt_inst_list)
1861 return err; 2121{
2122 return team_nl_send_options_get(team, 0, 0, 0, team_nl_send_multicast,
2123 sel_opt_inst_list);
1862} 2124}
1863 2125
1864static int team_nl_send_event_port_list_get(struct team *team) 2126static int team_nl_send_event_port_list_get(struct team *team)
@@ -1867,7 +2129,7 @@ static int team_nl_send_event_port_list_get(struct team *team)
1867 int err; 2129 int err;
1868 struct net *net = dev_net(team->dev); 2130 struct net *net = dev_net(team->dev);
1869 2131
1870 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 2132 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1871 if (!skb) 2133 if (!skb)
1872 return -ENOMEM; 2134 return -ENOMEM;
1873 2135
@@ -1918,10 +2180,17 @@ static void team_nl_fini(void)
1918static void __team_options_change_check(struct team *team) 2180static void __team_options_change_check(struct team *team)
1919{ 2181{
1920 int err; 2182 int err;
2183 struct team_option_inst *opt_inst;
2184 LIST_HEAD(sel_opt_inst_list);
1921 2185
1922 err = team_nl_send_event_options_get(team); 2186 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
2187 if (opt_inst->changed)
2188 list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
2189 }
2190 err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
1923 if (err) 2191 if (err)
1924 netdev_warn(team->dev, "Failed to send options change via netlink\n"); 2192 netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
2193 err);
1925} 2194}
1926 2195
1927/* rtnl lock is held */ 2196/* rtnl lock is held */
@@ -1965,6 +2234,7 @@ static void team_port_change_check(struct team_port *port, bool linkup)
1965 mutex_unlock(&team->lock); 2234 mutex_unlock(&team->lock);
1966} 2235}
1967 2236
2237
1968/************************************ 2238/************************************
1969 * Net device notifier event handler 2239 * Net device notifier event handler
1970 ************************************/ 2240 ************************************/
diff --git a/drivers/net/team/team_mode_activebackup.c b/drivers/net/team/team_mode_activebackup.c
index fd6bd03aaa89..6262b4defd93 100644
--- a/drivers/net/team/team_mode_activebackup.c
+++ b/drivers/net/team/team_mode_activebackup.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * net/drivers/team/team_mode_activebackup.c - Active-backup mode for team 2 * drivers/net/team/team_mode_activebackup.c - Active-backup mode for team
3 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com> 3 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
@@ -40,11 +40,10 @@ static bool ab_transmit(struct team *team, struct sk_buff *skb)
40{ 40{
41 struct team_port *active_port; 41 struct team_port *active_port;
42 42
43 active_port = rcu_dereference(ab_priv(team)->active_port); 43 active_port = rcu_dereference_bh(ab_priv(team)->active_port);
44 if (unlikely(!active_port)) 44 if (unlikely(!active_port))
45 goto drop; 45 goto drop;
46 skb->dev = active_port->dev; 46 if (team_dev_queue_xmit(team, active_port, skb))
47 if (dev_queue_xmit(skb))
48 return false; 47 return false;
49 return true; 48 return true;
50 49
@@ -61,8 +60,12 @@ static void ab_port_leave(struct team *team, struct team_port *port)
61 60
62static int ab_active_port_get(struct team *team, struct team_gsetter_ctx *ctx) 61static int ab_active_port_get(struct team *team, struct team_gsetter_ctx *ctx)
63{ 62{
64 if (ab_priv(team)->active_port) 63 struct team_port *active_port;
65 ctx->data.u32_val = ab_priv(team)->active_port->dev->ifindex; 64
65 active_port = rcu_dereference_protected(ab_priv(team)->active_port,
66 lockdep_is_held(&team->lock));
67 if (active_port)
68 ctx->data.u32_val = active_port->dev->ifindex;
66 else 69 else
67 ctx->data.u32_val = 0; 70 ctx->data.u32_val = 0;
68 return 0; 71 return 0;
@@ -108,7 +111,7 @@ static const struct team_mode_ops ab_mode_ops = {
108 .port_leave = ab_port_leave, 111 .port_leave = ab_port_leave,
109}; 112};
110 113
111static struct team_mode ab_mode = { 114static const struct team_mode ab_mode = {
112 .kind = "activebackup", 115 .kind = "activebackup",
113 .owner = THIS_MODULE, 116 .owner = THIS_MODULE,
114 .priv_size = sizeof(struct ab_priv), 117 .priv_size = sizeof(struct ab_priv),
diff --git a/drivers/net/team/team_mode_broadcast.c b/drivers/net/team/team_mode_broadcast.c
new file mode 100644
index 000000000000..c96e4d2967f0
--- /dev/null
+++ b/drivers/net/team/team_mode_broadcast.c
@@ -0,0 +1,87 @@
1/*
2 * drivers/net/team/team_mode_broadcast.c - Broadcast mode for team
3 * Copyright (c) 2012 Jiri Pirko <jpirko@redhat.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 */
10
11#include <linux/kernel.h>
12#include <linux/types.h>
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/errno.h>
16#include <linux/netdevice.h>
17#include <linux/if_team.h>
18
19static bool bc_transmit(struct team *team, struct sk_buff *skb)
20{
21 struct team_port *cur;
22 struct team_port *last = NULL;
23 struct sk_buff *skb2;
24 bool ret;
25 bool sum_ret = false;
26
27 list_for_each_entry_rcu(cur, &team->port_list, list) {
28 if (team_port_txable(cur)) {
29 if (last) {
30 skb2 = skb_clone(skb, GFP_ATOMIC);
31 if (skb2) {
32 ret = team_dev_queue_xmit(team, last,
33 skb2);
34 if (!sum_ret)
35 sum_ret = ret;
36 }
37 }
38 last = cur;
39 }
40 }
41 if (last) {
42 ret = team_dev_queue_xmit(team, last, skb);
43 if (!sum_ret)
44 sum_ret = ret;
45 }
46 return sum_ret;
47}
48
49static int bc_port_enter(struct team *team, struct team_port *port)
50{
51 return team_port_set_team_mac(port);
52}
53
54static void bc_port_change_mac(struct team *team, struct team_port *port)
55{
56 team_port_set_team_mac(port);
57}
58
59static const struct team_mode_ops bc_mode_ops = {
60 .transmit = bc_transmit,
61 .port_enter = bc_port_enter,
62 .port_change_mac = bc_port_change_mac,
63};
64
65static const struct team_mode bc_mode = {
66 .kind = "broadcast",
67 .owner = THIS_MODULE,
68 .ops = &bc_mode_ops,
69};
70
71static int __init bc_init_module(void)
72{
73 return team_mode_register(&bc_mode);
74}
75
76static void __exit bc_cleanup_module(void)
77{
78 team_mode_unregister(&bc_mode);
79}
80
81module_init(bc_init_module);
82module_exit(bc_cleanup_module);
83
84MODULE_LICENSE("GPL v2");
85MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
86MODULE_DESCRIPTION("Broadcast mode for team");
87MODULE_ALIAS("team-mode-broadcast");
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index 86e8183c8e3d..cdc31b5ea15e 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -17,34 +17,209 @@
17#include <linux/filter.h> 17#include <linux/filter.h>
18#include <linux/if_team.h> 18#include <linux/if_team.h>
19 19
20struct lb_priv;
21
22typedef struct team_port *lb_select_tx_port_func_t(struct team *,
23 struct lb_priv *,
24 struct sk_buff *,
25 unsigned char);
26
27#define LB_TX_HASHTABLE_SIZE 256 /* hash is a char */
28
29struct lb_stats {
30 u64 tx_bytes;
31};
32
33struct lb_pcpu_stats {
34 struct lb_stats hash_stats[LB_TX_HASHTABLE_SIZE];
35 struct u64_stats_sync syncp;
36};
37
38struct lb_stats_info {
39 struct lb_stats stats;
40 struct lb_stats last_stats;
41 struct team_option_inst_info *opt_inst_info;
42};
43
44struct lb_port_mapping {
45 struct team_port __rcu *port;
46 struct team_option_inst_info *opt_inst_info;
47};
48
49struct lb_priv_ex {
50 struct team *team;
51 struct lb_port_mapping tx_hash_to_port_mapping[LB_TX_HASHTABLE_SIZE];
52 struct sock_fprog *orig_fprog;
53 struct {
54 unsigned int refresh_interval; /* in tenths of second */
55 struct delayed_work refresh_dw;
56 struct lb_stats_info info[LB_TX_HASHTABLE_SIZE];
57 } stats;
58};
59
20struct lb_priv { 60struct lb_priv {
21 struct sk_filter __rcu *fp; 61 struct sk_filter __rcu *fp;
22 struct sock_fprog *orig_fprog; 62 lb_select_tx_port_func_t __rcu *select_tx_port_func;
63 struct lb_pcpu_stats __percpu *pcpu_stats;
64 struct lb_priv_ex *ex; /* priv extension */
23}; 65};
24 66
25static struct lb_priv *lb_priv(struct team *team) 67static struct lb_priv *get_lb_priv(struct team *team)
26{ 68{
27 return (struct lb_priv *) &team->mode_priv; 69 return (struct lb_priv *) &team->mode_priv;
28} 70}
29 71
30static bool lb_transmit(struct team *team, struct sk_buff *skb) 72struct lb_port_priv {
73 struct lb_stats __percpu *pcpu_stats;
74 struct lb_stats_info stats_info;
75};
76
77static struct lb_port_priv *get_lb_port_priv(struct team_port *port)
78{
79 return (struct lb_port_priv *) &port->mode_priv;
80}
81
82#define LB_HTPM_PORT_BY_HASH(lp_priv, hash) \
83 (lb_priv)->ex->tx_hash_to_port_mapping[hash].port
84
85#define LB_HTPM_OPT_INST_INFO_BY_HASH(lp_priv, hash) \
86 (lb_priv)->ex->tx_hash_to_port_mapping[hash].opt_inst_info
87
88static void lb_tx_hash_to_port_mapping_null_port(struct team *team,
89 struct team_port *port)
90{
91 struct lb_priv *lb_priv = get_lb_priv(team);
92 bool changed = false;
93 int i;
94
95 for (i = 0; i < LB_TX_HASHTABLE_SIZE; i++) {
96 struct lb_port_mapping *pm;
97
98 pm = &lb_priv->ex->tx_hash_to_port_mapping[i];
99 if (rcu_access_pointer(pm->port) == port) {
100 RCU_INIT_POINTER(pm->port, NULL);
101 team_option_inst_set_change(pm->opt_inst_info);
102 changed = true;
103 }
104 }
105 if (changed)
106 team_options_change_check(team);
107}
108
109/* Basic tx selection based solely by hash */
110static struct team_port *lb_hash_select_tx_port(struct team *team,
111 struct lb_priv *lb_priv,
112 struct sk_buff *skb,
113 unsigned char hash)
31{ 114{
32 struct sk_filter *fp;
33 struct team_port *port;
34 unsigned int hash;
35 int port_index; 115 int port_index;
36 116
37 fp = rcu_dereference(lb_priv(team)->fp);
38 if (unlikely(!fp))
39 goto drop;
40 hash = SK_RUN_FILTER(fp, skb);
41 port_index = hash % team->en_port_count; 117 port_index = hash % team->en_port_count;
42 port = team_get_port_by_index_rcu(team, port_index); 118 return team_get_port_by_index_rcu(team, port_index);
119}
120
121/* Hash to port mapping select tx port */
122static struct team_port *lb_htpm_select_tx_port(struct team *team,
123 struct lb_priv *lb_priv,
124 struct sk_buff *skb,
125 unsigned char hash)
126{
127 return rcu_dereference_bh(LB_HTPM_PORT_BY_HASH(lb_priv, hash));
128}
129
130struct lb_select_tx_port {
131 char *name;
132 lb_select_tx_port_func_t *func;
133};
134
135static const struct lb_select_tx_port lb_select_tx_port_list[] = {
136 {
137 .name = "hash",
138 .func = lb_hash_select_tx_port,
139 },
140 {
141 .name = "hash_to_port_mapping",
142 .func = lb_htpm_select_tx_port,
143 },
144};
145#define LB_SELECT_TX_PORT_LIST_COUNT ARRAY_SIZE(lb_select_tx_port_list)
146
147static char *lb_select_tx_port_get_name(lb_select_tx_port_func_t *func)
148{
149 int i;
150
151 for (i = 0; i < LB_SELECT_TX_PORT_LIST_COUNT; i++) {
152 const struct lb_select_tx_port *item;
153
154 item = &lb_select_tx_port_list[i];
155 if (item->func == func)
156 return item->name;
157 }
158 return NULL;
159}
160
161static lb_select_tx_port_func_t *lb_select_tx_port_get_func(const char *name)
162{
163 int i;
164
165 for (i = 0; i < LB_SELECT_TX_PORT_LIST_COUNT; i++) {
166 const struct lb_select_tx_port *item;
167
168 item = &lb_select_tx_port_list[i];
169 if (!strcmp(item->name, name))
170 return item->func;
171 }
172 return NULL;
173}
174
175static unsigned int lb_get_skb_hash(struct lb_priv *lb_priv,
176 struct sk_buff *skb)
177{
178 struct sk_filter *fp;
179 uint32_t lhash;
180 unsigned char *c;
181
182 fp = rcu_dereference_bh(lb_priv->fp);
183 if (unlikely(!fp))
184 return 0;
185 lhash = SK_RUN_FILTER(fp, skb);
186 c = (char *) &lhash;
187 return c[0] ^ c[1] ^ c[2] ^ c[3];
188}
189
190static void lb_update_tx_stats(unsigned int tx_bytes, struct lb_priv *lb_priv,
191 struct lb_port_priv *lb_port_priv,
192 unsigned char hash)
193{
194 struct lb_pcpu_stats *pcpu_stats;
195 struct lb_stats *port_stats;
196 struct lb_stats *hash_stats;
197
198 pcpu_stats = this_cpu_ptr(lb_priv->pcpu_stats);
199 port_stats = this_cpu_ptr(lb_port_priv->pcpu_stats);
200 hash_stats = &pcpu_stats->hash_stats[hash];
201 u64_stats_update_begin(&pcpu_stats->syncp);
202 port_stats->tx_bytes += tx_bytes;
203 hash_stats->tx_bytes += tx_bytes;
204 u64_stats_update_end(&pcpu_stats->syncp);
205}
206
207static bool lb_transmit(struct team *team, struct sk_buff *skb)
208{
209 struct lb_priv *lb_priv = get_lb_priv(team);
210 lb_select_tx_port_func_t *select_tx_port_func;
211 struct team_port *port;
212 unsigned char hash;
213 unsigned int tx_bytes = skb->len;
214
215 hash = lb_get_skb_hash(lb_priv, skb);
216 select_tx_port_func = rcu_dereference_bh(lb_priv->select_tx_port_func);
217 port = select_tx_port_func(team, lb_priv, skb, hash);
43 if (unlikely(!port)) 218 if (unlikely(!port))
44 goto drop; 219 goto drop;
45 skb->dev = port->dev; 220 if (team_dev_queue_xmit(team, port, skb))
46 if (dev_queue_xmit(skb))
47 return false; 221 return false;
222 lb_update_tx_stats(tx_bytes, lb_priv, get_lb_port_priv(port), hash);
48 return true; 223 return true;
49 224
50drop: 225drop:
@@ -54,14 +229,16 @@ drop:
54 229
55static int lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx) 230static int lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx)
56{ 231{
57 if (!lb_priv(team)->orig_fprog) { 232 struct lb_priv *lb_priv = get_lb_priv(team);
233
234 if (!lb_priv->ex->orig_fprog) {
58 ctx->data.bin_val.len = 0; 235 ctx->data.bin_val.len = 0;
59 ctx->data.bin_val.ptr = NULL; 236 ctx->data.bin_val.ptr = NULL;
60 return 0; 237 return 0;
61 } 238 }
62 ctx->data.bin_val.len = lb_priv(team)->orig_fprog->len * 239 ctx->data.bin_val.len = lb_priv->ex->orig_fprog->len *
63 sizeof(struct sock_filter); 240 sizeof(struct sock_filter);
64 ctx->data.bin_val.ptr = lb_priv(team)->orig_fprog->filter; 241 ctx->data.bin_val.ptr = lb_priv->ex->orig_fprog->filter;
65 return 0; 242 return 0;
66} 243}
67 244
@@ -94,7 +271,9 @@ static void __fprog_destroy(struct sock_fprog *fprog)
94 271
95static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx) 272static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
96{ 273{
274 struct lb_priv *lb_priv = get_lb_priv(team);
97 struct sk_filter *fp = NULL; 275 struct sk_filter *fp = NULL;
276 struct sk_filter *orig_fp;
98 struct sock_fprog *fprog = NULL; 277 struct sock_fprog *fprog = NULL;
99 int err; 278 int err;
100 279
@@ -110,14 +289,238 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
110 } 289 }
111 } 290 }
112 291
113 if (lb_priv(team)->orig_fprog) { 292 if (lb_priv->ex->orig_fprog) {
114 /* Clear old filter data */ 293 /* Clear old filter data */
115 __fprog_destroy(lb_priv(team)->orig_fprog); 294 __fprog_destroy(lb_priv->ex->orig_fprog);
116 sk_unattached_filter_destroy(lb_priv(team)->fp); 295 orig_fp = rcu_dereference_protected(lb_priv->fp,
296 lockdep_is_held(&team->lock));
297 sk_unattached_filter_destroy(orig_fp);
117 } 298 }
118 299
119 rcu_assign_pointer(lb_priv(team)->fp, fp); 300 rcu_assign_pointer(lb_priv->fp, fp);
120 lb_priv(team)->orig_fprog = fprog; 301 lb_priv->ex->orig_fprog = fprog;
302 return 0;
303}
304
305static int lb_tx_method_get(struct team *team, struct team_gsetter_ctx *ctx)
306{
307 struct lb_priv *lb_priv = get_lb_priv(team);
308 lb_select_tx_port_func_t *func;
309 char *name;
310
311 func = rcu_dereference_protected(lb_priv->select_tx_port_func,
312 lockdep_is_held(&team->lock));
313 name = lb_select_tx_port_get_name(func);
314 BUG_ON(!name);
315 ctx->data.str_val = name;
316 return 0;
317}
318
319static int lb_tx_method_set(struct team *team, struct team_gsetter_ctx *ctx)
320{
321 struct lb_priv *lb_priv = get_lb_priv(team);
322 lb_select_tx_port_func_t *func;
323
324 func = lb_select_tx_port_get_func(ctx->data.str_val);
325 if (!func)
326 return -EINVAL;
327 rcu_assign_pointer(lb_priv->select_tx_port_func, func);
328 return 0;
329}
330
331static int lb_tx_hash_to_port_mapping_init(struct team *team,
332 struct team_option_inst_info *info)
333{
334 struct lb_priv *lb_priv = get_lb_priv(team);
335 unsigned char hash = info->array_index;
336
337 LB_HTPM_OPT_INST_INFO_BY_HASH(lb_priv, hash) = info;
338 return 0;
339}
340
341static int lb_tx_hash_to_port_mapping_get(struct team *team,
342 struct team_gsetter_ctx *ctx)
343{
344 struct lb_priv *lb_priv = get_lb_priv(team);
345 struct team_port *port;
346 unsigned char hash = ctx->info->array_index;
347
348 port = LB_HTPM_PORT_BY_HASH(lb_priv, hash);
349 ctx->data.u32_val = port ? port->dev->ifindex : 0;
350 return 0;
351}
352
353static int lb_tx_hash_to_port_mapping_set(struct team *team,
354 struct team_gsetter_ctx *ctx)
355{
356 struct lb_priv *lb_priv = get_lb_priv(team);
357 struct team_port *port;
358 unsigned char hash = ctx->info->array_index;
359
360 list_for_each_entry(port, &team->port_list, list) {
361 if (ctx->data.u32_val == port->dev->ifindex &&
362 team_port_enabled(port)) {
363 rcu_assign_pointer(LB_HTPM_PORT_BY_HASH(lb_priv, hash),
364 port);
365 return 0;
366 }
367 }
368 return -ENODEV;
369}
370
371static int lb_hash_stats_init(struct team *team,
372 struct team_option_inst_info *info)
373{
374 struct lb_priv *lb_priv = get_lb_priv(team);
375 unsigned char hash = info->array_index;
376
377 lb_priv->ex->stats.info[hash].opt_inst_info = info;
378 return 0;
379}
380
381static int lb_hash_stats_get(struct team *team, struct team_gsetter_ctx *ctx)
382{
383 struct lb_priv *lb_priv = get_lb_priv(team);
384 unsigned char hash = ctx->info->array_index;
385
386 ctx->data.bin_val.ptr = &lb_priv->ex->stats.info[hash].stats;
387 ctx->data.bin_val.len = sizeof(struct lb_stats);
388 return 0;
389}
390
391static int lb_port_stats_init(struct team *team,
392 struct team_option_inst_info *info)
393{
394 struct team_port *port = info->port;
395 struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);
396
397 lb_port_priv->stats_info.opt_inst_info = info;
398 return 0;
399}
400
401static int lb_port_stats_get(struct team *team, struct team_gsetter_ctx *ctx)
402{
403 struct team_port *port = ctx->info->port;
404 struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);
405
406 ctx->data.bin_val.ptr = &lb_port_priv->stats_info.stats;
407 ctx->data.bin_val.len = sizeof(struct lb_stats);
408 return 0;
409}
410
411static void __lb_stats_info_refresh_prepare(struct lb_stats_info *s_info)
412{
413 memcpy(&s_info->last_stats, &s_info->stats, sizeof(struct lb_stats));
414 memset(&s_info->stats, 0, sizeof(struct lb_stats));
415}
416
417static bool __lb_stats_info_refresh_check(struct lb_stats_info *s_info,
418 struct team *team)
419{
420 if (memcmp(&s_info->last_stats, &s_info->stats,
421 sizeof(struct lb_stats))) {
422 team_option_inst_set_change(s_info->opt_inst_info);
423 return true;
424 }
425 return false;
426}
427
428static void __lb_one_cpu_stats_add(struct lb_stats *acc_stats,
429 struct lb_stats *cpu_stats,
430 struct u64_stats_sync *syncp)
431{
432 unsigned int start;
433 struct lb_stats tmp;
434
435 do {
436 start = u64_stats_fetch_begin_bh(syncp);
437 tmp.tx_bytes = cpu_stats->tx_bytes;
438 } while (u64_stats_fetch_retry_bh(syncp, start));
439 acc_stats->tx_bytes += tmp.tx_bytes;
440}
441
442static void lb_stats_refresh(struct work_struct *work)
443{
444 struct team *team;
445 struct lb_priv *lb_priv;
446 struct lb_priv_ex *lb_priv_ex;
447 struct lb_pcpu_stats *pcpu_stats;
448 struct lb_stats *stats;
449 struct lb_stats_info *s_info;
450 struct team_port *port;
451 bool changed = false;
452 int i;
453 int j;
454
455 lb_priv_ex = container_of(work, struct lb_priv_ex,
456 stats.refresh_dw.work);
457
458 team = lb_priv_ex->team;
459 lb_priv = get_lb_priv(team);
460
461 if (!mutex_trylock(&team->lock)) {
462 schedule_delayed_work(&lb_priv_ex->stats.refresh_dw, 0);
463 return;
464 }
465
466 for (j = 0; j < LB_TX_HASHTABLE_SIZE; j++) {
467 s_info = &lb_priv->ex->stats.info[j];
468 __lb_stats_info_refresh_prepare(s_info);
469 for_each_possible_cpu(i) {
470 pcpu_stats = per_cpu_ptr(lb_priv->pcpu_stats, i);
471 stats = &pcpu_stats->hash_stats[j];
472 __lb_one_cpu_stats_add(&s_info->stats, stats,
473 &pcpu_stats->syncp);
474 }
475 changed |= __lb_stats_info_refresh_check(s_info, team);
476 }
477
478 list_for_each_entry(port, &team->port_list, list) {
479 struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);
480
481 s_info = &lb_port_priv->stats_info;
482 __lb_stats_info_refresh_prepare(s_info);
483 for_each_possible_cpu(i) {
484 pcpu_stats = per_cpu_ptr(lb_priv->pcpu_stats, i);
485 stats = per_cpu_ptr(lb_port_priv->pcpu_stats, i);
486 __lb_one_cpu_stats_add(&s_info->stats, stats,
487 &pcpu_stats->syncp);
488 }
489 changed |= __lb_stats_info_refresh_check(s_info, team);
490 }
491
492 if (changed)
493 team_options_change_check(team);
494
495 schedule_delayed_work(&lb_priv_ex->stats.refresh_dw,
496 (lb_priv_ex->stats.refresh_interval * HZ) / 10);
497
498 mutex_unlock(&team->lock);
499}
500
501static int lb_stats_refresh_interval_get(struct team *team,
502 struct team_gsetter_ctx *ctx)
503{
504 struct lb_priv *lb_priv = get_lb_priv(team);
505
506 ctx->data.u32_val = lb_priv->ex->stats.refresh_interval;
507 return 0;
508}
509
510static int lb_stats_refresh_interval_set(struct team *team,
511 struct team_gsetter_ctx *ctx)
512{
513 struct lb_priv *lb_priv = get_lb_priv(team);
514 unsigned int interval;
515
516 interval = ctx->data.u32_val;
517 if (lb_priv->ex->stats.refresh_interval == interval)
518 return 0;
519 lb_priv->ex->stats.refresh_interval = interval;
520 if (interval)
521 schedule_delayed_work(&lb_priv->ex->stats.refresh_dw, 0);
522 else
523 cancel_delayed_work(&lb_priv->ex->stats.refresh_dw);
121 return 0; 524 return 0;
122} 525}
123 526
@@ -128,30 +531,125 @@ static const struct team_option lb_options[] = {
128 .getter = lb_bpf_func_get, 531 .getter = lb_bpf_func_get,
129 .setter = lb_bpf_func_set, 532 .setter = lb_bpf_func_set,
130 }, 533 },
534 {
535 .name = "lb_tx_method",
536 .type = TEAM_OPTION_TYPE_STRING,
537 .getter = lb_tx_method_get,
538 .setter = lb_tx_method_set,
539 },
540 {
541 .name = "lb_tx_hash_to_port_mapping",
542 .array_size = LB_TX_HASHTABLE_SIZE,
543 .type = TEAM_OPTION_TYPE_U32,
544 .init = lb_tx_hash_to_port_mapping_init,
545 .getter = lb_tx_hash_to_port_mapping_get,
546 .setter = lb_tx_hash_to_port_mapping_set,
547 },
548 {
549 .name = "lb_hash_stats",
550 .array_size = LB_TX_HASHTABLE_SIZE,
551 .type = TEAM_OPTION_TYPE_BINARY,
552 .init = lb_hash_stats_init,
553 .getter = lb_hash_stats_get,
554 },
555 {
556 .name = "lb_port_stats",
557 .per_port = true,
558 .type = TEAM_OPTION_TYPE_BINARY,
559 .init = lb_port_stats_init,
560 .getter = lb_port_stats_get,
561 },
562 {
563 .name = "lb_stats_refresh_interval",
564 .type = TEAM_OPTION_TYPE_U32,
565 .getter = lb_stats_refresh_interval_get,
566 .setter = lb_stats_refresh_interval_set,
567 },
131}; 568};
132 569
133static int lb_init(struct team *team) 570static int lb_init(struct team *team)
134{ 571{
135 return team_options_register(team, lb_options, 572 struct lb_priv *lb_priv = get_lb_priv(team);
136 ARRAY_SIZE(lb_options)); 573 lb_select_tx_port_func_t *func;
574 int err;
575
576 /* set default tx port selector */
577 func = lb_select_tx_port_get_func("hash");
578 BUG_ON(!func);
579 rcu_assign_pointer(lb_priv->select_tx_port_func, func);
580
581 lb_priv->ex = kzalloc(sizeof(*lb_priv->ex), GFP_KERNEL);
582 if (!lb_priv->ex)
583 return -ENOMEM;
584 lb_priv->ex->team = team;
585
586 lb_priv->pcpu_stats = alloc_percpu(struct lb_pcpu_stats);
587 if (!lb_priv->pcpu_stats) {
588 err = -ENOMEM;
589 goto err_alloc_pcpu_stats;
590 }
591
592 INIT_DELAYED_WORK(&lb_priv->ex->stats.refresh_dw, lb_stats_refresh);
593
594 err = team_options_register(team, lb_options, ARRAY_SIZE(lb_options));
595 if (err)
596 goto err_options_register;
597 return 0;
598
599err_options_register:
600 free_percpu(lb_priv->pcpu_stats);
601err_alloc_pcpu_stats:
602 kfree(lb_priv->ex);
603 return err;
137} 604}
138 605
139static void lb_exit(struct team *team) 606static void lb_exit(struct team *team)
140{ 607{
608 struct lb_priv *lb_priv = get_lb_priv(team);
609
141 team_options_unregister(team, lb_options, 610 team_options_unregister(team, lb_options,
142 ARRAY_SIZE(lb_options)); 611 ARRAY_SIZE(lb_options));
612 cancel_delayed_work_sync(&lb_priv->ex->stats.refresh_dw);
613 free_percpu(lb_priv->pcpu_stats);
614 kfree(lb_priv->ex);
615}
616
617static int lb_port_enter(struct team *team, struct team_port *port)
618{
619 struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);
620
621 lb_port_priv->pcpu_stats = alloc_percpu(struct lb_stats);
622 if (!lb_port_priv->pcpu_stats)
623 return -ENOMEM;
624 return 0;
625}
626
627static void lb_port_leave(struct team *team, struct team_port *port)
628{
629 struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);
630
631 free_percpu(lb_port_priv->pcpu_stats);
632}
633
634static void lb_port_disabled(struct team *team, struct team_port *port)
635{
636 lb_tx_hash_to_port_mapping_null_port(team, port);
143} 637}
144 638
145static const struct team_mode_ops lb_mode_ops = { 639static const struct team_mode_ops lb_mode_ops = {
146 .init = lb_init, 640 .init = lb_init,
147 .exit = lb_exit, 641 .exit = lb_exit,
642 .port_enter = lb_port_enter,
643 .port_leave = lb_port_leave,
644 .port_disabled = lb_port_disabled,
148 .transmit = lb_transmit, 645 .transmit = lb_transmit,
149}; 646};
150 647
151static struct team_mode lb_mode = { 648static const struct team_mode lb_mode = {
152 .kind = "loadbalance", 649 .kind = "loadbalance",
153 .owner = THIS_MODULE, 650 .owner = THIS_MODULE,
154 .priv_size = sizeof(struct lb_priv), 651 .priv_size = sizeof(struct lb_priv),
652 .port_priv_size = sizeof(struct lb_port_priv),
155 .ops = &lb_mode_ops, 653 .ops = &lb_mode_ops,
156}; 654};
157 655
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
index 6abfbdc96be5..ad7ed0ec544c 100644
--- a/drivers/net/team/team_mode_roundrobin.c
+++ b/drivers/net/team/team_mode_roundrobin.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * net/drivers/team/team_mode_roundrobin.c - Round-robin mode for team 2 * drivers/net/team/team_mode_roundrobin.c - Round-robin mode for team
3 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com> 3 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
@@ -30,16 +30,16 @@ static struct team_port *__get_first_port_up(struct team *team,
30{ 30{
31 struct team_port *cur; 31 struct team_port *cur;
32 32
33 if (port->linkup) 33 if (team_port_txable(port))
34 return port; 34 return port;
35 cur = port; 35 cur = port;
36 list_for_each_entry_continue_rcu(cur, &team->port_list, list) 36 list_for_each_entry_continue_rcu(cur, &team->port_list, list)
37 if (cur->linkup) 37 if (team_port_txable(port))
38 return cur; 38 return cur;
39 list_for_each_entry_rcu(cur, &team->port_list, list) { 39 list_for_each_entry_rcu(cur, &team->port_list, list) {
40 if (cur == port) 40 if (cur == port)
41 break; 41 break;
42 if (cur->linkup) 42 if (team_port_txable(port))
43 return cur; 43 return cur;
44 } 44 }
45 return NULL; 45 return NULL;
@@ -55,8 +55,7 @@ static bool rr_transmit(struct team *team, struct sk_buff *skb)
55 port = __get_first_port_up(team, port); 55 port = __get_first_port_up(team, port);
56 if (unlikely(!port)) 56 if (unlikely(!port))
57 goto drop; 57 goto drop;
58 skb->dev = port->dev; 58 if (team_dev_queue_xmit(team, port, skb))
59 if (dev_queue_xmit(skb))
60 return false; 59 return false;
61 return true; 60 return true;
62 61
@@ -81,7 +80,7 @@ static const struct team_mode_ops rr_mode_ops = {
81 .port_change_mac = rr_port_change_mac, 80 .port_change_mac = rr_port_change_mac,
82}; 81};
83 82
84static struct team_mode rr_mode = { 83static const struct team_mode rr_mode = {
85 .kind = "roundrobin", 84 .kind = "roundrobin",
86 .owner = THIS_MODULE, 85 .owner = THIS_MODULE,
87 .priv_size = sizeof(struct rr_priv), 86 .priv_size = sizeof(struct rr_priv),
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 987aeefbc774..961fad1f7053 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -22,7 +22,7 @@
22 * Add TUNSETLINK ioctl to set the link encapsulation 22 * Add TUNSETLINK ioctl to set the link encapsulation
23 * 23 *
24 * Mark Smith <markzzzsmith@yahoo.com.au> 24 * Mark Smith <markzzzsmith@yahoo.com.au>
25 * Use random_ether_addr() for tap MAC address. 25 * Use eth_random_addr() for tap MAC address.
26 * 26 *
27 * Harald Roelle <harald.roelle@ifi.lmu.de> 2004/04/20 27 * Harald Roelle <harald.roelle@ifi.lmu.de> 2004/04/20
28 * Fixes in packet dropping, queue length setting and queue wakeup. 28 * Fixes in packet dropping, queue length setting and queue wakeup.
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 833e32f8d63b..c1ae76968f47 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -134,6 +134,7 @@ config USB_NET_AX8817X
134 tristate "ASIX AX88xxx Based USB 2.0 Ethernet Adapters" 134 tristate "ASIX AX88xxx Based USB 2.0 Ethernet Adapters"
135 depends on USB_USBNET 135 depends on USB_USBNET
136 select CRC32 136 select CRC32
137 select PHYLIB
137 default y 138 default y
138 help 139 help
139 This option adds support for ASIX AX88xxx based USB 2.0 140 This option adds support for ASIX AX88xxx based USB 2.0
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index a2e2d72c52a0..bf063008c1af 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_USB_PEGASUS) += pegasus.o
8obj-$(CONFIG_USB_RTL8150) += rtl8150.o 8obj-$(CONFIG_USB_RTL8150) += rtl8150.o
9obj-$(CONFIG_USB_HSO) += hso.o 9obj-$(CONFIG_USB_HSO) += hso.o
10obj-$(CONFIG_USB_NET_AX8817X) += asix.o 10obj-$(CONFIG_USB_NET_AX8817X) += asix.o
11asix-y := asix_devices.o asix_common.o ax88172a.o
11obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o 12obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o
12obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o 13obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o
13obj-$(CONFIG_USB_NET_DM9601) += dm9601.o 14obj-$(CONFIG_USB_NET_DM9601) += dm9601.o
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
new file mode 100644
index 000000000000..e889631161b8
--- /dev/null
+++ b/drivers/net/usb/asix.h
@@ -0,0 +1,218 @@
1/*
2 * ASIX AX8817X based USB 2.0 Ethernet Devices
3 * Copyright (C) 2003-2006 David Hollis <dhollis@davehollis.com>
4 * Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net>
5 * Copyright (C) 2006 James Painter <jamie.painter@iname.com>
6 * Copyright (c) 2002-2003 TiVo Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#ifndef _ASIX_H
24#define _ASIX_H
25
26// #define DEBUG // error path messages, extra info
27// #define VERBOSE // more; success messages
28
29#include <linux/module.h>
30#include <linux/kmod.h>
31#include <linux/init.h>
32#include <linux/netdevice.h>
33#include <linux/etherdevice.h>
34#include <linux/ethtool.h>
35#include <linux/workqueue.h>
36#include <linux/mii.h>
37#include <linux/usb.h>
38#include <linux/crc32.h>
39#include <linux/usb/usbnet.h>
40#include <linux/slab.h>
41#include <linux/if_vlan.h>
42
43#define DRIVER_VERSION "22-Dec-2011"
44#define DRIVER_NAME "asix"
45
46/* ASIX AX8817X based USB 2.0 Ethernet Devices */
47
48#define AX_CMD_SET_SW_MII 0x06
49#define AX_CMD_READ_MII_REG 0x07
50#define AX_CMD_WRITE_MII_REG 0x08
51#define AX_CMD_SET_HW_MII 0x0a
52#define AX_CMD_READ_EEPROM 0x0b
53#define AX_CMD_WRITE_EEPROM 0x0c
54#define AX_CMD_WRITE_ENABLE 0x0d
55#define AX_CMD_WRITE_DISABLE 0x0e
56#define AX_CMD_READ_RX_CTL 0x0f
57#define AX_CMD_WRITE_RX_CTL 0x10
58#define AX_CMD_READ_IPG012 0x11
59#define AX_CMD_WRITE_IPG0 0x12
60#define AX_CMD_WRITE_IPG1 0x13
61#define AX_CMD_READ_NODE_ID 0x13
62#define AX_CMD_WRITE_NODE_ID 0x14
63#define AX_CMD_WRITE_IPG2 0x14
64#define AX_CMD_WRITE_MULTI_FILTER 0x16
65#define AX88172_CMD_READ_NODE_ID 0x17
66#define AX_CMD_READ_PHY_ID 0x19
67#define AX_CMD_READ_MEDIUM_STATUS 0x1a
68#define AX_CMD_WRITE_MEDIUM_MODE 0x1b
69#define AX_CMD_READ_MONITOR_MODE 0x1c
70#define AX_CMD_WRITE_MONITOR_MODE 0x1d
71#define AX_CMD_READ_GPIOS 0x1e
72#define AX_CMD_WRITE_GPIOS 0x1f
73#define AX_CMD_SW_RESET 0x20
74#define AX_CMD_SW_PHY_STATUS 0x21
75#define AX_CMD_SW_PHY_SELECT 0x22
76
77#define AX_PHY_SELECT_MASK (BIT(3) | BIT(2))
78#define AX_PHY_SELECT_INTERNAL 0
79#define AX_PHY_SELECT_EXTERNAL BIT(2)
80
81#define AX_MONITOR_MODE 0x01
82#define AX_MONITOR_LINK 0x02
83#define AX_MONITOR_MAGIC 0x04
84#define AX_MONITOR_HSFS 0x10
85
86/* AX88172 Medium Status Register values */
87#define AX88172_MEDIUM_FD 0x02
88#define AX88172_MEDIUM_TX 0x04
89#define AX88172_MEDIUM_FC 0x10
90#define AX88172_MEDIUM_DEFAULT \
91 ( AX88172_MEDIUM_FD | AX88172_MEDIUM_TX | AX88172_MEDIUM_FC )
92
93#define AX_MCAST_FILTER_SIZE 8
94#define AX_MAX_MCAST 64
95
96#define AX_SWRESET_CLEAR 0x00
97#define AX_SWRESET_RR 0x01
98#define AX_SWRESET_RT 0x02
99#define AX_SWRESET_PRTE 0x04
100#define AX_SWRESET_PRL 0x08
101#define AX_SWRESET_BZ 0x10
102#define AX_SWRESET_IPRL 0x20
103#define AX_SWRESET_IPPD 0x40
104
105#define AX88772_IPG0_DEFAULT 0x15
106#define AX88772_IPG1_DEFAULT 0x0c
107#define AX88772_IPG2_DEFAULT 0x12
108
109/* AX88772 & AX88178 Medium Mode Register */
110#define AX_MEDIUM_PF 0x0080
111#define AX_MEDIUM_JFE 0x0040
112#define AX_MEDIUM_TFC 0x0020
113#define AX_MEDIUM_RFC 0x0010
114#define AX_MEDIUM_ENCK 0x0008
115#define AX_MEDIUM_AC 0x0004
116#define AX_MEDIUM_FD 0x0002
117#define AX_MEDIUM_GM 0x0001
118#define AX_MEDIUM_SM 0x1000
119#define AX_MEDIUM_SBP 0x0800
120#define AX_MEDIUM_PS 0x0200
121#define AX_MEDIUM_RE 0x0100
122
123#define AX88178_MEDIUM_DEFAULT \
124 (AX_MEDIUM_PS | AX_MEDIUM_FD | AX_MEDIUM_AC | \
125 AX_MEDIUM_RFC | AX_MEDIUM_TFC | AX_MEDIUM_JFE | \
126 AX_MEDIUM_RE)
127
128#define AX88772_MEDIUM_DEFAULT \
129 (AX_MEDIUM_FD | AX_MEDIUM_RFC | \
130 AX_MEDIUM_TFC | AX_MEDIUM_PS | \
131 AX_MEDIUM_AC | AX_MEDIUM_RE)
132
133/* AX88772 & AX88178 RX_CTL values */
134#define AX_RX_CTL_SO 0x0080
135#define AX_RX_CTL_AP 0x0020
136#define AX_RX_CTL_AM 0x0010
137#define AX_RX_CTL_AB 0x0008
138#define AX_RX_CTL_SEP 0x0004
139#define AX_RX_CTL_AMALL 0x0002
140#define AX_RX_CTL_PRO 0x0001
141#define AX_RX_CTL_MFB_2048 0x0000
142#define AX_RX_CTL_MFB_4096 0x0100
143#define AX_RX_CTL_MFB_8192 0x0200
144#define AX_RX_CTL_MFB_16384 0x0300
145
146#define AX_DEFAULT_RX_CTL (AX_RX_CTL_SO | AX_RX_CTL_AB)
147
148/* GPIO 0 .. 2 toggles */
149#define AX_GPIO_GPO0EN 0x01 /* GPIO0 Output enable */
150#define AX_GPIO_GPO_0 0x02 /* GPIO0 Output value */
151#define AX_GPIO_GPO1EN 0x04 /* GPIO1 Output enable */
152#define AX_GPIO_GPO_1 0x08 /* GPIO1 Output value */
153#define AX_GPIO_GPO2EN 0x10 /* GPIO2 Output enable */
154#define AX_GPIO_GPO_2 0x20 /* GPIO2 Output value */
155#define AX_GPIO_RESERVED 0x40 /* Reserved */
156#define AX_GPIO_RSE 0x80 /* Reload serial EEPROM */
157
158#define AX_EEPROM_MAGIC 0xdeadbeef
159#define AX_EEPROM_LEN 0x200
160
161/* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */
162struct asix_data {
163 u8 multi_filter[AX_MCAST_FILTER_SIZE];
164 u8 mac_addr[ETH_ALEN];
165 u8 phymode;
166 u8 ledmode;
167 u8 res;
168};
169
170int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
171 u16 size, void *data);
172
173int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
174 u16 size, void *data);
175
176void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value,
177 u16 index, u16 size, void *data);
178
179int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb);
180
181struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
182 gfp_t flags);
183
184int asix_set_sw_mii(struct usbnet *dev);
185int asix_set_hw_mii(struct usbnet *dev);
186
187int asix_read_phy_addr(struct usbnet *dev, int internal);
188int asix_get_phy_addr(struct usbnet *dev);
189
190int asix_sw_reset(struct usbnet *dev, u8 flags);
191
192u16 asix_read_rx_ctl(struct usbnet *dev);
193int asix_write_rx_ctl(struct usbnet *dev, u16 mode);
194
195u16 asix_read_medium_status(struct usbnet *dev);
196int asix_write_medium_mode(struct usbnet *dev, u16 mode);
197
198int asix_write_gpio(struct usbnet *dev, u16 value, int sleep);
199
200void asix_set_multicast(struct net_device *net);
201
202int asix_mdio_read(struct net_device *netdev, int phy_id, int loc);
203void asix_mdio_write(struct net_device *netdev, int phy_id, int loc, int val);
204
205void asix_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo);
206int asix_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo);
207
208int asix_get_eeprom_len(struct net_device *net);
209int asix_get_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
210 u8 *data);
211int asix_set_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
212 u8 *data);
213
214void asix_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info);
215
216int asix_set_mac_address(struct net_device *net, void *p);
217
218#endif /* _ASIX_H */
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
new file mode 100644
index 000000000000..774d9ce2dafc
--- /dev/null
+++ b/drivers/net/usb/asix_common.c
@@ -0,0 +1,631 @@
1/*
2 * ASIX AX8817X based USB 2.0 Ethernet Devices
3 * Copyright (C) 2003-2006 David Hollis <dhollis@davehollis.com>
4 * Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net>
5 * Copyright (C) 2006 James Painter <jamie.painter@iname.com>
6 * Copyright (c) 2002-2003 TiVo Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#include "asix.h"
24
25int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
26 u16 size, void *data)
27{
28 void *buf;
29 int err = -ENOMEM;
30
31 netdev_dbg(dev->net, "asix_read_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
32 cmd, value, index, size);
33
34 buf = kmalloc(size, GFP_KERNEL);
35 if (!buf)
36 goto out;
37
38 err = usb_control_msg(
39 dev->udev,
40 usb_rcvctrlpipe(dev->udev, 0),
41 cmd,
42 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
43 value,
44 index,
45 buf,
46 size,
47 USB_CTRL_GET_TIMEOUT);
48 if (err == size)
49 memcpy(data, buf, size);
50 else if (err >= 0)
51 err = -EINVAL;
52 kfree(buf);
53
54out:
55 return err;
56}
57
58int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
59 u16 size, void *data)
60{
61 void *buf = NULL;
62 int err = -ENOMEM;
63
64 netdev_dbg(dev->net, "asix_write_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
65 cmd, value, index, size);
66
67 if (data) {
68 buf = kmemdup(data, size, GFP_KERNEL);
69 if (!buf)
70 goto out;
71 }
72
73 err = usb_control_msg(
74 dev->udev,
75 usb_sndctrlpipe(dev->udev, 0),
76 cmd,
77 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
78 value,
79 index,
80 buf,
81 size,
82 USB_CTRL_SET_TIMEOUT);
83 kfree(buf);
84
85out:
86 return err;
87}
88
89static void asix_async_cmd_callback(struct urb *urb)
90{
91 struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
92 int status = urb->status;
93
94 if (status < 0)
95 printk(KERN_DEBUG "asix_async_cmd_callback() failed with %d",
96 status);
97
98 kfree(req);
99 usb_free_urb(urb);
100}
101
102void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
103 u16 size, void *data)
104{
105 struct usb_ctrlrequest *req;
106 int status;
107 struct urb *urb;
108
109 netdev_dbg(dev->net, "asix_write_cmd_async() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
110 cmd, value, index, size);
111
112 urb = usb_alloc_urb(0, GFP_ATOMIC);
113 if (!urb) {
114 netdev_err(dev->net, "Error allocating URB in write_cmd_async!\n");
115 return;
116 }
117
118 req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
119 if (!req) {
120 netdev_err(dev->net, "Failed to allocate memory for control request\n");
121 usb_free_urb(urb);
122 return;
123 }
124
125 req->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
126 req->bRequest = cmd;
127 req->wValue = cpu_to_le16(value);
128 req->wIndex = cpu_to_le16(index);
129 req->wLength = cpu_to_le16(size);
130
131 usb_fill_control_urb(urb, dev->udev,
132 usb_sndctrlpipe(dev->udev, 0),
133 (void *)req, data, size,
134 asix_async_cmd_callback, req);
135
136 status = usb_submit_urb(urb, GFP_ATOMIC);
137 if (status < 0) {
138 netdev_err(dev->net, "Error submitting the control message: status=%d\n",
139 status);
140 kfree(req);
141 usb_free_urb(urb);
142 }
143}
144
/* asix_rx_fixup() - unpack ASIX RX framing from a USB bulk-in buffer.
 *
 * Each Ethernet frame inside @skb is preceded by a 32-bit little-endian
 * header: the low 11 bits are the frame length and bits 16..26 hold the
 * one's complement of that length, used here as a sanity check.  Each
 * validated frame is copied into its own skb and passed up via
 * usbnet_skb_return(); frames are padded to 16-bit alignment.
 *
 * Returns 1 when the whole buffer parsed cleanly, 0 on any framing
 * error (usbnet then discards the buffer).
 */
int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
	int offset = 0;

	while (offset + sizeof(u32) < skb->len) {
		struct sk_buff *ax_skb;
		u16 size;
		u32 header = get_unaligned_le32(skb->data + offset);

		offset += sizeof(u32);

		/* get the packet length */
		size = (u16) (header & 0x7ff);
		if (size != ((~header >> 16) & 0x07ff)) {
			netdev_err(dev->net, "asix_rx_fixup() Bad Header Length\n");
			return 0;
		}

		/* Reject frames longer than the MTU allows or that would
		 * run off the end of the URB buffer.
		 */
		if ((size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) ||
		    (size + offset > skb->len)) {
			netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
				   size);
			return 0;
		}
		ax_skb = netdev_alloc_skb_ip_align(dev->net, size);
		if (!ax_skb)
			return 0;

		skb_put(ax_skb, size);
		memcpy(ax_skb->data, skb->data + offset, size);
		usbnet_skb_return(dev, ax_skb);

		/* advance past the frame, rounded up to an even byte count */
		offset += (size + 1) & 0xfffe;
	}

	if (skb->len != offset) {
		netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d\n",
			   skb->len);
		return 0;
	}
	return 1;
}
187
/* asix_tx_fixup() - prepend the 32-bit ASIX TX header to an outgoing frame.
 *
 * The header encodes the frame length in the low 16 bits and its one's
 * complement in the high 16 bits.  If the resulting transfer would be an
 * exact multiple of the USB max packet size, a 4-byte pad marker
 * (0xffff0000) is appended so the device sees a short packet.
 *
 * Returns the (possibly reallocated) skb ready for transmission, or NULL
 * on allocation failure (the original skb is consumed either way).
 */
struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
			      gfp_t flags)
{
	int padlen;
	int headroom = skb_headroom(skb);
	int tailroom = skb_tailroom(skb);
	u32 packet_len;
	u32 padbytes = 0xffff0000;

	/* pad only when len + header would be maxpacket-aligned */
	padlen = ((skb->len + 4) & (dev->maxpacket - 1)) ? 0 : 4;

	/* We need to push 4 bytes in front of frame (packet_len)
	 * and maybe add 4 bytes after the end (if padlen is 4)
	 *
	 * Avoid skb_copy_expand() expensive call, using following rules :
	 * - We are allowed to push 4 bytes in headroom if skb_header_cloned()
	 *   is false (and if we have 4 bytes of headroom)
	 * - We are allowed to put 4 bytes at tail if skb_cloned()
	 *   is false (and if we have 4 bytes of tailroom)
	 *
	 * TCP packets for example are cloned, but skb_header_release()
	 * was called in tcp stack, allowing us to use headroom for our needs.
	 */
	if (!skb_header_cloned(skb) &&
	    !(padlen && skb_cloned(skb)) &&
	    headroom + tailroom >= 4 + padlen) {
		/* following should not happen, but better be safe */
		if (headroom < 4 ||
		    tailroom < padlen) {
			/* shuffle data so all spare room sits where needed */
			skb->data = memmove(skb->head + 4, skb->data, skb->len);
			skb_set_tail_pointer(skb, skb->len);
		}
	} else {
		struct sk_buff *skb2;

		skb2 = skb_copy_expand(skb, 4, padlen, flags);
		dev_kfree_skb_any(skb);
		skb = skb2;
		if (!skb)
			return NULL;
	}

	/* length | ~length, stored little-endian in front of the frame */
	packet_len = ((skb->len ^ 0x0000ffff) << 16) + skb->len;
	skb_push(skb, 4);
	cpu_to_le32s(&packet_len);
	skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len));

	if (padlen) {
		cpu_to_le32s(&padbytes);
		memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
		skb_put(skb, sizeof(padbytes));
	}
	return skb;
}
242
243int asix_set_sw_mii(struct usbnet *dev)
244{
245 int ret;
246 ret = asix_write_cmd(dev, AX_CMD_SET_SW_MII, 0x0000, 0, 0, NULL);
247 if (ret < 0)
248 netdev_err(dev->net, "Failed to enable software MII access\n");
249 return ret;
250}
251
252int asix_set_hw_mii(struct usbnet *dev)
253{
254 int ret;
255 ret = asix_write_cmd(dev, AX_CMD_SET_HW_MII, 0x0000, 0, 0, NULL);
256 if (ret < 0)
257 netdev_err(dev->net, "Failed to enable hardware MII access\n");
258 return ret;
259}
260
261int asix_read_phy_addr(struct usbnet *dev, int internal)
262{
263 int offset = (internal ? 1 : 0);
264 u8 buf[2];
265 int ret = asix_read_cmd(dev, AX_CMD_READ_PHY_ID, 0, 0, 2, buf);
266
267 netdev_dbg(dev->net, "asix_get_phy_addr()\n");
268
269 if (ret < 0) {
270 netdev_err(dev->net, "Error reading PHYID register: %02x\n", ret);
271 goto out;
272 }
273 netdev_dbg(dev->net, "asix_get_phy_addr() returning 0x%04x\n",
274 *((__le16 *)buf));
275 ret = buf[offset];
276
277out:
278 return ret;
279}
280
/* Convenience wrapper: address of the internal PHY. */
int asix_get_phy_addr(struct usbnet *dev)
{
	return asix_read_phy_addr(dev, 1);
}
286
287
288int asix_sw_reset(struct usbnet *dev, u8 flags)
289{
290 int ret;
291
292 ret = asix_write_cmd(dev, AX_CMD_SW_RESET, flags, 0, 0, NULL);
293 if (ret < 0)
294 netdev_err(dev->net, "Failed to send software reset: %02x\n", ret);
295
296 return ret;
297}
298
299u16 asix_read_rx_ctl(struct usbnet *dev)
300{
301 __le16 v;
302 int ret = asix_read_cmd(dev, AX_CMD_READ_RX_CTL, 0, 0, 2, &v);
303
304 if (ret < 0) {
305 netdev_err(dev->net, "Error reading RX_CTL register: %02x\n", ret);
306 goto out;
307 }
308 ret = le16_to_cpu(v);
309out:
310 return ret;
311}
312
313int asix_write_rx_ctl(struct usbnet *dev, u16 mode)
314{
315 int ret;
316
317 netdev_dbg(dev->net, "asix_write_rx_ctl() - mode = 0x%04x\n", mode);
318 ret = asix_write_cmd(dev, AX_CMD_WRITE_RX_CTL, mode, 0, 0, NULL);
319 if (ret < 0)
320 netdev_err(dev->net, "Failed to write RX_CTL mode to 0x%04x: %02x\n",
321 mode, ret);
322
323 return ret;
324}
325
326u16 asix_read_medium_status(struct usbnet *dev)
327{
328 __le16 v;
329 int ret = asix_read_cmd(dev, AX_CMD_READ_MEDIUM_STATUS, 0, 0, 2, &v);
330
331 if (ret < 0) {
332 netdev_err(dev->net, "Error reading Medium Status register: %02x\n",
333 ret);
334 return ret; /* TODO: callers not checking for error ret */
335 }
336
337 return le16_to_cpu(v);
338
339}
340
341int asix_write_medium_mode(struct usbnet *dev, u16 mode)
342{
343 int ret;
344
345 netdev_dbg(dev->net, "asix_write_medium_mode() - mode = 0x%04x\n", mode);
346 ret = asix_write_cmd(dev, AX_CMD_WRITE_MEDIUM_MODE, mode, 0, 0, NULL);
347 if (ret < 0)
348 netdev_err(dev->net, "Failed to write Medium Mode mode to 0x%04x: %02x\n",
349 mode, ret);
350
351 return ret;
352}
353
354int asix_write_gpio(struct usbnet *dev, u16 value, int sleep)
355{
356 int ret;
357
358 netdev_dbg(dev->net, "asix_write_gpio() - value = 0x%04x\n", value);
359 ret = asix_write_cmd(dev, AX_CMD_WRITE_GPIOS, value, 0, 0, NULL);
360 if (ret < 0)
361 netdev_err(dev->net, "Failed to write GPIO value 0x%04x: %02x\n",
362 value, ret);
363
364 if (sleep)
365 msleep(sleep);
366
367 return ret;
368}
369
370/*
371 * AX88772 & AX88178 have a 16-bit RX_CTL value
372 */
373void asix_set_multicast(struct net_device *net)
374{
375 struct usbnet *dev = netdev_priv(net);
376 struct asix_data *data = (struct asix_data *)&dev->data;
377 u16 rx_ctl = AX_DEFAULT_RX_CTL;
378
379 if (net->flags & IFF_PROMISC) {
380 rx_ctl |= AX_RX_CTL_PRO;
381 } else if (net->flags & IFF_ALLMULTI ||
382 netdev_mc_count(net) > AX_MAX_MCAST) {
383 rx_ctl |= AX_RX_CTL_AMALL;
384 } else if (netdev_mc_empty(net)) {
385 /* just broadcast and directed */
386 } else {
387 /* We use the 20 byte dev->data
388 * for our 8 byte filter buffer
389 * to avoid allocating memory that
390 * is tricky to free later */
391 struct netdev_hw_addr *ha;
392 u32 crc_bits;
393
394 memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
395
396 /* Build the multicast hash filter. */
397 netdev_for_each_mc_addr(ha, net) {
398 crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
399 data->multi_filter[crc_bits >> 3] |=
400 1 << (crc_bits & 7);
401 }
402
403 asix_write_cmd_async(dev, AX_CMD_WRITE_MULTI_FILTER, 0, 0,
404 AX_MCAST_FILTER_SIZE, data->multi_filter);
405
406 rx_ctl |= AX_RX_CTL_AM;
407 }
408
409 asix_write_cmd_async(dev, AX_CMD_WRITE_RX_CTL, rx_ctl, 0, 0, NULL);
410}
411
412int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
413{
414 struct usbnet *dev = netdev_priv(netdev);
415 __le16 res;
416
417 mutex_lock(&dev->phy_mutex);
418 asix_set_sw_mii(dev);
419 asix_read_cmd(dev, AX_CMD_READ_MII_REG, phy_id,
420 (__u16)loc, 2, &res);
421 asix_set_hw_mii(dev);
422 mutex_unlock(&dev->phy_mutex);
423
424 netdev_dbg(dev->net, "asix_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
425 phy_id, loc, le16_to_cpu(res));
426
427 return le16_to_cpu(res);
428}
429
430void asix_mdio_write(struct net_device *netdev, int phy_id, int loc, int val)
431{
432 struct usbnet *dev = netdev_priv(netdev);
433 __le16 res = cpu_to_le16(val);
434
435 netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
436 phy_id, loc, val);
437 mutex_lock(&dev->phy_mutex);
438 asix_set_sw_mii(dev);
439 asix_write_cmd(dev, AX_CMD_WRITE_MII_REG, phy_id, (__u16)loc, 2, &res);
440 asix_set_hw_mii(dev);
441 mutex_unlock(&dev->phy_mutex);
442}
443
444void asix_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
445{
446 struct usbnet *dev = netdev_priv(net);
447 u8 opt;
448
449 if (asix_read_cmd(dev, AX_CMD_READ_MONITOR_MODE, 0, 0, 1, &opt) < 0) {
450 wolinfo->supported = 0;
451 wolinfo->wolopts = 0;
452 return;
453 }
454 wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
455 wolinfo->wolopts = 0;
456 if (opt & AX_MONITOR_LINK)
457 wolinfo->wolopts |= WAKE_PHY;
458 if (opt & AX_MONITOR_MAGIC)
459 wolinfo->wolopts |= WAKE_MAGIC;
460}
461
462int asix_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
463{
464 struct usbnet *dev = netdev_priv(net);
465 u8 opt = 0;
466
467 if (wolinfo->wolopts & WAKE_PHY)
468 opt |= AX_MONITOR_LINK;
469 if (wolinfo->wolopts & WAKE_MAGIC)
470 opt |= AX_MONITOR_MAGIC;
471
472 if (asix_write_cmd(dev, AX_CMD_WRITE_MONITOR_MODE,
473 opt, 0, 0, NULL) < 0)
474 return -EINVAL;
475
476 return 0;
477}
478
479int asix_get_eeprom_len(struct net_device *net)
480{
481 return AX_EEPROM_LEN;
482}
483
/* ethtool get_eeprom hook: read @eeprom->len bytes starting at
 * @eeprom->offset into @data.
 *
 * The EEPROM is word (16-bit) addressed, so the covered word range is
 * read into a temporary buffer and the requested byte span is copied out,
 * which transparently handles odd offsets and lengths.
 */
int asix_get_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
		    u8 *data)
{
	struct usbnet *dev = netdev_priv(net);
	u16 *eeprom_buff;
	int first_word, last_word;
	int i;

	if (eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = AX_EEPROM_MAGIC;

	/* word range that covers the requested byte range */
	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;

	eeprom_buff = kmalloc(sizeof(u16) * (last_word - first_word + 1),
			      GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	/* ax8817x returns 2 bytes from eeprom on read */
	for (i = first_word; i <= last_word; i++) {
		if (asix_read_cmd(dev, AX_CMD_READ_EEPROM, i, 0, 2,
				  &(eeprom_buff[i - first_word])) < 0) {
			kfree(eeprom_buff);
			return -EIO;
		}
	}

	/* skip the leading byte when the requested offset is odd */
	memcpy(data, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
	kfree(eeprom_buff);
	return 0;
}
518
/* ethtool set_eeprom hook: write @eeprom->len bytes from @data into the
 * device EEPROM at @eeprom->offset.
 *
 * The EEPROM is word (16-bit) addressed, so unaligned head/tail bytes are
 * first merged with the existing EEPROM contents.  The sequence is:
 * write-enable, write each word (with a settle delay), write-disable.
 * Returns 0 on success or a negative errno.
 */
int asix_set_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
		    u8 *data)
{
	struct usbnet *dev = netdev_priv(net);
	u16 *eeprom_buff;
	int first_word, last_word;
	int i;
	int ret;

	netdev_dbg(net, "write EEPROM len %d, offset %d, magic 0x%x\n",
		   eeprom->len, eeprom->offset, eeprom->magic);

	if (eeprom->len == 0)
		return -EINVAL;

	/* the magic acts as a confirmation token from userspace */
	if (eeprom->magic != AX_EEPROM_MAGIC)
		return -EINVAL;

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;

	eeprom_buff = kmalloc(sizeof(u16) * (last_word - first_word + 1),
			      GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	/* align data to 16 bit boundaries, read the missing data from
	   the EEPROM */
	if (eeprom->offset & 1) {
		ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM, first_word, 0, 2,
				    &(eeprom_buff[0]));
		if (ret < 0) {
			netdev_err(net, "Failed to read EEPROM at offset 0x%02x.\n", first_word);
			goto free;
		}
	}

	if ((eeprom->offset + eeprom->len) & 1) {
		ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM, last_word, 0, 2,
				    &(eeprom_buff[last_word - first_word]));
		if (ret < 0) {
			netdev_err(net, "Failed to read EEPROM at offset 0x%02x.\n", last_word);
			goto free;
		}
	}

	memcpy((u8 *)eeprom_buff + (eeprom->offset & 1), data, eeprom->len);

	/* write data to EEPROM */
	ret = asix_write_cmd(dev, AX_CMD_WRITE_ENABLE, 0x0000, 0, 0, NULL);
	if (ret < 0) {
		netdev_err(net, "Failed to enable EEPROM write\n");
		goto free;
	}
	msleep(20);

	/* NOTE(review): each word is passed through asix_write_cmd's
	 * "index" argument in CPU byte order, while reads come back as
	 * raw little-endian bytes — confirm the device expects host order
	 * here rather than an explicit le16 conversion.
	 */
	for (i = first_word; i <= last_word; i++) {
		netdev_dbg(net, "write to EEPROM at offset 0x%02x, data 0x%04x\n",
			   i, eeprom_buff[i - first_word]);
		ret = asix_write_cmd(dev, AX_CMD_WRITE_EEPROM, i,
				     eeprom_buff[i - first_word], 0, NULL);
		if (ret < 0) {
			netdev_err(net, "Failed to write EEPROM at offset 0x%02x.\n",
				   i);
			goto free;
		}
		/* give the EEPROM time to complete each word write */
		msleep(20);
	}

	ret = asix_write_cmd(dev, AX_CMD_WRITE_DISABLE, 0x0000, 0, 0, NULL);
	if (ret < 0) {
		netdev_err(net, "Failed to disable EEPROM write\n");
		goto free;
	}

	ret = 0;
free:
	kfree(eeprom_buff);
	return ret;
}
599
600void asix_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
601{
602 /* Inherit standard device info */
603 usbnet_get_drvinfo(net, info);
604 strncpy (info->driver, DRIVER_NAME, sizeof info->driver);
605 strncpy (info->version, DRIVER_VERSION, sizeof info->version);
606 info->eedump_len = AX_EEPROM_LEN;
607}
608
/* ndo_set_mac_address hook: validate and apply a new MAC address.
 *
 * Refuses the change while the interface is running or when the address
 * is not a valid unicast MAC.  The address is written to the hardware
 * asynchronously, so it is staged in dev->data (which outlives this
 * call) rather than the caller's buffer.  Returns 0 or a negative errno.
 */
int asix_set_mac_address(struct net_device *net, void *p)
{
	struct usbnet *dev = netdev_priv(net);
	struct asix_data *data = (struct asix_data *)&dev->data;
	struct sockaddr *addr = p;

	if (netif_running(net))
		return -EBUSY;
	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);

	/* We use the 20 byte dev->data
	 * for our 6 byte mac buffer
	 * to avoid allocating memory that
	 * is tricky to free later */
	memcpy(data->mac_addr, addr->sa_data, ETH_ALEN);
	asix_write_cmd_async(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
			     data->mac_addr);

	return 0;
}
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix_devices.c
index 3ae80eccd0ef..4fd48df6b989 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix_devices.c
@@ -20,137 +20,7 @@
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */ 21 */
22 22
23// #define DEBUG // error path messages, extra info 23#include "asix.h"
24// #define VERBOSE // more; success messages
25
26#include <linux/module.h>
27#include <linux/kmod.h>
28#include <linux/init.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/ethtool.h>
32#include <linux/workqueue.h>
33#include <linux/mii.h>
34#include <linux/usb.h>
35#include <linux/crc32.h>
36#include <linux/usb/usbnet.h>
37#include <linux/slab.h>
38#include <linux/if_vlan.h>
39
40#define DRIVER_VERSION "22-Dec-2011"
41#define DRIVER_NAME "asix"
42
43/* ASIX AX8817X based USB 2.0 Ethernet Devices */
44
45#define AX_CMD_SET_SW_MII 0x06
46#define AX_CMD_READ_MII_REG 0x07
47#define AX_CMD_WRITE_MII_REG 0x08
48#define AX_CMD_SET_HW_MII 0x0a
49#define AX_CMD_READ_EEPROM 0x0b
50#define AX_CMD_WRITE_EEPROM 0x0c
51#define AX_CMD_WRITE_ENABLE 0x0d
52#define AX_CMD_WRITE_DISABLE 0x0e
53#define AX_CMD_READ_RX_CTL 0x0f
54#define AX_CMD_WRITE_RX_CTL 0x10
55#define AX_CMD_READ_IPG012 0x11
56#define AX_CMD_WRITE_IPG0 0x12
57#define AX_CMD_WRITE_IPG1 0x13
58#define AX_CMD_READ_NODE_ID 0x13
59#define AX_CMD_WRITE_NODE_ID 0x14
60#define AX_CMD_WRITE_IPG2 0x14
61#define AX_CMD_WRITE_MULTI_FILTER 0x16
62#define AX88172_CMD_READ_NODE_ID 0x17
63#define AX_CMD_READ_PHY_ID 0x19
64#define AX_CMD_READ_MEDIUM_STATUS 0x1a
65#define AX_CMD_WRITE_MEDIUM_MODE 0x1b
66#define AX_CMD_READ_MONITOR_MODE 0x1c
67#define AX_CMD_WRITE_MONITOR_MODE 0x1d
68#define AX_CMD_READ_GPIOS 0x1e
69#define AX_CMD_WRITE_GPIOS 0x1f
70#define AX_CMD_SW_RESET 0x20
71#define AX_CMD_SW_PHY_STATUS 0x21
72#define AX_CMD_SW_PHY_SELECT 0x22
73
74#define AX_MONITOR_MODE 0x01
75#define AX_MONITOR_LINK 0x02
76#define AX_MONITOR_MAGIC 0x04
77#define AX_MONITOR_HSFS 0x10
78
79/* AX88172 Medium Status Register values */
80#define AX88172_MEDIUM_FD 0x02
81#define AX88172_MEDIUM_TX 0x04
82#define AX88172_MEDIUM_FC 0x10
83#define AX88172_MEDIUM_DEFAULT \
84 ( AX88172_MEDIUM_FD | AX88172_MEDIUM_TX | AX88172_MEDIUM_FC )
85
86#define AX_MCAST_FILTER_SIZE 8
87#define AX_MAX_MCAST 64
88
89#define AX_SWRESET_CLEAR 0x00
90#define AX_SWRESET_RR 0x01
91#define AX_SWRESET_RT 0x02
92#define AX_SWRESET_PRTE 0x04
93#define AX_SWRESET_PRL 0x08
94#define AX_SWRESET_BZ 0x10
95#define AX_SWRESET_IPRL 0x20
96#define AX_SWRESET_IPPD 0x40
97
98#define AX88772_IPG0_DEFAULT 0x15
99#define AX88772_IPG1_DEFAULT 0x0c
100#define AX88772_IPG2_DEFAULT 0x12
101
102/* AX88772 & AX88178 Medium Mode Register */
103#define AX_MEDIUM_PF 0x0080
104#define AX_MEDIUM_JFE 0x0040
105#define AX_MEDIUM_TFC 0x0020
106#define AX_MEDIUM_RFC 0x0010
107#define AX_MEDIUM_ENCK 0x0008
108#define AX_MEDIUM_AC 0x0004
109#define AX_MEDIUM_FD 0x0002
110#define AX_MEDIUM_GM 0x0001
111#define AX_MEDIUM_SM 0x1000
112#define AX_MEDIUM_SBP 0x0800
113#define AX_MEDIUM_PS 0x0200
114#define AX_MEDIUM_RE 0x0100
115
116#define AX88178_MEDIUM_DEFAULT \
117 (AX_MEDIUM_PS | AX_MEDIUM_FD | AX_MEDIUM_AC | \
118 AX_MEDIUM_RFC | AX_MEDIUM_TFC | AX_MEDIUM_JFE | \
119 AX_MEDIUM_RE)
120
121#define AX88772_MEDIUM_DEFAULT \
122 (AX_MEDIUM_FD | AX_MEDIUM_RFC | \
123 AX_MEDIUM_TFC | AX_MEDIUM_PS | \
124 AX_MEDIUM_AC | AX_MEDIUM_RE)
125
126/* AX88772 & AX88178 RX_CTL values */
127#define AX_RX_CTL_SO 0x0080
128#define AX_RX_CTL_AP 0x0020
129#define AX_RX_CTL_AM 0x0010
130#define AX_RX_CTL_AB 0x0008
131#define AX_RX_CTL_SEP 0x0004
132#define AX_RX_CTL_AMALL 0x0002
133#define AX_RX_CTL_PRO 0x0001
134#define AX_RX_CTL_MFB_2048 0x0000
135#define AX_RX_CTL_MFB_4096 0x0100
136#define AX_RX_CTL_MFB_8192 0x0200
137#define AX_RX_CTL_MFB_16384 0x0300
138
139#define AX_DEFAULT_RX_CTL (AX_RX_CTL_SO | AX_RX_CTL_AB)
140
141/* GPIO 0 .. 2 toggles */
142#define AX_GPIO_GPO0EN 0x01 /* GPIO0 Output enable */
143#define AX_GPIO_GPO_0 0x02 /* GPIO0 Output value */
144#define AX_GPIO_GPO1EN 0x04 /* GPIO1 Output enable */
145#define AX_GPIO_GPO_1 0x08 /* GPIO1 Output value */
146#define AX_GPIO_GPO2EN 0x10 /* GPIO2 Output enable */
147#define AX_GPIO_GPO_2 0x20 /* GPIO2 Output value */
148#define AX_GPIO_RESERVED 0x40 /* Reserved */
149#define AX_GPIO_RSE 0x80 /* Reload serial EEPROM */
150
151#define AX_EEPROM_MAGIC 0xdeadbeef
152#define AX88172_EEPROM_LEN 0x40
153#define AX88772_EEPROM_LEN 0xff
154 24
155#define PHY_MODE_MARVELL 0x0000 25#define PHY_MODE_MARVELL 0x0000
156#define MII_MARVELL_LED_CTRL 0x0018 26#define MII_MARVELL_LED_CTRL 0x0018
@@ -166,15 +36,6 @@
166 36
167#define PHY_MODE_RTL8211CL 0x000C 37#define PHY_MODE_RTL8211CL 0x000C
168 38
169/* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */
170struct asix_data {
171 u8 multi_filter[AX_MCAST_FILTER_SIZE];
172 u8 mac_addr[ETH_ALEN];
173 u8 phymode;
174 u8 ledmode;
175 u8 eeprom_len;
176};
177
178struct ax88172_int_data { 39struct ax88172_int_data {
179 __le16 res1; 40 __le16 res1;
180 u8 link; 41 u8 link;
@@ -183,209 +44,6 @@ struct ax88172_int_data {
183 __le16 res3; 44 __le16 res3;
184} __packed; 45} __packed;
185 46
186static int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
187 u16 size, void *data)
188{
189 void *buf;
190 int err = -ENOMEM;
191
192 netdev_dbg(dev->net, "asix_read_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
193 cmd, value, index, size);
194
195 buf = kmalloc(size, GFP_KERNEL);
196 if (!buf)
197 goto out;
198
199 err = usb_control_msg(
200 dev->udev,
201 usb_rcvctrlpipe(dev->udev, 0),
202 cmd,
203 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
204 value,
205 index,
206 buf,
207 size,
208 USB_CTRL_GET_TIMEOUT);
209 if (err == size)
210 memcpy(data, buf, size);
211 else if (err >= 0)
212 err = -EINVAL;
213 kfree(buf);
214
215out:
216 return err;
217}
218
219static int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
220 u16 size, void *data)
221{
222 void *buf = NULL;
223 int err = -ENOMEM;
224
225 netdev_dbg(dev->net, "asix_write_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
226 cmd, value, index, size);
227
228 if (data) {
229 buf = kmemdup(data, size, GFP_KERNEL);
230 if (!buf)
231 goto out;
232 }
233
234 err = usb_control_msg(
235 dev->udev,
236 usb_sndctrlpipe(dev->udev, 0),
237 cmd,
238 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
239 value,
240 index,
241 buf,
242 size,
243 USB_CTRL_SET_TIMEOUT);
244 kfree(buf);
245
246out:
247 return err;
248}
249
250static void asix_async_cmd_callback(struct urb *urb)
251{
252 struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
253 int status = urb->status;
254
255 if (status < 0)
256 printk(KERN_DEBUG "asix_async_cmd_callback() failed with %d",
257 status);
258
259 kfree(req);
260 usb_free_urb(urb);
261}
262
263static void
264asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
265 u16 size, void *data)
266{
267 struct usb_ctrlrequest *req;
268 int status;
269 struct urb *urb;
270
271 netdev_dbg(dev->net, "asix_write_cmd_async() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
272 cmd, value, index, size);
273
274 urb = usb_alloc_urb(0, GFP_ATOMIC);
275 if (!urb) {
276 netdev_err(dev->net, "Error allocating URB in write_cmd_async!\n");
277 return;
278 }
279
280 req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
281 if (!req) {
282 netdev_err(dev->net, "Failed to allocate memory for control request\n");
283 usb_free_urb(urb);
284 return;
285 }
286
287 req->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
288 req->bRequest = cmd;
289 req->wValue = cpu_to_le16(value);
290 req->wIndex = cpu_to_le16(index);
291 req->wLength = cpu_to_le16(size);
292
293 usb_fill_control_urb(urb, dev->udev,
294 usb_sndctrlpipe(dev->udev, 0),
295 (void *)req, data, size,
296 asix_async_cmd_callback, req);
297
298 status = usb_submit_urb(urb, GFP_ATOMIC);
299 if (status < 0) {
300 netdev_err(dev->net, "Error submitting the control message: status=%d\n",
301 status);
302 kfree(req);
303 usb_free_urb(urb);
304 }
305}
306
307static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
308{
309 int offset = 0;
310
311 while (offset + sizeof(u32) < skb->len) {
312 struct sk_buff *ax_skb;
313 u16 size;
314 u32 header = get_unaligned_le32(skb->data + offset);
315
316 offset += sizeof(u32);
317
318 /* get the packet length */
319 size = (u16) (header & 0x7ff);
320 if (size != ((~header >> 16) & 0x07ff)) {
321 netdev_err(dev->net, "asix_rx_fixup() Bad Header Length\n");
322 return 0;
323 }
324
325 if ((size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) ||
326 (size + offset > skb->len)) {
327 netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
328 size);
329 return 0;
330 }
331 ax_skb = netdev_alloc_skb_ip_align(dev->net, size);
332 if (!ax_skb)
333 return 0;
334
335 skb_put(ax_skb, size);
336 memcpy(ax_skb->data, skb->data + offset, size);
337 usbnet_skb_return(dev, ax_skb);
338
339 offset += (size + 1) & 0xfffe;
340 }
341
342 if (skb->len != offset) {
343 netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d\n",
344 skb->len);
345 return 0;
346 }
347 return 1;
348}
349
350static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
351 gfp_t flags)
352{
353 int padlen;
354 int headroom = skb_headroom(skb);
355 int tailroom = skb_tailroom(skb);
356 u32 packet_len;
357 u32 padbytes = 0xffff0000;
358
359 padlen = ((skb->len + 4) & (dev->maxpacket - 1)) ? 0 : 4;
360
361 if ((!skb_cloned(skb)) &&
362 ((headroom + tailroom) >= (4 + padlen))) {
363 if ((headroom < 4) || (tailroom < padlen)) {
364 skb->data = memmove(skb->head + 4, skb->data, skb->len);
365 skb_set_tail_pointer(skb, skb->len);
366 }
367 } else {
368 struct sk_buff *skb2;
369 skb2 = skb_copy_expand(skb, 4, padlen, flags);
370 dev_kfree_skb_any(skb);
371 skb = skb2;
372 if (!skb)
373 return NULL;
374 }
375
376 skb_push(skb, 4);
377 packet_len = (((skb->len - 4) ^ 0x0000ffff) << 16) + (skb->len - 4);
378 cpu_to_le32s(&packet_len);
379 skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len));
380
381 if (padlen) {
382 cpu_to_le32s(&padbytes);
383 memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
384 skb_put(skb, sizeof(padbytes));
385 }
386 return skb;
387}
388
389static void asix_status(struct usbnet *dev, struct urb *urb) 47static void asix_status(struct usbnet *dev, struct urb *urb)
390{ 48{
391 struct ax88172_int_data *event; 49 struct ax88172_int_data *event;
@@ -406,200 +64,6 @@ static void asix_status(struct usbnet *dev, struct urb *urb)
406 } 64 }
407} 65}
408 66
409static inline int asix_set_sw_mii(struct usbnet *dev)
410{
411 int ret;
412 ret = asix_write_cmd(dev, AX_CMD_SET_SW_MII, 0x0000, 0, 0, NULL);
413 if (ret < 0)
414 netdev_err(dev->net, "Failed to enable software MII access\n");
415 return ret;
416}
417
418static inline int asix_set_hw_mii(struct usbnet *dev)
419{
420 int ret;
421 ret = asix_write_cmd(dev, AX_CMD_SET_HW_MII, 0x0000, 0, 0, NULL);
422 if (ret < 0)
423 netdev_err(dev->net, "Failed to enable hardware MII access\n");
424 return ret;
425}
426
427static inline int asix_get_phy_addr(struct usbnet *dev)
428{
429 u8 buf[2];
430 int ret = asix_read_cmd(dev, AX_CMD_READ_PHY_ID, 0, 0, 2, buf);
431
432 netdev_dbg(dev->net, "asix_get_phy_addr()\n");
433
434 if (ret < 0) {
435 netdev_err(dev->net, "Error reading PHYID register: %02x\n", ret);
436 goto out;
437 }
438 netdev_dbg(dev->net, "asix_get_phy_addr() returning 0x%04x\n",
439 *((__le16 *)buf));
440 ret = buf[1];
441
442out:
443 return ret;
444}
445
446static int asix_sw_reset(struct usbnet *dev, u8 flags)
447{
448 int ret;
449
450 ret = asix_write_cmd(dev, AX_CMD_SW_RESET, flags, 0, 0, NULL);
451 if (ret < 0)
452 netdev_err(dev->net, "Failed to send software reset: %02x\n", ret);
453
454 return ret;
455}
456
457static u16 asix_read_rx_ctl(struct usbnet *dev)
458{
459 __le16 v;
460 int ret = asix_read_cmd(dev, AX_CMD_READ_RX_CTL, 0, 0, 2, &v);
461
462 if (ret < 0) {
463 netdev_err(dev->net, "Error reading RX_CTL register: %02x\n", ret);
464 goto out;
465 }
466 ret = le16_to_cpu(v);
467out:
468 return ret;
469}
470
471static int asix_write_rx_ctl(struct usbnet *dev, u16 mode)
472{
473 int ret;
474
475 netdev_dbg(dev->net, "asix_write_rx_ctl() - mode = 0x%04x\n", mode);
476 ret = asix_write_cmd(dev, AX_CMD_WRITE_RX_CTL, mode, 0, 0, NULL);
477 if (ret < 0)
478 netdev_err(dev->net, "Failed to write RX_CTL mode to 0x%04x: %02x\n",
479 mode, ret);
480
481 return ret;
482}
483
484static u16 asix_read_medium_status(struct usbnet *dev)
485{
486 __le16 v;
487 int ret = asix_read_cmd(dev, AX_CMD_READ_MEDIUM_STATUS, 0, 0, 2, &v);
488
489 if (ret < 0) {
490 netdev_err(dev->net, "Error reading Medium Status register: %02x\n",
491 ret);
492 return ret; /* TODO: callers not checking for error ret */
493 }
494
495 return le16_to_cpu(v);
496
497}
498
499static int asix_write_medium_mode(struct usbnet *dev, u16 mode)
500{
501 int ret;
502
503 netdev_dbg(dev->net, "asix_write_medium_mode() - mode = 0x%04x\n", mode);
504 ret = asix_write_cmd(dev, AX_CMD_WRITE_MEDIUM_MODE, mode, 0, 0, NULL);
505 if (ret < 0)
506 netdev_err(dev->net, "Failed to write Medium Mode mode to 0x%04x: %02x\n",
507 mode, ret);
508
509 return ret;
510}
511
512static int asix_write_gpio(struct usbnet *dev, u16 value, int sleep)
513{
514 int ret;
515
516 netdev_dbg(dev->net, "asix_write_gpio() - value = 0x%04x\n", value);
517 ret = asix_write_cmd(dev, AX_CMD_WRITE_GPIOS, value, 0, 0, NULL);
518 if (ret < 0)
519 netdev_err(dev->net, "Failed to write GPIO value 0x%04x: %02x\n",
520 value, ret);
521
522 if (sleep)
523 msleep(sleep);
524
525 return ret;
526}
527
528/*
529 * AX88772 & AX88178 have a 16-bit RX_CTL value
530 */
531static void asix_set_multicast(struct net_device *net)
532{
533 struct usbnet *dev = netdev_priv(net);
534 struct asix_data *data = (struct asix_data *)&dev->data;
535 u16 rx_ctl = AX_DEFAULT_RX_CTL;
536
537 if (net->flags & IFF_PROMISC) {
538 rx_ctl |= AX_RX_CTL_PRO;
539 } else if (net->flags & IFF_ALLMULTI ||
540 netdev_mc_count(net) > AX_MAX_MCAST) {
541 rx_ctl |= AX_RX_CTL_AMALL;
542 } else if (netdev_mc_empty(net)) {
543 /* just broadcast and directed */
544 } else {
545 /* We use the 20 byte dev->data
546 * for our 8 byte filter buffer
547 * to avoid allocating memory that
548 * is tricky to free later */
549 struct netdev_hw_addr *ha;
550 u32 crc_bits;
551
552 memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
553
554 /* Build the multicast hash filter. */
555 netdev_for_each_mc_addr(ha, net) {
556 crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
557 data->multi_filter[crc_bits >> 3] |=
558 1 << (crc_bits & 7);
559 }
560
561 asix_write_cmd_async(dev, AX_CMD_WRITE_MULTI_FILTER, 0, 0,
562 AX_MCAST_FILTER_SIZE, data->multi_filter);
563
564 rx_ctl |= AX_RX_CTL_AM;
565 }
566
567 asix_write_cmd_async(dev, AX_CMD_WRITE_RX_CTL, rx_ctl, 0, 0, NULL);
568}
569
570static int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
571{
572 struct usbnet *dev = netdev_priv(netdev);
573 __le16 res;
574
575 mutex_lock(&dev->phy_mutex);
576 asix_set_sw_mii(dev);
577 asix_read_cmd(dev, AX_CMD_READ_MII_REG, phy_id,
578 (__u16)loc, 2, &res);
579 asix_set_hw_mii(dev);
580 mutex_unlock(&dev->phy_mutex);
581
582 netdev_dbg(dev->net, "asix_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
583 phy_id, loc, le16_to_cpu(res));
584
585 return le16_to_cpu(res);
586}
587
588static void
589asix_mdio_write(struct net_device *netdev, int phy_id, int loc, int val)
590{
591 struct usbnet *dev = netdev_priv(netdev);
592 __le16 res = cpu_to_le16(val);
593
594 netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
595 phy_id, loc, val);
596 mutex_lock(&dev->phy_mutex);
597 asix_set_sw_mii(dev);
598 asix_write_cmd(dev, AX_CMD_WRITE_MII_REG, phy_id, (__u16)loc, 2, &res);
599 asix_set_hw_mii(dev);
600 mutex_unlock(&dev->phy_mutex);
601}
602
603/* Get the PHY Identifier from the PHYSID1 & PHYSID2 MII registers */ 67/* Get the PHY Identifier from the PHYSID1 & PHYSID2 MII registers */
604static u32 asix_get_phyid(struct usbnet *dev) 68static u32 asix_get_phyid(struct usbnet *dev)
605{ 69{
@@ -629,88 +93,6 @@ static u32 asix_get_phyid(struct usbnet *dev)
629 return phy_id; 93 return phy_id;
630} 94}
631 95
632static void
633asix_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
634{
635 struct usbnet *dev = netdev_priv(net);
636 u8 opt;
637
638 if (asix_read_cmd(dev, AX_CMD_READ_MONITOR_MODE, 0, 0, 1, &opt) < 0) {
639 wolinfo->supported = 0;
640 wolinfo->wolopts = 0;
641 return;
642 }
643 wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
644 wolinfo->wolopts = 0;
645 if (opt & AX_MONITOR_LINK)
646 wolinfo->wolopts |= WAKE_PHY;
647 if (opt & AX_MONITOR_MAGIC)
648 wolinfo->wolopts |= WAKE_MAGIC;
649}
650
651static int
652asix_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
653{
654 struct usbnet *dev = netdev_priv(net);
655 u8 opt = 0;
656
657 if (wolinfo->wolopts & WAKE_PHY)
658 opt |= AX_MONITOR_LINK;
659 if (wolinfo->wolopts & WAKE_MAGIC)
660 opt |= AX_MONITOR_MAGIC;
661
662 if (asix_write_cmd(dev, AX_CMD_WRITE_MONITOR_MODE,
663 opt, 0, 0, NULL) < 0)
664 return -EINVAL;
665
666 return 0;
667}
668
669static int asix_get_eeprom_len(struct net_device *net)
670{
671 struct usbnet *dev = netdev_priv(net);
672 struct asix_data *data = (struct asix_data *)&dev->data;
673
674 return data->eeprom_len;
675}
676
677static int asix_get_eeprom(struct net_device *net,
678 struct ethtool_eeprom *eeprom, u8 *data)
679{
680 struct usbnet *dev = netdev_priv(net);
681 __le16 *ebuf = (__le16 *)data;
682 int i;
683
684 /* Crude hack to ensure that we don't overwrite memory
685 * if an odd length is supplied
686 */
687 if (eeprom->len % 2)
688 return -EINVAL;
689
690 eeprom->magic = AX_EEPROM_MAGIC;
691
692 /* ax8817x returns 2 bytes from eeprom on read */
693 for (i=0; i < eeprom->len / 2; i++) {
694 if (asix_read_cmd(dev, AX_CMD_READ_EEPROM,
695 eeprom->offset + i, 0, 2, &ebuf[i]) < 0)
696 return -EINVAL;
697 }
698 return 0;
699}
700
701static void asix_get_drvinfo (struct net_device *net,
702 struct ethtool_drvinfo *info)
703{
704 struct usbnet *dev = netdev_priv(net);
705 struct asix_data *data = (struct asix_data *)&dev->data;
706
707 /* Inherit standard device info */
708 usbnet_get_drvinfo(net, info);
709 strncpy (info->driver, DRIVER_NAME, sizeof info->driver);
710 strncpy (info->version, DRIVER_VERSION, sizeof info->version);
711 info->eedump_len = data->eeprom_len;
712}
713
714static u32 asix_get_link(struct net_device *net) 96static u32 asix_get_link(struct net_device *net)
715{ 97{
716 struct usbnet *dev = netdev_priv(net); 98 struct usbnet *dev = netdev_priv(net);
@@ -725,30 +107,6 @@ static int asix_ioctl (struct net_device *net, struct ifreq *rq, int cmd)
725 return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL); 107 return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
726} 108}
727 109
728static int asix_set_mac_address(struct net_device *net, void *p)
729{
730 struct usbnet *dev = netdev_priv(net);
731 struct asix_data *data = (struct asix_data *)&dev->data;
732 struct sockaddr *addr = p;
733
734 if (netif_running(net))
735 return -EBUSY;
736 if (!is_valid_ether_addr(addr->sa_data))
737 return -EADDRNOTAVAIL;
738
739 memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
740
741 /* We use the 20 byte dev->data
742 * for our 6 byte mac buffer
743 * to avoid allocating memory that
744 * is tricky to free later */
745 memcpy(data->mac_addr, addr->sa_data, ETH_ALEN);
746 asix_write_cmd_async(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
747 data->mac_addr);
748
749 return 0;
750}
751
752/* We need to override some ethtool_ops so we require our 110/* We need to override some ethtool_ops so we require our
753 own structure so we don't interfere with other usbnet 111 own structure so we don't interfere with other usbnet
754 devices that may be connected at the same time. */ 112 devices that may be connected at the same time. */
@@ -761,6 +119,7 @@ static const struct ethtool_ops ax88172_ethtool_ops = {
761 .set_wol = asix_set_wol, 119 .set_wol = asix_set_wol,
762 .get_eeprom_len = asix_get_eeprom_len, 120 .get_eeprom_len = asix_get_eeprom_len,
763 .get_eeprom = asix_get_eeprom, 121 .get_eeprom = asix_get_eeprom,
122 .set_eeprom = asix_set_eeprom,
764 .get_settings = usbnet_get_settings, 123 .get_settings = usbnet_get_settings,
765 .set_settings = usbnet_set_settings, 124 .set_settings = usbnet_set_settings,
766 .nway_reset = usbnet_nway_reset, 125 .nway_reset = usbnet_nway_reset,
@@ -843,9 +202,6 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
843 u8 buf[ETH_ALEN]; 202 u8 buf[ETH_ALEN];
844 int i; 203 int i;
845 unsigned long gpio_bits = dev->driver_info->data; 204 unsigned long gpio_bits = dev->driver_info->data;
846 struct asix_data *data = (struct asix_data *)&dev->data;
847
848 data->eeprom_len = AX88172_EEPROM_LEN;
849 205
850 usbnet_get_endpoints(dev,intf); 206 usbnet_get_endpoints(dev,intf);
851 207
@@ -880,6 +236,8 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
880 236
881 dev->net->netdev_ops = &ax88172_netdev_ops; 237 dev->net->netdev_ops = &ax88172_netdev_ops;
882 dev->net->ethtool_ops = &ax88172_ethtool_ops; 238 dev->net->ethtool_ops = &ax88172_ethtool_ops;
239 dev->net->needed_headroom = 4; /* cf asix_tx_fixup() */
240 dev->net->needed_tailroom = 4; /* cf asix_tx_fixup() */
883 241
884 asix_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET); 242 asix_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET);
885 asix_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE, 243 asix_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
@@ -901,6 +259,7 @@ static const struct ethtool_ops ax88772_ethtool_ops = {
901 .set_wol = asix_set_wol, 259 .set_wol = asix_set_wol,
902 .get_eeprom_len = asix_get_eeprom_len, 260 .get_eeprom_len = asix_get_eeprom_len,
903 .get_eeprom = asix_get_eeprom, 261 .get_eeprom = asix_get_eeprom,
262 .set_eeprom = asix_set_eeprom,
904 .get_settings = usbnet_get_settings, 263 .get_settings = usbnet_get_settings,
905 .set_settings = usbnet_set_settings, 264 .set_settings = usbnet_set_settings,
906 .nway_reset = usbnet_nway_reset, 265 .nway_reset = usbnet_nway_reset,
@@ -1049,12 +408,9 @@ static const struct net_device_ops ax88772_netdev_ops = {
1049static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) 408static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
1050{ 409{
1051 int ret, embd_phy; 410 int ret, embd_phy;
1052 struct asix_data *data = (struct asix_data *)&dev->data;
1053 u8 buf[ETH_ALEN]; 411 u8 buf[ETH_ALEN];
1054 u32 phyid; 412 u32 phyid;
1055 413
1056 data->eeprom_len = AX88772_EEPROM_LEN;
1057
1058 usbnet_get_endpoints(dev,intf); 414 usbnet_get_endpoints(dev,intf);
1059 415
1060 /* Get the MAC address */ 416 /* Get the MAC address */
@@ -1075,6 +431,8 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
1075 431
1076 dev->net->netdev_ops = &ax88772_netdev_ops; 432 dev->net->netdev_ops = &ax88772_netdev_ops;
1077 dev->net->ethtool_ops = &ax88772_ethtool_ops; 433 dev->net->ethtool_ops = &ax88772_ethtool_ops;
434 dev->net->needed_headroom = 4; /* cf asix_tx_fixup() */
435 dev->net->needed_tailroom = 4; /* cf asix_tx_fixup() */
1078 436
1079 embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0); 437 embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0);
1080 438
@@ -1122,6 +480,7 @@ static const struct ethtool_ops ax88178_ethtool_ops = {
1122 .set_wol = asix_set_wol, 480 .set_wol = asix_set_wol,
1123 .get_eeprom_len = asix_get_eeprom_len, 481 .get_eeprom_len = asix_get_eeprom_len,
1124 .get_eeprom = asix_get_eeprom, 482 .get_eeprom = asix_get_eeprom,
483 .set_eeprom = asix_set_eeprom,
1125 .get_settings = usbnet_get_settings, 484 .get_settings = usbnet_get_settings,
1126 .set_settings = usbnet_set_settings, 485 .set_settings = usbnet_set_settings,
1127 .nway_reset = usbnet_nway_reset, 486 .nway_reset = usbnet_nway_reset,
@@ -1405,9 +764,6 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
1405{ 764{
1406 int ret; 765 int ret;
1407 u8 buf[ETH_ALEN]; 766 u8 buf[ETH_ALEN];
1408 struct asix_data *data = (struct asix_data *)&dev->data;
1409
1410 data->eeprom_len = AX88772_EEPROM_LEN;
1411 767
1412 usbnet_get_endpoints(dev,intf); 768 usbnet_get_endpoints(dev,intf);
1413 769
@@ -1510,6 +866,8 @@ static const struct driver_info ax88178_info = {
1510 .tx_fixup = asix_tx_fixup, 866 .tx_fixup = asix_tx_fixup,
1511}; 867};
1512 868
869extern const struct driver_info ax88172a_info;
870
1513static const struct usb_device_id products [] = { 871static const struct usb_device_id products [] = {
1514{ 872{
1515 // Linksys USB200M 873 // Linksys USB200M
@@ -1635,6 +993,10 @@ static const struct usb_device_id products [] = {
1635 // Asus USB Ethernet Adapter 993 // Asus USB Ethernet Adapter
1636 USB_DEVICE (0x0b95, 0x7e2b), 994 USB_DEVICE (0x0b95, 0x7e2b),
1637 .driver_info = (unsigned long) &ax88772_info, 995 .driver_info = (unsigned long) &ax88772_info,
996}, {
997 /* ASIX 88172a demo board */
998 USB_DEVICE(0x0b95, 0x172a),
999 .driver_info = (unsigned long) &ax88172a_info,
1638}, 1000},
1639 { }, // END 1001 { }, // END
1640}; 1002};
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
new file mode 100644
index 000000000000..c8e0aa85fb8e
--- /dev/null
+++ b/drivers/net/usb/ax88172a.c
@@ -0,0 +1,414 @@
1/*
2 * ASIX AX88172A based USB 2.0 Ethernet Devices
3 * Copyright (C) 2012 OMICRON electronics GmbH
4 *
5 * Supports external PHYs via phylib. Based on the driver for the
6 * AX88772. Original copyrights follow:
7 *
8 * Copyright (C) 2003-2006 David Hollis <dhollis@davehollis.com>
9 * Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net>
10 * Copyright (C) 2006 James Painter <jamie.painter@iname.com>
11 * Copyright (c) 2002-2003 TiVo Inc.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include "asix.h"
29#include <linux/phy.h>
30
31struct ax88172a_private {
32 struct mii_bus *mdio;
33 struct phy_device *phydev;
34 char phy_name[20];
35 u16 phy_addr;
36 u16 oldmode;
37 int use_embdphy;
38};
39
40/* MDIO read and write wrappers for phylib */
41static int asix_mdio_bus_read(struct mii_bus *bus, int phy_id, int regnum)
42{
43 return asix_mdio_read(((struct usbnet *)bus->priv)->net, phy_id,
44 regnum);
45}
46
47static int asix_mdio_bus_write(struct mii_bus *bus, int phy_id, int regnum,
48 u16 val)
49{
50 asix_mdio_write(((struct usbnet *)bus->priv)->net, phy_id, regnum, val);
51 return 0;
52}
53
54static int ax88172a_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
55{
56 if (!netif_running(net))
57 return -EINVAL;
58
59 if (!net->phydev)
60 return -ENODEV;
61
62 return phy_mii_ioctl(net->phydev, rq, cmd);
63}
64
65/* set MAC link settings according to information from phylib */
66static void ax88172a_adjust_link(struct net_device *netdev)
67{
68 struct phy_device *phydev = netdev->phydev;
69 struct usbnet *dev = netdev_priv(netdev);
70 struct ax88172a_private *priv = dev->driver_priv;
71 u16 mode = 0;
72
73 if (phydev->link) {
74 mode = AX88772_MEDIUM_DEFAULT;
75
76 if (phydev->duplex == DUPLEX_HALF)
77 mode &= ~AX_MEDIUM_FD;
78
79 if (phydev->speed != SPEED_100)
80 mode &= ~AX_MEDIUM_PS;
81 }
82
83 if (mode != priv->oldmode) {
84 asix_write_medium_mode(dev, mode);
85 priv->oldmode = mode;
86 netdev_dbg(netdev, "speed %u duplex %d, setting mode to 0x%04x\n",
87 phydev->speed, phydev->duplex, mode);
88 phy_print_status(phydev);
89 }
90}
91
92static void ax88172a_status(struct usbnet *dev, struct urb *urb)
93{
94 /* link changes are detected by polling the phy */
95}
96
97/* use phylib infrastructure */
98static int ax88172a_init_mdio(struct usbnet *dev)
99{
100 struct ax88172a_private *priv = dev->driver_priv;
101 int ret, i;
102
103 priv->mdio = mdiobus_alloc();
104 if (!priv->mdio) {
105 netdev_err(dev->net, "Could not allocate MDIO bus\n");
106 return -ENOMEM;
107 }
108
109 priv->mdio->priv = (void *)dev;
110 priv->mdio->read = &asix_mdio_bus_read;
111 priv->mdio->write = &asix_mdio_bus_write;
112 priv->mdio->name = "Asix MDIO Bus";
113 /* mii bus name is usb-<usb bus number>-<usb device number> */
114 snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
115 dev->udev->bus->busnum, dev->udev->devnum);
116
117 priv->mdio->irq = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
118 if (!priv->mdio->irq) {
119 netdev_err(dev->net, "Could not allocate mdio->irq\n");
120 ret = -ENOMEM;
121 goto mfree;
122 }
123 for (i = 0; i < PHY_MAX_ADDR; i++)
124 priv->mdio->irq[i] = PHY_POLL;
125
126 ret = mdiobus_register(priv->mdio);
127 if (ret) {
128 netdev_err(dev->net, "Could not register MDIO bus\n");
129 goto ifree;
130 }
131
132 netdev_info(dev->net, "registered mdio bus %s\n", priv->mdio->id);
133 return 0;
134
135ifree:
136 kfree(priv->mdio->irq);
137mfree:
138 mdiobus_free(priv->mdio);
139 return ret;
140}
141
142static void ax88172a_remove_mdio(struct usbnet *dev)
143{
144 struct ax88172a_private *priv = dev->driver_priv;
145
146 netdev_info(dev->net, "deregistering mdio bus %s\n", priv->mdio->id);
147 mdiobus_unregister(priv->mdio);
148 kfree(priv->mdio->irq);
149 mdiobus_free(priv->mdio);
150}
151
152static const struct net_device_ops ax88172a_netdev_ops = {
153 .ndo_open = usbnet_open,
154 .ndo_stop = usbnet_stop,
155 .ndo_start_xmit = usbnet_start_xmit,
156 .ndo_tx_timeout = usbnet_tx_timeout,
157 .ndo_change_mtu = usbnet_change_mtu,
158 .ndo_set_mac_address = asix_set_mac_address,
159 .ndo_validate_addr = eth_validate_addr,
160 .ndo_do_ioctl = ax88172a_ioctl,
161 .ndo_set_rx_mode = asix_set_multicast,
162};
163
164int ax88172a_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
165{
166 if (!net->phydev)
167 return -ENODEV;
168
169 return phy_ethtool_gset(net->phydev, cmd);
170}
171
172int ax88172a_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
173{
174 if (!net->phydev)
175 return -ENODEV;
176
177 return phy_ethtool_sset(net->phydev, cmd);
178}
179
180int ax88172a_nway_reset(struct net_device *net)
181{
182 if (!net->phydev)
183 return -ENODEV;
184
185 return phy_start_aneg(net->phydev);
186}
187
188static const struct ethtool_ops ax88172a_ethtool_ops = {
189 .get_drvinfo = asix_get_drvinfo,
190 .get_link = usbnet_get_link,
191 .get_msglevel = usbnet_get_msglevel,
192 .set_msglevel = usbnet_set_msglevel,
193 .get_wol = asix_get_wol,
194 .set_wol = asix_set_wol,
195 .get_eeprom_len = asix_get_eeprom_len,
196 .get_eeprom = asix_get_eeprom,
197 .set_eeprom = asix_set_eeprom,
198 .get_settings = ax88172a_get_settings,
199 .set_settings = ax88172a_set_settings,
200 .nway_reset = ax88172a_nway_reset,
201};
202
203static int ax88172a_reset_phy(struct usbnet *dev, int embd_phy)
204{
205 int ret;
206
207 ret = asix_sw_reset(dev, AX_SWRESET_IPPD);
208 if (ret < 0)
209 goto err;
210
211 msleep(150);
212 ret = asix_sw_reset(dev, AX_SWRESET_CLEAR);
213 if (ret < 0)
214 goto err;
215
216 msleep(150);
217
218 ret = asix_sw_reset(dev, embd_phy ? AX_SWRESET_IPRL : AX_SWRESET_IPPD);
219 if (ret < 0)
220 goto err;
221
222 return 0;
223
224err:
225 return ret;
226}
227
228
229static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
230{
231 int ret;
232 u8 buf[ETH_ALEN];
233 struct ax88172a_private *priv;
234
235 usbnet_get_endpoints(dev, intf);
236
237 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
238 if (!priv) {
239 netdev_err(dev->net, "Could not allocate memory for private data\n");
240 return -ENOMEM;
241 }
242 dev->driver_priv = priv;
243
244 /* Get the MAC address */
245 ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf);
246 if (ret < 0) {
247 netdev_err(dev->net, "Failed to read MAC address: %d\n", ret);
248 goto free;
249 }
250 memcpy(dev->net->dev_addr, buf, ETH_ALEN);
251
252 dev->net->netdev_ops = &ax88172a_netdev_ops;
253 dev->net->ethtool_ops = &ax88172a_ethtool_ops;
254
255 /* are we using the internal or the external phy? */
256 ret = asix_read_cmd(dev, AX_CMD_SW_PHY_STATUS, 0, 0, 1, buf);
257 if (ret < 0) {
258 netdev_err(dev->net, "Failed to read software interface selection register: %d\n",
259 ret);
260 goto free;
261 }
262
263 netdev_dbg(dev->net, "AX_CMD_SW_PHY_STATUS = 0x%02x\n", buf[0]);
264 switch (buf[0] & AX_PHY_SELECT_MASK) {
265 case AX_PHY_SELECT_INTERNAL:
266 netdev_dbg(dev->net, "use internal phy\n");
267 priv->use_embdphy = 1;
268 break;
269 case AX_PHY_SELECT_EXTERNAL:
270 netdev_dbg(dev->net, "use external phy\n");
271 priv->use_embdphy = 0;
272 break;
273 default:
274 netdev_err(dev->net, "Interface mode not supported by driver\n");
275 ret = -ENOTSUPP;
276 goto free;
277 }
278
279 priv->phy_addr = asix_read_phy_addr(dev, priv->use_embdphy);
280 ax88172a_reset_phy(dev, priv->use_embdphy);
281
282 /* Asix framing packs multiple eth frames into a 2K usb bulk transfer */
283 if (dev->driver_info->flags & FLAG_FRAMING_AX) {
284 /* hard_mtu is still the default - the device does not support
285 jumbo eth frames */
286 dev->rx_urb_size = 2048;
287 }
288
289 /* init MDIO bus */
290 ret = ax88172a_init_mdio(dev);
291 if (ret)
292 goto free;
293
294 return 0;
295
296free:
297 kfree(priv);
298 return ret;
299}
300
301static int ax88172a_stop(struct usbnet *dev)
302{
303 struct ax88172a_private *priv = dev->driver_priv;
304
305 netdev_dbg(dev->net, "Stopping interface\n");
306
307 if (priv->phydev) {
308 netdev_info(dev->net, "Disconnecting from phy %s\n",
309 priv->phy_name);
310 phy_stop(priv->phydev);
311 phy_disconnect(priv->phydev);
312 }
313
314 return 0;
315}
316
317static void ax88172a_unbind(struct usbnet *dev, struct usb_interface *intf)
318{
319 struct ax88172a_private *priv = dev->driver_priv;
320
321 ax88172a_remove_mdio(dev);
322 kfree(priv);
323}
324
325static int ax88172a_reset(struct usbnet *dev)
326{
327 struct asix_data *data = (struct asix_data *)&dev->data;
328 struct ax88172a_private *priv = dev->driver_priv;
329 int ret;
330 u16 rx_ctl;
331
332 ax88172a_reset_phy(dev, priv->use_embdphy);
333
334 msleep(150);
335 rx_ctl = asix_read_rx_ctl(dev);
336 netdev_dbg(dev->net, "RX_CTL is 0x%04x after software reset\n", rx_ctl);
337 ret = asix_write_rx_ctl(dev, 0x0000);
338 if (ret < 0)
339 goto out;
340
341 rx_ctl = asix_read_rx_ctl(dev);
342 netdev_dbg(dev->net, "RX_CTL is 0x%04x setting to 0x0000\n", rx_ctl);
343
344 msleep(150);
345
346 ret = asix_write_cmd(dev, AX_CMD_WRITE_IPG0,
347 AX88772_IPG0_DEFAULT | AX88772_IPG1_DEFAULT,
348 AX88772_IPG2_DEFAULT, 0, NULL);
349 if (ret < 0) {
350 netdev_err(dev->net, "Write IPG,IPG1,IPG2 failed: %d\n", ret);
351 goto out;
352 }
353
354 /* Rewrite MAC address */
355 memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
356 ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
357 data->mac_addr);
358 if (ret < 0)
359 goto out;
360
361 /* Set RX_CTL to default values with 2k buffer, and enable cactus */
362 ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL);
363 if (ret < 0)
364 goto out;
365
366 rx_ctl = asix_read_rx_ctl(dev);
367 netdev_dbg(dev->net, "RX_CTL is 0x%04x after all initializations\n",
368 rx_ctl);
369
370 rx_ctl = asix_read_medium_status(dev);
371 netdev_dbg(dev->net, "Medium Status is 0x%04x after all initializations\n",
372 rx_ctl);
373
374 /* Connect to PHY */
375 snprintf(priv->phy_name, 20, PHY_ID_FMT,
376 priv->mdio->id, priv->phy_addr);
377
378 priv->phydev = phy_connect(dev->net, priv->phy_name,
379 &ax88172a_adjust_link,
380 0, PHY_INTERFACE_MODE_MII);
381 if (IS_ERR(priv->phydev)) {
382 netdev_err(dev->net, "Could not connect to PHY device %s\n",
383 priv->phy_name);
384 ret = PTR_ERR(priv->phydev);
385 goto out;
386 }
387
388 netdev_info(dev->net, "Connected to phy %s\n", priv->phy_name);
389
390 /* During power-up, the AX88172A set the power down (BMCR_PDOWN)
391 * bit of the PHY. Bring the PHY up again.
392 */
393 genphy_resume(priv->phydev);
394 phy_start(priv->phydev);
395
396 return 0;
397
398out:
399 return ret;
400
401}
402
403const struct driver_info ax88172a_info = {
404 .description = "ASIX AX88172A USB 2.0 Ethernet",
405 .bind = ax88172a_bind,
406 .reset = ax88172a_reset,
407 .stop = ax88172a_stop,
408 .unbind = ax88172a_unbind,
409 .status = ax88172a_status,
410 .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
411 FLAG_MULTI_PACKET,
412 .rx_fixup = asix_rx_fixup,
413 .tx_fixup = asix_tx_fixup,
414};
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index d848d4dd5754..187c144c5e5b 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -394,7 +394,7 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
394 SET_NETDEV_DEV(dev, &intf->dev); 394 SET_NETDEV_DEV(dev, &intf->dev);
395 395
396 pnd->dev = dev; 396 pnd->dev = dev;
397 pnd->usb = usb_get_dev(usbdev); 397 pnd->usb = usbdev;
398 pnd->intf = intf; 398 pnd->intf = intf;
399 pnd->data_intf = data_intf; 399 pnd->data_intf = data_intf;
400 spin_lock_init(&pnd->tx_lock); 400 spin_lock_init(&pnd->tx_lock);
@@ -440,7 +440,6 @@ out:
440static void usbpn_disconnect(struct usb_interface *intf) 440static void usbpn_disconnect(struct usb_interface *intf)
441{ 441{
442 struct usbpn_dev *pnd = usb_get_intfdata(intf); 442 struct usbpn_dev *pnd = usb_get_intfdata(intf);
443 struct usb_device *usb = pnd->usb;
444 443
445 if (pnd->disconnected) 444 if (pnd->disconnected)
446 return; 445 return;
@@ -449,7 +448,6 @@ static void usbpn_disconnect(struct usb_interface *intf)
449 usb_driver_release_interface(&usbpn_driver, 448 usb_driver_release_interface(&usbpn_driver,
450 (pnd->intf == intf) ? pnd->data_intf : pnd->intf); 449 (pnd->intf == intf) ? pnd->data_intf : pnd->intf);
451 unregister_netdev(pnd->dev); 450 unregister_netdev(pnd->dev);
452 usb_put_dev(usb);
453} 451}
454 452
455static struct usb_driver usbpn_driver = { 453static struct usb_driver usbpn_driver = {
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 7023220456c5..a0b5807b30d4 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -1329,8 +1329,6 @@ static int pegasus_probe(struct usb_interface *intf,
1329 } 1329 }
1330 pegasus_count++; 1330 pegasus_count++;
1331 1331
1332 usb_get_dev(dev);
1333
1334 net = alloc_etherdev(sizeof(struct pegasus)); 1332 net = alloc_etherdev(sizeof(struct pegasus));
1335 if (!net) 1333 if (!net)
1336 goto out; 1334 goto out;
@@ -1407,7 +1405,6 @@ out2:
1407out1: 1405out1:
1408 free_netdev(net); 1406 free_netdev(net);
1409out: 1407out:
1410 usb_put_dev(dev);
1411 pegasus_dec_workqueue(); 1408 pegasus_dec_workqueue();
1412 return res; 1409 return res;
1413} 1410}
@@ -1425,7 +1422,6 @@ static void pegasus_disconnect(struct usb_interface *intf)
1425 pegasus->flags |= PEGASUS_UNPLUG; 1422 pegasus->flags |= PEGASUS_UNPLUG;
1426 cancel_delayed_work(&pegasus->carrier_check); 1423 cancel_delayed_work(&pegasus->carrier_check);
1427 unregister_netdev(pegasus->net); 1424 unregister_netdev(pegasus->net);
1428 usb_put_dev(interface_to_usbdev(intf));
1429 unlink_all_urbs(pegasus); 1425 unlink_all_urbs(pegasus);
1430 free_all_urbs(pegasus); 1426 free_all_urbs(pegasus);
1431 free_skb_pool(pegasus); 1427 free_skb_pool(pegasus);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index a051cedd64bd..2ea126a16d79 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1,6 +1,10 @@
1/* 1/*
2 * Copyright (c) 2012 Bjørn Mork <bjorn@mork.no> 2 * Copyright (c) 2012 Bjørn Mork <bjorn@mork.no>
3 * 3 *
4 * The probing code is heavily inspired by cdc_ether, which is:
5 * Copyright (C) 2003-2005 by David Brownell
6 * Copyright (C) 2006 by Ole Andre Vadla Ravnas (ActiveSync)
7 *
4 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
6 * version 2 as published by the Free Software Foundation. 10 * version 2 as published by the Free Software Foundation.
@@ -15,11 +19,7 @@
15#include <linux/usb/usbnet.h> 19#include <linux/usb/usbnet.h>
16#include <linux/usb/cdc-wdm.h> 20#include <linux/usb/cdc-wdm.h>
17 21
18/* The name of the CDC Device Management driver */ 22/* This driver supports wwan (3G/LTE/?) devices using a vendor
19#define DM_DRIVER "cdc_wdm"
20
21/*
22 * This driver supports wwan (3G/LTE/?) devices using a vendor
23 * specific management protocol called Qualcomm MSM Interface (QMI) - 23 * specific management protocol called Qualcomm MSM Interface (QMI) -
24 * in addition to the more common AT commands over serial interface 24 * in addition to the more common AT commands over serial interface
25 * management 25 * management
@@ -31,59 +31,117 @@
31 * management protocol is used in place of the standard CDC 31 * management protocol is used in place of the standard CDC
32 * notifications NOTIFY_NETWORK_CONNECTION and NOTIFY_SPEED_CHANGE 32 * notifications NOTIFY_NETWORK_CONNECTION and NOTIFY_SPEED_CHANGE
33 * 33 *
34 * Alternatively, control and data functions can be combined in a
35 * single USB interface.
36 *
34 * Handling a protocol like QMI is out of the scope for any driver. 37 * Handling a protocol like QMI is out of the scope for any driver.
35 * It can be exported as a character device using the cdc-wdm driver, 38 * It is exported as a character device using the cdc-wdm driver as
36 * which will enable userspace applications ("modem managers") to 39 * a subdriver, enabling userspace applications ("modem managers") to
37 * handle it. This may be required to use the network interface 40 * handle it.
38 * provided by the driver.
39 * 41 *
40 * These devices may alternatively/additionally be configured using AT 42 * These devices may alternatively/additionally be configured using AT
41 * commands on any of the serial interfaces driven by the option driver 43 * commands on a serial interface
42 *
43 * This driver binds only to the data ("slave") interface to enable
44 * the cdc-wdm driver to bind to the control interface. It still
45 * parses the CDC functional descriptors on the control interface to
46 * a) verify that this is indeed a handled interface (CDC Union
47 * header lists it as slave)
48 * b) get MAC address and other ethernet config from the CDC Ethernet
49 * header
50 * c) enable user bind requests against the control interface, which
51 * is the common way to bind to CDC Ethernet Control Model type
52 * interfaces
53 * d) provide a hint to the user about which interface is the
54 * corresponding management interface
55 */ 44 */
56 45
46/* driver specific data */
47struct qmi_wwan_state {
48 struct usb_driver *subdriver;
49 atomic_t pmcount;
50 unsigned long unused;
51 struct usb_interface *control;
52 struct usb_interface *data;
53};
54
55/* using a counter to merge subdriver requests with our own into a combined state */
56static int qmi_wwan_manage_power(struct usbnet *dev, int on)
57{
58 struct qmi_wwan_state *info = (void *)&dev->data;
59 int rv = 0;
60
61 dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__, atomic_read(&info->pmcount), on);
62
63 if ((on && atomic_add_return(1, &info->pmcount) == 1) || (!on && atomic_dec_and_test(&info->pmcount))) {
64 /* need autopm_get/put here to ensure the usbcore sees the new value */
65 rv = usb_autopm_get_interface(dev->intf);
66 if (rv < 0)
67 goto err;
68 dev->intf->needs_remote_wakeup = on;
69 usb_autopm_put_interface(dev->intf);
70 }
71err:
72 return rv;
73}
74
75static int qmi_wwan_cdc_wdm_manage_power(struct usb_interface *intf, int on)
76{
77 struct usbnet *dev = usb_get_intfdata(intf);
78
79 /* can be called while disconnecting */
80 if (!dev)
81 return 0;
82 return qmi_wwan_manage_power(dev, on);
83}
84
85/* collect all three endpoints and register subdriver */
86static int qmi_wwan_register_subdriver(struct usbnet *dev)
87{
88 int rv;
89 struct usb_driver *subdriver = NULL;
90 struct qmi_wwan_state *info = (void *)&dev->data;
91
92 /* collect bulk endpoints */
93 rv = usbnet_get_endpoints(dev, info->data);
94 if (rv < 0)
95 goto err;
96
97 /* update status endpoint if separate control interface */
98 if (info->control != info->data)
99 dev->status = &info->control->cur_altsetting->endpoint[0];
100
101 /* require interrupt endpoint for subdriver */
102 if (!dev->status) {
103 rv = -EINVAL;
104 goto err;
105 }
106
107 /* for subdriver power management */
108 atomic_set(&info->pmcount, 0);
109
110 /* register subdriver */
111 subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, 512, &qmi_wwan_cdc_wdm_manage_power);
112 if (IS_ERR(subdriver)) {
113 dev_err(&info->control->dev, "subdriver registration failed\n");
114 rv = PTR_ERR(subdriver);
115 goto err;
116 }
117
118 /* prevent usbnet from using status endpoint */
119 dev->status = NULL;
120
121 /* save subdriver struct for suspend/resume wrappers */
122 info->subdriver = subdriver;
123
124err:
125 return rv;
126}
127
57static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf) 128static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
58{ 129{
59 int status = -1; 130 int status = -1;
60 struct usb_interface *control = NULL;
61 u8 *buf = intf->cur_altsetting->extra; 131 u8 *buf = intf->cur_altsetting->extra;
62 int len = intf->cur_altsetting->extralen; 132 int len = intf->cur_altsetting->extralen;
63 struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc; 133 struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc;
64 struct usb_cdc_union_desc *cdc_union = NULL; 134 struct usb_cdc_union_desc *cdc_union = NULL;
65 struct usb_cdc_ether_desc *cdc_ether = NULL; 135 struct usb_cdc_ether_desc *cdc_ether = NULL;
66 u32 required = 1 << USB_CDC_HEADER_TYPE | 1 << USB_CDC_UNION_TYPE;
67 u32 found = 0; 136 u32 found = 0;
68 atomic_t *pmcount = (void *)&dev->data[1]; 137 struct usb_driver *driver = driver_of(intf);
138 struct qmi_wwan_state *info = (void *)&dev->data;
69 139
70 atomic_set(pmcount, 0); 140 BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state)));
71 141
72 /* 142 /* require a single interrupt status endpoint for subdriver */
73 * assume a data interface has no additional descriptors and 143 if (intf->cur_altsetting->desc.bNumEndpoints != 1)
74 * that the control and data interface are numbered 144 goto err;
75 * consecutively - this holds for the Huawei device at least
76 */
77 if (len == 0 && desc->bInterfaceNumber > 0) {
78 control = usb_ifnum_to_if(dev->udev, desc->bInterfaceNumber - 1);
79 if (!control)
80 goto err;
81
82 buf = control->cur_altsetting->extra;
83 len = control->cur_altsetting->extralen;
84 dev_dbg(&intf->dev, "guessing \"control\" => %s, \"data\" => this\n",
85 dev_name(&control->dev));
86 }
87 145
88 while (len > 3) { 146 while (len > 3) {
89 struct usb_descriptor_header *h = (void *)buf; 147 struct usb_descriptor_header *h = (void *)buf;
@@ -142,15 +200,23 @@ next_desc:
142 } 200 }
143 201
144 /* did we find all the required ones? */ 202 /* did we find all the required ones? */
145 if ((found & required) != required) { 203 if (!(found & (1 << USB_CDC_HEADER_TYPE)) ||
204 !(found & (1 << USB_CDC_UNION_TYPE))) {
146 dev_err(&intf->dev, "CDC functional descriptors missing\n"); 205 dev_err(&intf->dev, "CDC functional descriptors missing\n");
147 goto err; 206 goto err;
148 } 207 }
149 208
150 /* give the user a helpful hint if trying to bind to the wrong interface */ 209 /* verify CDC Union */
151 if (cdc_union && desc->bInterfaceNumber == cdc_union->bMasterInterface0) { 210 if (desc->bInterfaceNumber != cdc_union->bMasterInterface0) {
152 dev_err(&intf->dev, "leaving \"control\" interface for " DM_DRIVER " - try binding to %s instead!\n", 211 dev_err(&intf->dev, "bogus CDC Union: master=%u\n", cdc_union->bMasterInterface0);
153 dev_name(&usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0)->dev)); 212 goto err;
213 }
214
215 /* need to save these for unbind */
216 info->control = intf;
217 info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0);
218 if (!info->data) {
219 dev_err(&intf->dev, "bogus CDC Union: slave=%u\n", cdc_union->bSlaveInterface0);
154 goto err; 220 goto err;
155 } 221 }
156 222
@@ -160,63 +226,29 @@ next_desc:
160 usbnet_get_ethernet_addr(dev, cdc_ether->iMACAddress); 226 usbnet_get_ethernet_addr(dev, cdc_ether->iMACAddress);
161 } 227 }
162 228
163 /* success! point the user to the management interface */ 229 /* claim data interface and set it up */
164 if (control) 230 status = usb_driver_claim_interface(driver, info->data, dev);
165 dev_info(&intf->dev, "Use \"" DM_DRIVER "\" for QMI interface %s\n", 231 if (status < 0)
166 dev_name(&control->dev)); 232 goto err;
167
168 /* XXX: add a sysfs symlink somewhere to help management applications find it? */
169 233
170 /* collect bulk endpoints now that we know intf == "data" interface */ 234 status = qmi_wwan_register_subdriver(dev);
171 status = usbnet_get_endpoints(dev, intf); 235 if (status < 0) {
236 usb_set_intfdata(info->data, NULL);
237 usb_driver_release_interface(driver, info->data);
238 }
172 239
173err: 240err:
174 return status; 241 return status;
175} 242}
176 243
177/* using a counter to merge subdriver requests with our own into a combined state */
178static int qmi_wwan_manage_power(struct usbnet *dev, int on)
179{
180 atomic_t *pmcount = (void *)&dev->data[1];
181 int rv = 0;
182
183 dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__, atomic_read(pmcount), on);
184
185 if ((on && atomic_add_return(1, pmcount) == 1) || (!on && atomic_dec_and_test(pmcount))) {
186 /* need autopm_get/put here to ensure the usbcore sees the new value */
187 rv = usb_autopm_get_interface(dev->intf);
188 if (rv < 0)
189 goto err;
190 dev->intf->needs_remote_wakeup = on;
191 usb_autopm_put_interface(dev->intf);
192 }
193err:
194 return rv;
195}
196
197static int qmi_wwan_cdc_wdm_manage_power(struct usb_interface *intf, int on)
198{
199 struct usbnet *dev = usb_get_intfdata(intf);
200
201 /* can be called while disconnecting */
202 if (!dev)
203 return 0;
204 return qmi_wwan_manage_power(dev, on);
205}
206
207/* Some devices combine the "control" and "data" functions into a 244/* Some devices combine the "control" and "data" functions into a
208 * single interface with all three endpoints: interrupt + bulk in and 245 * single interface with all three endpoints: interrupt + bulk in and
209 * out 246 * out
210 * 247 */
211 * Setting up cdc-wdm as a subdriver owning the interrupt endpoint
212 * will let it provide userspace access to the encapsulated QMI
213 * protocol without interfering with the usbnet operations.
214 */
215static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf) 248static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf)
216{ 249{
217 int rv; 250 int rv;
218 struct usb_driver *subdriver = NULL; 251 struct qmi_wwan_state *info = (void *)&dev->data;
219 atomic_t *pmcount = (void *)&dev->data[1];
220 252
221 /* ZTE makes devices where the interface descriptors and endpoint 253 /* ZTE makes devices where the interface descriptors and endpoint
222 * configurations of two or more interfaces are identical, even 254 * configurations of two or more interfaces are identical, even
@@ -232,43 +264,39 @@ static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf)
232 goto err; 264 goto err;
233 } 265 }
234 266
235 atomic_set(pmcount, 0); 267 /* control and data is shared */
236 268 info->control = intf;
237 /* collect all three endpoints */ 269 info->data = intf;
238 rv = usbnet_get_endpoints(dev, intf); 270 rv = qmi_wwan_register_subdriver(dev);
239 if (rv < 0)
240 goto err;
241
242 /* require interrupt endpoint for subdriver */
243 if (!dev->status) {
244 rv = -EINVAL;
245 goto err;
246 }
247
248 subdriver = usb_cdc_wdm_register(intf, &dev->status->desc, 512, &qmi_wwan_cdc_wdm_manage_power);
249 if (IS_ERR(subdriver)) {
250 rv = PTR_ERR(subdriver);
251 goto err;
252 }
253
254 /* can't let usbnet use the interrupt endpoint */
255 dev->status = NULL;
256
257 /* save subdriver struct for suspend/resume wrappers */
258 dev->data[0] = (unsigned long)subdriver;
259 271
260err: 272err:
261 return rv; 273 return rv;
262} 274}
263 275
264static void qmi_wwan_unbind_shared(struct usbnet *dev, struct usb_interface *intf) 276static void qmi_wwan_unbind(struct usbnet *dev, struct usb_interface *intf)
265{ 277{
266 struct usb_driver *subdriver = (void *)dev->data[0]; 278 struct qmi_wwan_state *info = (void *)&dev->data;
267 279 struct usb_driver *driver = driver_of(intf);
268 if (subdriver && subdriver->disconnect) 280 struct usb_interface *other;
269 subdriver->disconnect(intf); 281
282 if (info->subdriver && info->subdriver->disconnect)
283 info->subdriver->disconnect(info->control);
284
285 /* allow user to unbind using either control or data */
286 if (intf == info->control)
287 other = info->data;
288 else
289 other = info->control;
290
291 /* only if not shared */
292 if (other && intf != other) {
293 usb_set_intfdata(other, NULL);
294 usb_driver_release_interface(driver, other);
295 }
270 296
271 dev->data[0] = (unsigned long)NULL; 297 info->subdriver = NULL;
298 info->data = NULL;
299 info->control = NULL;
272} 300}
273 301
274/* suspend/resume wrappers calling both usbnet and the cdc-wdm 302/* suspend/resume wrappers calling both usbnet and the cdc-wdm
@@ -280,15 +308,15 @@ static void qmi_wwan_unbind_shared(struct usbnet *dev, struct usb_interface *int
280static int qmi_wwan_suspend(struct usb_interface *intf, pm_message_t message) 308static int qmi_wwan_suspend(struct usb_interface *intf, pm_message_t message)
281{ 309{
282 struct usbnet *dev = usb_get_intfdata(intf); 310 struct usbnet *dev = usb_get_intfdata(intf);
283 struct usb_driver *subdriver = (void *)dev->data[0]; 311 struct qmi_wwan_state *info = (void *)&dev->data;
284 int ret; 312 int ret;
285 313
286 ret = usbnet_suspend(intf, message); 314 ret = usbnet_suspend(intf, message);
287 if (ret < 0) 315 if (ret < 0)
288 goto err; 316 goto err;
289 317
290 if (subdriver && subdriver->suspend) 318 if (info->subdriver && info->subdriver->suspend)
291 ret = subdriver->suspend(intf, message); 319 ret = info->subdriver->suspend(intf, message);
292 if (ret < 0) 320 if (ret < 0)
293 usbnet_resume(intf); 321 usbnet_resume(intf);
294err: 322err:
@@ -298,33 +326,33 @@ err:
298static int qmi_wwan_resume(struct usb_interface *intf) 326static int qmi_wwan_resume(struct usb_interface *intf)
299{ 327{
300 struct usbnet *dev = usb_get_intfdata(intf); 328 struct usbnet *dev = usb_get_intfdata(intf);
301 struct usb_driver *subdriver = (void *)dev->data[0]; 329 struct qmi_wwan_state *info = (void *)&dev->data;
302 int ret = 0; 330 int ret = 0;
303 331
304 if (subdriver && subdriver->resume) 332 if (info->subdriver && info->subdriver->resume)
305 ret = subdriver->resume(intf); 333 ret = info->subdriver->resume(intf);
306 if (ret < 0) 334 if (ret < 0)
307 goto err; 335 goto err;
308 ret = usbnet_resume(intf); 336 ret = usbnet_resume(intf);
309 if (ret < 0 && subdriver && subdriver->resume && subdriver->suspend) 337 if (ret < 0 && info->subdriver && info->subdriver->resume && info->subdriver->suspend)
310 subdriver->suspend(intf, PMSG_SUSPEND); 338 info->subdriver->suspend(intf, PMSG_SUSPEND);
311err: 339err:
312 return ret; 340 return ret;
313} 341}
314 342
315
316static const struct driver_info qmi_wwan_info = { 343static const struct driver_info qmi_wwan_info = {
317 .description = "QMI speaking wwan device", 344 .description = "WWAN/QMI device",
318 .flags = FLAG_WWAN, 345 .flags = FLAG_WWAN,
319 .bind = qmi_wwan_bind, 346 .bind = qmi_wwan_bind,
347 .unbind = qmi_wwan_unbind,
320 .manage_power = qmi_wwan_manage_power, 348 .manage_power = qmi_wwan_manage_power,
321}; 349};
322 350
323static const struct driver_info qmi_wwan_shared = { 351static const struct driver_info qmi_wwan_shared = {
324 .description = "QMI speaking wwan device with combined interface", 352 .description = "WWAN/QMI device",
325 .flags = FLAG_WWAN, 353 .flags = FLAG_WWAN,
326 .bind = qmi_wwan_bind_shared, 354 .bind = qmi_wwan_bind_shared,
327 .unbind = qmi_wwan_unbind_shared, 355 .unbind = qmi_wwan_unbind,
328 .manage_power = qmi_wwan_manage_power, 356 .manage_power = qmi_wwan_manage_power,
329}; 357};
330 358
@@ -332,7 +360,7 @@ static const struct driver_info qmi_wwan_force_int0 = {
332 .description = "Qualcomm WWAN/QMI device", 360 .description = "Qualcomm WWAN/QMI device",
333 .flags = FLAG_WWAN, 361 .flags = FLAG_WWAN,
334 .bind = qmi_wwan_bind_shared, 362 .bind = qmi_wwan_bind_shared,
335 .unbind = qmi_wwan_unbind_shared, 363 .unbind = qmi_wwan_unbind,
336 .manage_power = qmi_wwan_manage_power, 364 .manage_power = qmi_wwan_manage_power,
337 .data = BIT(0), /* interface whitelist bitmap */ 365 .data = BIT(0), /* interface whitelist bitmap */
338}; 366};
@@ -341,7 +369,7 @@ static const struct driver_info qmi_wwan_force_int1 = {
341 .description = "Qualcomm WWAN/QMI device", 369 .description = "Qualcomm WWAN/QMI device",
342 .flags = FLAG_WWAN, 370 .flags = FLAG_WWAN,
343 .bind = qmi_wwan_bind_shared, 371 .bind = qmi_wwan_bind_shared,
344 .unbind = qmi_wwan_unbind_shared, 372 .unbind = qmi_wwan_unbind,
345 .manage_power = qmi_wwan_manage_power, 373 .manage_power = qmi_wwan_manage_power,
346 .data = BIT(1), /* interface whitelist bitmap */ 374 .data = BIT(1), /* interface whitelist bitmap */
347}; 375};
@@ -350,7 +378,7 @@ static const struct driver_info qmi_wwan_force_int2 = {
350 .description = "Qualcomm WWAN/QMI device", 378 .description = "Qualcomm WWAN/QMI device",
351 .flags = FLAG_WWAN, 379 .flags = FLAG_WWAN,
352 .bind = qmi_wwan_bind_shared, 380 .bind = qmi_wwan_bind_shared,
353 .unbind = qmi_wwan_unbind_shared, 381 .unbind = qmi_wwan_unbind,
354 .manage_power = qmi_wwan_manage_power, 382 .manage_power = qmi_wwan_manage_power,
355 .data = BIT(2), /* interface whitelist bitmap */ 383 .data = BIT(2), /* interface whitelist bitmap */
356}; 384};
@@ -359,7 +387,7 @@ static const struct driver_info qmi_wwan_force_int3 = {
359 .description = "Qualcomm WWAN/QMI device", 387 .description = "Qualcomm WWAN/QMI device",
360 .flags = FLAG_WWAN, 388 .flags = FLAG_WWAN,
361 .bind = qmi_wwan_bind_shared, 389 .bind = qmi_wwan_bind_shared,
362 .unbind = qmi_wwan_unbind_shared, 390 .unbind = qmi_wwan_unbind,
363 .manage_power = qmi_wwan_manage_power, 391 .manage_power = qmi_wwan_manage_power,
364 .data = BIT(3), /* interface whitelist bitmap */ 392 .data = BIT(3), /* interface whitelist bitmap */
365}; 393};
@@ -368,7 +396,7 @@ static const struct driver_info qmi_wwan_force_int4 = {
368 .description = "Qualcomm WWAN/QMI device", 396 .description = "Qualcomm WWAN/QMI device",
369 .flags = FLAG_WWAN, 397 .flags = FLAG_WWAN,
370 .bind = qmi_wwan_bind_shared, 398 .bind = qmi_wwan_bind_shared,
371 .unbind = qmi_wwan_unbind_shared, 399 .unbind = qmi_wwan_unbind,
372 .manage_power = qmi_wwan_manage_power, 400 .manage_power = qmi_wwan_manage_power,
373 .data = BIT(4), /* interface whitelist bitmap */ 401 .data = BIT(4), /* interface whitelist bitmap */
374}; 402};
@@ -390,7 +418,7 @@ static const struct driver_info qmi_wwan_sierra = {
390 .description = "Sierra Wireless wwan/QMI device", 418 .description = "Sierra Wireless wwan/QMI device",
391 .flags = FLAG_WWAN, 419 .flags = FLAG_WWAN,
392 .bind = qmi_wwan_bind_shared, 420 .bind = qmi_wwan_bind_shared,
393 .unbind = qmi_wwan_unbind_shared, 421 .unbind = qmi_wwan_unbind,
394 .manage_power = qmi_wwan_manage_power, 422 .manage_power = qmi_wwan_manage_power,
395 .data = BIT(8) | BIT(19), /* interface whitelist bitmap */ 423 .data = BIT(8) | BIT(19), /* interface whitelist bitmap */
396}; 424};
@@ -413,7 +441,7 @@ static const struct usb_device_id products[] = {
413 .idVendor = HUAWEI_VENDOR_ID, 441 .idVendor = HUAWEI_VENDOR_ID,
414 .bInterfaceClass = USB_CLASS_VENDOR_SPEC, 442 .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
415 .bInterfaceSubClass = 1, 443 .bInterfaceSubClass = 1,
416 .bInterfaceProtocol = 8, /* NOTE: This is the *slave* interface of the CDC Union! */ 444 .bInterfaceProtocol = 9, /* CDC Ethernet *control* interface */
417 .driver_info = (unsigned long)&qmi_wwan_info, 445 .driver_info = (unsigned long)&qmi_wwan_info,
418 }, 446 },
419 { /* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */ 447 { /* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */
@@ -421,7 +449,7 @@ static const struct usb_device_id products[] = {
421 .idVendor = HUAWEI_VENDOR_ID, 449 .idVendor = HUAWEI_VENDOR_ID,
422 .bInterfaceClass = USB_CLASS_VENDOR_SPEC, 450 .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
423 .bInterfaceSubClass = 1, 451 .bInterfaceSubClass = 1,
424 .bInterfaceProtocol = 56, /* NOTE: This is the *slave* interface of the CDC Union! */ 452 .bInterfaceProtocol = 57, /* CDC Ethernet *control* interface */
425 .driver_info = (unsigned long)&qmi_wwan_info, 453 .driver_info = (unsigned long)&qmi_wwan_info,
426 }, 454 },
427 { /* Huawei E392, E398 and possibly others in "Windows mode" 455 { /* Huawei E392, E398 and possibly others in "Windows mode"
@@ -453,6 +481,15 @@ static const struct usb_device_id products[] = {
453 .bInterfaceProtocol = 0xff, 481 .bInterfaceProtocol = 0xff,
454 .driver_info = (unsigned long)&qmi_wwan_force_int4, 482 .driver_info = (unsigned long)&qmi_wwan_force_int4,
455 }, 483 },
484 { /* ZTE MF821D */
485 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
486 .idVendor = 0x19d2,
487 .idProduct = 0x0326,
488 .bInterfaceClass = 0xff,
489 .bInterfaceSubClass = 0xff,
490 .bInterfaceProtocol = 0xff,
491 .driver_info = (unsigned long)&qmi_wwan_force_int4,
492 },
456 { /* ZTE (Vodafone) K3520-Z */ 493 { /* ZTE (Vodafone) K3520-Z */
457 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, 494 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
458 .idVendor = 0x19d2, 495 .idVendor = 0x19d2,
@@ -572,10 +609,27 @@ static const struct usb_device_id products[] = {
572}; 609};
573MODULE_DEVICE_TABLE(usb, products); 610MODULE_DEVICE_TABLE(usb, products);
574 611
612static int qmi_wwan_probe(struct usb_interface *intf, const struct usb_device_id *prod)
613{
614 struct usb_device_id *id = (struct usb_device_id *)prod;
615
616 /* Workaround to enable dynamic IDs. This disables usbnet
617 * blacklisting functionality. Which, if required, can be
618 * reimplemented here by using a magic "blacklist" value
619 * instead of 0 in the static device id table
620 */
621 if (!id->driver_info) {
622 dev_dbg(&intf->dev, "setting defaults for dynamic device id\n");
623 id->driver_info = (unsigned long)&qmi_wwan_shared;
624 }
625
626 return usbnet_probe(intf, id);
627}
628
575static struct usb_driver qmi_wwan_driver = { 629static struct usb_driver qmi_wwan_driver = {
576 .name = "qmi_wwan", 630 .name = "qmi_wwan",
577 .id_table = products, 631 .id_table = products,
578 .probe = usbnet_probe, 632 .probe = qmi_wwan_probe,
579 .disconnect = usbnet_disconnect, 633 .disconnect = usbnet_disconnect,
580 .suspend = qmi_wwan_suspend, 634 .suspend = qmi_wwan_suspend,
581 .resume = qmi_wwan_resume, 635 .resume = qmi_wwan_resume,
@@ -584,17 +638,7 @@ static struct usb_driver qmi_wwan_driver = {
584 .disable_hub_initiated_lpm = 1, 638 .disable_hub_initiated_lpm = 1,
585}; 639};
586 640
587static int __init qmi_wwan_init(void) 641module_usb_driver(qmi_wwan_driver);
588{
589 return usb_register(&qmi_wwan_driver);
590}
591module_init(qmi_wwan_init);
592
593static void __exit qmi_wwan_exit(void)
594{
595 usb_deregister(&qmi_wwan_driver);
596}
597module_exit(qmi_wwan_exit);
598 642
599MODULE_AUTHOR("Bjørn Mork <bjorn@mork.no>"); 643MODULE_AUTHOR("Bjørn Mork <bjorn@mork.no>");
600MODULE_DESCRIPTION("Qualcomm MSM Interface (QMI) WWAN driver"); 644MODULE_DESCRIPTION("Qualcomm MSM Interface (QMI) WWAN driver");
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 1c6e51588da7..6c0c5b76fc41 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -616,7 +616,7 @@ static void smsc75xx_init_mac_address(struct usbnet *dev)
616 616
617 /* no eeprom, or eeprom values are invalid. generate random MAC */ 617 /* no eeprom, or eeprom values are invalid. generate random MAC */
618 eth_hw_addr_random(dev->net); 618 eth_hw_addr_random(dev->net);
619 netif_dbg(dev, ifup, dev->net, "MAC address set to random_ether_addr"); 619 netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr");
620} 620}
621 621
622static int smsc75xx_set_mac_address(struct usbnet *dev) 622static int smsc75xx_set_mac_address(struct usbnet *dev)
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index b1112e753859..25cc3a15a4ea 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -578,6 +578,36 @@ static int smsc95xx_ethtool_set_eeprom(struct net_device *netdev,
578 return smsc95xx_write_eeprom(dev, ee->offset, ee->len, data); 578 return smsc95xx_write_eeprom(dev, ee->offset, ee->len, data);
579} 579}
580 580
581static int smsc95xx_ethtool_getregslen(struct net_device *netdev)
582{
583 /* all smsc95xx registers */
584 return COE_CR - ID_REV + 1;
585}
586
587static void
588smsc95xx_ethtool_getregs(struct net_device *netdev, struct ethtool_regs *regs,
589 void *buf)
590{
591 struct usbnet *dev = netdev_priv(netdev);
592 unsigned int i, j;
593 int retval;
594 u32 *data = buf;
595
596 retval = smsc95xx_read_reg(dev, ID_REV, &regs->version);
597 if (retval < 0) {
598 netdev_warn(netdev, "REGS: cannot read ID_REV\n");
599 return;
600 }
601
602 for (i = ID_REV, j = 0; i <= COE_CR; i += (sizeof(u32)), j++) {
603 retval = smsc95xx_read_reg(dev, i, &data[j]);
604 if (retval < 0) {
605 netdev_warn(netdev, "REGS: cannot read reg[%x]\n", i);
606 return;
607 }
608 }
609}
610
581static const struct ethtool_ops smsc95xx_ethtool_ops = { 611static const struct ethtool_ops smsc95xx_ethtool_ops = {
582 .get_link = usbnet_get_link, 612 .get_link = usbnet_get_link,
583 .nway_reset = usbnet_nway_reset, 613 .nway_reset = usbnet_nway_reset,
@@ -589,6 +619,8 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = {
589 .get_eeprom_len = smsc95xx_ethtool_get_eeprom_len, 619 .get_eeprom_len = smsc95xx_ethtool_get_eeprom_len,
590 .get_eeprom = smsc95xx_ethtool_get_eeprom, 620 .get_eeprom = smsc95xx_ethtool_get_eeprom,
591 .set_eeprom = smsc95xx_ethtool_set_eeprom, 621 .set_eeprom = smsc95xx_ethtool_set_eeprom,
622 .get_regs_len = smsc95xx_ethtool_getregslen,
623 .get_regs = smsc95xx_ethtool_getregs,
592}; 624};
593 625
594static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) 626static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -615,7 +647,7 @@ static void smsc95xx_init_mac_address(struct usbnet *dev)
615 647
616 /* no eeprom, or eeprom values are invalid. generate random MAC */ 648 /* no eeprom, or eeprom values are invalid. generate random MAC */
617 eth_hw_addr_random(dev->net); 649 eth_hw_addr_random(dev->net);
618 netif_dbg(dev, ifup, dev->net, "MAC address set to random_ether_addr\n"); 650 netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n");
619} 651}
620 652
621static int smsc95xx_set_mac_address(struct usbnet *dev) 653static int smsc95xx_set_mac_address(struct usbnet *dev)
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index aba769d77459..8531c1caac28 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -180,7 +180,40 @@ int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
180} 180}
181EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr); 181EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
182 182
183static void intr_complete (struct urb *urb); 183static void intr_complete (struct urb *urb)
184{
185 struct usbnet *dev = urb->context;
186 int status = urb->status;
187
188 switch (status) {
189 /* success */
190 case 0:
191 dev->driver_info->status(dev, urb);
192 break;
193
194 /* software-driven interface shutdown */
195 case -ENOENT: /* urb killed */
196 case -ESHUTDOWN: /* hardware gone */
197 netif_dbg(dev, ifdown, dev->net,
198 "intr shutdown, code %d\n", status);
199 return;
200
201 /* NOTE: not throttling like RX/TX, since this endpoint
202 * already polls infrequently
203 */
204 default:
205 netdev_dbg(dev->net, "intr status %d\n", status);
206 break;
207 }
208
209 if (!netif_running (dev->net))
210 return;
211
212 status = usb_submit_urb (urb, GFP_ATOMIC);
213 if (status != 0)
214 netif_err(dev, timer, dev->net,
215 "intr resubmit --> %d\n", status);
216}
184 217
185static int init_status (struct usbnet *dev, struct usb_interface *intf) 218static int init_status (struct usbnet *dev, struct usb_interface *intf)
186{ 219{
@@ -519,42 +552,6 @@ block:
519 netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n"); 552 netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
520} 553}
521 554
522static void intr_complete (struct urb *urb)
523{
524 struct usbnet *dev = urb->context;
525 int status = urb->status;
526
527 switch (status) {
528 /* success */
529 case 0:
530 dev->driver_info->status(dev, urb);
531 break;
532
533 /* software-driven interface shutdown */
534 case -ENOENT: /* urb killed */
535 case -ESHUTDOWN: /* hardware gone */
536 netif_dbg(dev, ifdown, dev->net,
537 "intr shutdown, code %d\n", status);
538 return;
539
540 /* NOTE: not throttling like RX/TX, since this endpoint
541 * already polls infrequently
542 */
543 default:
544 netdev_dbg(dev->net, "intr status %d\n", status);
545 break;
546 }
547
548 if (!netif_running (dev->net))
549 return;
550
551 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
552 status = usb_submit_urb (urb, GFP_ATOMIC);
553 if (status != 0)
554 netif_err(dev, timer, dev->net,
555 "intr resubmit --> %d\n", status);
556}
557
558/*-------------------------------------------------------------------------*/ 555/*-------------------------------------------------------------------------*/
559void usbnet_pause_rx(struct usbnet *dev) 556void usbnet_pause_rx(struct usbnet *dev)
560{ 557{
@@ -1312,7 +1309,6 @@ void usbnet_disconnect (struct usb_interface *intf)
1312 usb_free_urb(dev->interrupt); 1309 usb_free_urb(dev->interrupt);
1313 1310
1314 free_netdev(net); 1311 free_netdev(net);
1315 usb_put_dev (xdev);
1316} 1312}
1317EXPORT_SYMBOL_GPL(usbnet_disconnect); 1313EXPORT_SYMBOL_GPL(usbnet_disconnect);
1318 1314
@@ -1368,8 +1364,6 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1368 xdev = interface_to_usbdev (udev); 1364 xdev = interface_to_usbdev (udev);
1369 interface = udev->cur_altsetting; 1365 interface = udev->cur_altsetting;
1370 1366
1371 usb_get_dev (xdev);
1372
1373 status = -ENOMEM; 1367 status = -ENOMEM;
1374 1368
1375 // set up our own records 1369 // set up our own records
@@ -1498,7 +1492,6 @@ out3:
1498out1: 1492out1:
1499 free_netdev(net); 1493 free_netdev(net);
1500out: 1494out:
1501 usb_put_dev(xdev);
1502 return status; 1495 return status;
1503} 1496}
1504EXPORT_SYMBOL_GPL(usbnet_probe); 1497EXPORT_SYMBOL_GPL(usbnet_probe);
@@ -1600,7 +1593,7 @@ static int __init usbnet_init(void)
1600 BUILD_BUG_ON( 1593 BUILD_BUG_ON(
1601 FIELD_SIZEOF(struct sk_buff, cb) < sizeof(struct skb_data)); 1594 FIELD_SIZEOF(struct sk_buff, cb) < sizeof(struct skb_data));
1602 1595
1603 random_ether_addr(node_id); 1596 eth_random_addr(node_id);
1604 return 0; 1597 return 0;
1605} 1598}
1606module_init(usbnet_init); 1599module_init(usbnet_init);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index f18149ae2588..1db445b2ecc7 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1062,7 +1062,7 @@ static int virtnet_probe(struct virtio_device *vdev)
1062 return -ENOMEM; 1062 return -ENOMEM;
1063 1063
1064 /* Set up network device as normal. */ 1064 /* Set up network device as normal. */
1065 dev->priv_flags |= IFF_UNICAST_FLT; 1065 dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
1066 dev->netdev_ops = &virtnet_netdev; 1066 dev->netdev_ops = &virtnet_netdev;
1067 dev->features = NETIF_F_HIGHDMA; 1067 dev->features = NETIF_F_HIGHDMA;
1068 1068
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 3f04ba0a5454..93e0cfb739b8 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1037,7 +1037,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1037#endif 1037#endif
1038 dev_dbg(&adapter->netdev->dev, 1038 dev_dbg(&adapter->netdev->dev,
1039 "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n", 1039 "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
1040 (u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd - 1040 (u32)(ctx.sop_txd -
1041 tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr), 1041 tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
1042 le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3])); 1042 le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
1043 1043
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index d7a65e141d1a..44db8b75a531 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -231,7 +231,7 @@ static void x25_asy_encaps(struct x25_asy *sl, unsigned char *icp, int len)
231 } 231 }
232 232
233 p = icp; 233 p = icp;
234 count = x25_asy_esc(p, (unsigned char *) sl->xbuff, len); 234 count = x25_asy_esc(p, sl->xbuff, len);
235 235
236 /* Order of next two lines is *very* important. 236 /* Order of next two lines is *very* important.
237 * When we are sending a little amount of data, 237 * When we are sending a little amount of data,
diff --git a/drivers/net/wimax/i2400m/Kconfig b/drivers/net/wimax/i2400m/Kconfig
index 672de18a776c..71453db14258 100644
--- a/drivers/net/wimax/i2400m/Kconfig
+++ b/drivers/net/wimax/i2400m/Kconfig
@@ -7,9 +7,6 @@ config WIMAX_I2400M
7comment "Enable USB support to see WiMAX USB drivers" 7comment "Enable USB support to see WiMAX USB drivers"
8 depends on USB = n 8 depends on USB = n
9 9
10comment "Enable MMC support to see WiMAX SDIO drivers"
11 depends on MMC = n
12
13config WIMAX_I2400M_USB 10config WIMAX_I2400M_USB
14 tristate "Intel Wireless WiMAX Connection 2400 over USB (including 5x50)" 11 tristate "Intel Wireless WiMAX Connection 2400 over USB (including 5x50)"
15 depends on WIMAX && USB 12 depends on WIMAX && USB
@@ -21,25 +18,6 @@ config WIMAX_I2400M_USB
21 18
22 If unsure, it is safe to select M (module). 19 If unsure, it is safe to select M (module).
23 20
24config WIMAX_I2400M_SDIO
25 tristate "Intel Wireless WiMAX Connection 2400 over SDIO"
26 depends on WIMAX && MMC
27 select WIMAX_I2400M
28 help
29 Select if you have a device based on the Intel WiMAX
30 Connection 2400 over SDIO.
31
32 If unsure, it is safe to select M (module).
33
34config WIMAX_IWMC3200_SDIO
35 bool "Intel Wireless Multicom WiMAX Connection 3200 over SDIO (EXPERIMENTAL)"
36 depends on WIMAX_I2400M_SDIO
37 depends on EXPERIMENTAL
38 select IWMC3200TOP
39 help
40 Select if you have a device based on the Intel Multicom WiMAX
41 Connection 3200 over SDIO.
42
43config WIMAX_I2400M_DEBUG_LEVEL 21config WIMAX_I2400M_DEBUG_LEVEL
44 int "WiMAX i2400m debug level" 22 int "WiMAX i2400m debug level"
45 depends on WIMAX_I2400M 23 depends on WIMAX_I2400M
diff --git a/drivers/net/wimax/i2400m/Makefile b/drivers/net/wimax/i2400m/Makefile
index 5d9e018d31af..f6d19c348082 100644
--- a/drivers/net/wimax/i2400m/Makefile
+++ b/drivers/net/wimax/i2400m/Makefile
@@ -1,7 +1,6 @@
1 1
2obj-$(CONFIG_WIMAX_I2400M) += i2400m.o 2obj-$(CONFIG_WIMAX_I2400M) += i2400m.o
3obj-$(CONFIG_WIMAX_I2400M_USB) += i2400m-usb.o 3obj-$(CONFIG_WIMAX_I2400M_USB) += i2400m-usb.o
4obj-$(CONFIG_WIMAX_I2400M_SDIO) += i2400m-sdio.o
5 4
6i2400m-y := \ 5i2400m-y := \
7 control.o \ 6 control.o \
@@ -21,10 +20,3 @@ i2400m-usb-y := \
21 usb-tx.o \ 20 usb-tx.o \
22 usb-rx.o \ 21 usb-rx.o \
23 usb.o 22 usb.o
24
25
26i2400m-sdio-y := \
27 sdio.o \
28 sdio-tx.o \
29 sdio-fw.o \
30 sdio-rx.o
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 47cae7150bc1..025426132754 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -754,8 +754,7 @@ EXPORT_SYMBOL_GPL(i2400m_error_recovery);
754/* 754/*
755 * Alloc the command and ack buffers for boot mode 755 * Alloc the command and ack buffers for boot mode
756 * 756 *
757 * Get the buffers needed to deal with boot mode messages. These 757 * Get the buffers needed to deal with boot mode messages.
758 * buffers need to be allocated before the sdio receive irq is setup.
759 */ 758 */
760static 759static
761int i2400m_bm_buf_alloc(struct i2400m *i2400m) 760int i2400m_bm_buf_alloc(struct i2400m *i2400m)
@@ -897,7 +896,7 @@ int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags)
897 result = i2400m_read_mac_addr(i2400m); 896 result = i2400m_read_mac_addr(i2400m);
898 if (result < 0) 897 if (result < 0)
899 goto error_read_mac_addr; 898 goto error_read_mac_addr;
900 random_ether_addr(i2400m->src_mac_addr); 899 eth_random_addr(i2400m->src_mac_addr);
901 900
902 i2400m->pm_notifier.notifier_call = i2400m_pm_notifier; 901 i2400m->pm_notifier.notifier_call = i2400m_pm_notifier;
903 register_pm_notifier(&i2400m->pm_notifier); 902 register_pm_notifier(&i2400m->pm_notifier);
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index 7cbd7d231e11..283237f6f074 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -51,8 +51,7 @@
51 * firmware. Normal hardware takes only signed firmware. 51 * firmware. Normal hardware takes only signed firmware.
52 * 52 *
53 * On boot mode, in USB, we write to the device using the bulk out 53 * On boot mode, in USB, we write to the device using the bulk out
54 * endpoint and read from it in the notification endpoint. In SDIO we 54 * endpoint and read from it in the notification endpoint.
55 * talk to it via the write address and read from the read address.
56 * 55 *
57 * Upon entrance to boot mode, the device sends (preceded with a few 56 * Upon entrance to boot mode, the device sends (preceded with a few
58 * zero length packets (ZLPs) on the notification endpoint in USB) a 57 * zero length packets (ZLPs) on the notification endpoint in USB) a
@@ -1268,7 +1267,7 @@ int i2400m_fw_check(struct i2400m *i2400m, const void *bcf, size_t bcf_size)
1268 size_t leftover, offset, header_len, size; 1267 size_t leftover, offset, header_len, size;
1269 1268
1270 leftover = top - itr; 1269 leftover = top - itr;
1271 offset = itr - (const void *) bcf; 1270 offset = itr - bcf;
1272 if (leftover <= sizeof(*bcf_hdr)) { 1271 if (leftover <= sizeof(*bcf_hdr)) {
1273 dev_err(dev, "firmware %s: %zu B left at @%zx, " 1272 dev_err(dev, "firmware %s: %zu B left at @%zx, "
1274 "not enough for BCF header\n", 1273 "not enough for BCF header\n",
diff --git a/drivers/net/wimax/i2400m/i2400m-sdio.h b/drivers/net/wimax/i2400m/i2400m-sdio.h
deleted file mode 100644
index 1d63ffdedfde..000000000000
--- a/drivers/net/wimax/i2400m/i2400m-sdio.h
+++ /dev/null
@@ -1,157 +0,0 @@
1/*
2 * Intel Wireless WiMAX Connection 2400m
3 * SDIO-specific i2400m driver definitions
4 *
5 *
6 * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * * Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * * Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
17 * distribution.
18 * * Neither the name of Intel Corporation nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 *
35 * Intel Corporation <linux-wimax@intel.com>
36 * Brian Bian <brian.bian@intel.com>
37 * Dirk Brandewie <dirk.j.brandewie@intel.com>
38 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
39 * Yanir Lubetkin <yanirx.lubetkin@intel.com>
40 * - Initial implementation
41 *
42 *
43 * This driver implements the bus-specific part of the i2400m for
44 * SDIO. Check i2400m.h for a generic driver description.
45 *
46 * ARCHITECTURE
47 *
48 * This driver sits under the bus-generic i2400m driver, providing the
49 * connection to the device.
50 *
51 * When probed, all the function pointers are setup and then the
52 * bus-generic code called. The generic driver will then use the
53 * provided pointers for uploading firmware (i2400ms_bus_bm*() in
54 * sdio-fw.c) and then setting up the device (i2400ms_dev_*() in
55 * sdio.c).
56 *
57 * Once firmware is uploaded, TX functions (sdio-tx.c) are called when
58 * data is ready for transmission in the TX fifo; then the SDIO IRQ is
59 * fired and data is available (sdio-rx.c), it is sent to the generic
60 * driver for processing with i2400m_rx.
61 */
62
63#ifndef __I2400M_SDIO_H__
64#define __I2400M_SDIO_H__
65
66#include "i2400m.h"
67
68/* Host-Device interface for SDIO */
69enum {
70 I2400M_SDIO_BOOT_RETRIES = 3,
71 I2400MS_BLK_SIZE = 256,
72 I2400MS_PL_SIZE_MAX = 0x3E00,
73
74 I2400MS_DATA_ADDR = 0x0,
75 I2400MS_INTR_STATUS_ADDR = 0x13,
76 I2400MS_INTR_CLEAR_ADDR = 0x13,
77 I2400MS_INTR_ENABLE_ADDR = 0x14,
78 I2400MS_INTR_GET_SIZE_ADDR = 0x2C,
79 /* The number of ticks to wait for the device to signal that
80 * it is ready */
81 I2400MS_INIT_SLEEP_INTERVAL = 100,
82 /* How long to wait for the device to settle after reset */
83 I2400MS_SETTLE_TIME = 40,
84 /* The number of msec to wait for IOR after sending IOE */
85 IWMC3200_IOR_TIMEOUT = 10,
86};
87
88
89/**
90 * struct i2400ms - descriptor for a SDIO connected i2400m
91 *
92 * @i2400m: bus-generic i2400m implementation; has to be first (see
93 * it's documentation in i2400m.h).
94 *
95 * @func: pointer to our SDIO function
96 *
97 * @tx_worker: workqueue struct used to TX data when the bus-generic
98 * code signals packets are pending for transmission to the device.
99 *
100 * @tx_workqueue: workqeueue used for data TX; we don't use the
101 * system's workqueue as that might cause deadlocks with code in
102 * the bus-generic driver. The read/write operation to the queue
103 * is protected with spinlock (tx_lock in struct i2400m) to avoid
104 * the queue being destroyed in the middle of a the queue read/write
105 * operation.
106 *
107 * @debugfs_dentry: dentry for the SDIO specific debugfs files
108 *
109 * Note this value is set to NULL upon destruction; this is
110 * because some routinges use it to determine if we are inside the
111 * probe() path or some other path. When debugfs is disabled,
112 * creation sets the dentry to '(void*) -ENODEV', which is valid
113 * for the test.
114 */
115struct i2400ms {
116 struct i2400m i2400m; /* FIRST! See doc */
117 struct sdio_func *func;
118
119 struct work_struct tx_worker;
120 struct workqueue_struct *tx_workqueue;
121 char tx_wq_name[32];
122
123 struct dentry *debugfs_dentry;
124
125 wait_queue_head_t bm_wfa_wq;
126 int bm_wait_result;
127 size_t bm_ack_size;
128
129 /* Device is any of the iwmc3200 SKUs */
130 unsigned iwmc3200:1;
131};
132
133
134static inline
135void i2400ms_init(struct i2400ms *i2400ms)
136{
137 i2400m_init(&i2400ms->i2400m);
138}
139
140
141extern int i2400ms_rx_setup(struct i2400ms *);
142extern void i2400ms_rx_release(struct i2400ms *);
143
144extern int i2400ms_tx_setup(struct i2400ms *);
145extern void i2400ms_tx_release(struct i2400ms *);
146extern void i2400ms_bus_tx_kick(struct i2400m *);
147
148extern ssize_t i2400ms_bus_bm_cmd_send(struct i2400m *,
149 const struct i2400m_bootrom_header *,
150 size_t, int);
151extern ssize_t i2400ms_bus_bm_wait_for_ack(struct i2400m *,
152 struct i2400m_bootrom_header *,
153 size_t);
154extern void i2400ms_bus_bm_release(struct i2400m *);
155extern int i2400ms_bus_bm_setup(struct i2400m *);
156
157#endif /* #ifndef __I2400M_SDIO_H__ */
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index c806d4550212..79c6505b5c20 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -46,7 +46,7 @@
46 * - bus generic driver (this part) 46 * - bus generic driver (this part)
47 * 47 *
48 * The bus specific driver sets up stuff specific to the bus the 48 * The bus specific driver sets up stuff specific to the bus the
49 * device is connected to (USB, SDIO, PCI, tam-tam...non-authoritative 49 * device is connected to (USB, PCI, tam-tam...non-authoritative
50 * nor binding list) which is basically the device-model management 50 * nor binding list) which is basically the device-model management
51 * (probe/disconnect, etc), moving data from device to kernel and 51 * (probe/disconnect, etc), moving data from device to kernel and
52 * back, doing the power saving details and reseting the device. 52 * back, doing the power saving details and reseting the device.
@@ -238,14 +238,13 @@ struct i2400m_barker_db;
238 * amount needed for loading firmware, where us dev_start/stop setup 238 * amount needed for loading firmware, where us dev_start/stop setup
239 * the rest needed to do full data/control traffic. 239 * the rest needed to do full data/control traffic.
240 * 240 *
241 * @bus_tx_block_size: [fill] SDIO imposes a 256 block size, USB 16, 241 * @bus_tx_block_size: [fill] USB imposes a 16 block size, but other
242 * so we have a tx_blk_size variable that the bus layer sets to 242 * busses will differ. So we have a tx_blk_size variable that the
243 * tell the engine how much of that we need. 243 * bus layer sets to tell the engine how much of that we need.
244 * 244 *
245 * @bus_tx_room_min: [fill] Minimum room required while allocating 245 * @bus_tx_room_min: [fill] Minimum room required while allocating
246 * TX queue's buffer space for message header. SDIO requires 246 * TX queue's buffer space for message header. USB requires
247 * 224 bytes and USB 16 bytes. Refer bus specific driver code 247 * 16 bytes. Refer to bus specific driver code for details.
248 * for details.
249 * 248 *
250 * @bus_pl_size_max: [fill] Maximum payload size. 249 * @bus_pl_size_max: [fill] Maximum payload size.
251 * 250 *
diff --git a/drivers/net/wimax/i2400m/sdio-debug-levels.h b/drivers/net/wimax/i2400m/sdio-debug-levels.h
deleted file mode 100644
index c51998741301..000000000000
--- a/drivers/net/wimax/i2400m/sdio-debug-levels.h
+++ /dev/null
@@ -1,22 +0,0 @@
1/*
2 * debug levels control file for the i2400m module's
3 */
4#ifndef __debug_levels__h__
5#define __debug_levels__h__
6
7/* Maximum compile and run time debug level for all submodules */
8#define D_MODULENAME i2400m_sdio
9#define D_MASTER CONFIG_WIMAX_I2400M_DEBUG_LEVEL
10
11#include <linux/wimax/debug.h>
12
13/* List of all the enabled modules */
14enum d_module {
15 D_SUBMODULE_DECLARE(main),
16 D_SUBMODULE_DECLARE(tx),
17 D_SUBMODULE_DECLARE(rx),
18 D_SUBMODULE_DECLARE(fw)
19};
20
21
22#endif /* #ifndef __debug_levels__h__ */
diff --git a/drivers/net/wimax/i2400m/sdio-fw.c b/drivers/net/wimax/i2400m/sdio-fw.c
deleted file mode 100644
index 8e025418f5be..000000000000
--- a/drivers/net/wimax/i2400m/sdio-fw.c
+++ /dev/null
@@ -1,210 +0,0 @@
1/*
2 * Intel Wireless WiMAX Connection 2400m
3 * Firmware uploader's SDIO specifics
4 *
5 *
6 * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * * Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * * Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
17 * distribution.
18 * * Neither the name of Intel Corporation nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 *
35 * Intel Corporation <linux-wimax@intel.com>
36 * Yanir Lubetkin <yanirx.lubetkin@intel.com>
37 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
38 * - Initial implementation
39 *
40 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
41 * - Bus generic/specific split for USB
42 *
43 * Dirk Brandewie <dirk.j.brandewie@intel.com>
44 * - Initial implementation for SDIO
45 *
46 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
47 * - SDIO rehash for changes in the bus-driver model
48 *
49 * Dirk Brandewie <dirk.j.brandewie@intel.com>
50 * - Make it IRQ based, not polling
51 *
52 * THE PROCEDURE
53 *
54 * See fw.c for the generic description of this procedure.
55 *
56 * This file implements only the SDIO specifics. It boils down to how
57 * to send a command and waiting for an acknowledgement from the
58 * device.
59 *
60 * All this code is sequential -- all i2400ms_bus_bm_*() functions are
61 * executed in the same thread, except i2400ms_bm_irq() [on its own by
62 * the SDIO driver]. This makes it possible to avoid locking.
63 *
64 * COMMAND EXECUTION
65 *
66 * The generic firmware upload code will call i2400m_bus_bm_cmd_send()
67 * to send commands.
68 *
69 * The SDIO devices expects things in 256 byte blocks, so it will pad
70 * it, compute the checksum (if needed) and pass it to SDIO.
71 *
72 * ACK RECEPTION
73 *
74 * This works in IRQ mode -- the fw loader says when to wait for data
75 * and for that it calls i2400ms_bus_bm_wait_for_ack().
76 *
77 * This checks if there is any data available (RX size > 0); if not,
78 * waits for the IRQ handler to notify about it. Once there is data,
79 * it is read and passed to the caller. Doing it this way we don't
80 * need much coordination/locking, and it makes it much more difficult
81 * for an interrupt to be lost and the wait_for_ack() function getting
82 * stuck even when data is pending.
83 */
84#include <linux/mmc/sdio_func.h>
85#include "i2400m-sdio.h"
86
87
88#define D_SUBMODULE fw
89#include "sdio-debug-levels.h"
90
91
92/*
93 * Send a boot-mode command to the SDIO function
94 *
95 * We use a bounce buffer (i2400m->bm_cmd_buf) because we need to
96 * touch the header if the RAW flag is not set.
97 *
98 * @flags: pass thru from i2400m_bm_cmd()
99 * @return: cmd_size if ok, < 0 errno code on error.
100 *
101 * Note the command is padded to the SDIO block size for the device.
102 */
103ssize_t i2400ms_bus_bm_cmd_send(struct i2400m *i2400m,
104 const struct i2400m_bootrom_header *_cmd,
105 size_t cmd_size, int flags)
106{
107 ssize_t result;
108 struct device *dev = i2400m_dev(i2400m);
109 struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
110 int opcode = _cmd == NULL ? -1 : i2400m_brh_get_opcode(_cmd);
111 struct i2400m_bootrom_header *cmd;
112 /* SDIO restriction */
113 size_t cmd_size_a = ALIGN(cmd_size, I2400MS_BLK_SIZE);
114
115 d_fnstart(5, dev, "(i2400m %p cmd %p size %zu)\n",
116 i2400m, _cmd, cmd_size);
117 result = -E2BIG;
118 if (cmd_size > I2400M_BM_CMD_BUF_SIZE)
119 goto error_too_big;
120
121 if (_cmd != i2400m->bm_cmd_buf)
122 memmove(i2400m->bm_cmd_buf, _cmd, cmd_size);
123 cmd = i2400m->bm_cmd_buf;
124 if (cmd_size_a > cmd_size) /* Zero pad space */
125 memset(i2400m->bm_cmd_buf + cmd_size, 0, cmd_size_a - cmd_size);
126 if ((flags & I2400M_BM_CMD_RAW) == 0) {
127 if (WARN_ON(i2400m_brh_get_response_required(cmd) == 0))
128 dev_warn(dev, "SW BUG: response_required == 0\n");
129 i2400m_bm_cmd_prepare(cmd);
130 }
131 d_printf(4, dev, "BM cmd %d: %zu bytes (%zu padded)\n",
132 opcode, cmd_size, cmd_size_a);
133 d_dump(5, dev, cmd, cmd_size);
134
135 sdio_claim_host(i2400ms->func); /* Send & check */
136 result = sdio_memcpy_toio(i2400ms->func, I2400MS_DATA_ADDR,
137 i2400m->bm_cmd_buf, cmd_size_a);
138 sdio_release_host(i2400ms->func);
139 if (result < 0) {
140 dev_err(dev, "BM cmd %d: cannot send: %ld\n",
141 opcode, (long) result);
142 goto error_cmd_send;
143 }
144 result = cmd_size;
145error_cmd_send:
146error_too_big:
147 d_fnend(5, dev, "(i2400m %p cmd %p size %zu) = %d\n",
148 i2400m, _cmd, cmd_size, (int) result);
149 return result;
150}
151
152
153/*
154 * Read an ack from the device's boot-mode
155 *
156 * @i2400m:
157 * @_ack: pointer to where to store the read data
158 * @ack_size: how many bytes we should read
159 *
160 * Returns: < 0 errno code on error; otherwise, amount of received bytes.
161 *
162 * The ACK for a BM command is always at least sizeof(*ack) bytes, so
163 * check for that. We don't need to check for device reboots
164 *
165 */
166ssize_t i2400ms_bus_bm_wait_for_ack(struct i2400m *i2400m,
167 struct i2400m_bootrom_header *ack,
168 size_t ack_size)
169{
170 ssize_t result;
171 struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
172 struct sdio_func *func = i2400ms->func;
173 struct device *dev = &func->dev;
174 int size;
175
176 BUG_ON(sizeof(*ack) > ack_size);
177
178 d_fnstart(5, dev, "(i2400m %p ack %p size %zu)\n",
179 i2400m, ack, ack_size);
180
181 result = wait_event_timeout(i2400ms->bm_wfa_wq,
182 i2400ms->bm_ack_size != -EINPROGRESS,
183 2 * HZ);
184 if (result == 0) {
185 result = -ETIMEDOUT;
186 dev_err(dev, "BM: error waiting for an ack\n");
187 goto error_timeout;
188 }
189
190 spin_lock(&i2400m->rx_lock);
191 result = i2400ms->bm_ack_size;
192 BUG_ON(result == -EINPROGRESS);
193 if (result < 0) /* so we exit when rx_release() is called */
194 dev_err(dev, "BM: %s failed: %zd\n", __func__, result);
195 else {
196 size = min(ack_size, i2400ms->bm_ack_size);
197 memcpy(ack, i2400m->bm_ack_buf, size);
198 }
199 /*
200 * Remember always to clear the bm_ack_size to -EINPROGRESS
201 * after the RX data is processed
202 */
203 i2400ms->bm_ack_size = -EINPROGRESS;
204 spin_unlock(&i2400m->rx_lock);
205
206error_timeout:
207 d_fnend(5, dev, "(i2400m %p ack %p size %zu) = %zd\n",
208 i2400m, ack, ack_size, result);
209 return result;
210}
diff --git a/drivers/net/wimax/i2400m/sdio-rx.c b/drivers/net/wimax/i2400m/sdio-rx.c
deleted file mode 100644
index fb6396dd115f..000000000000
--- a/drivers/net/wimax/i2400m/sdio-rx.c
+++ /dev/null
@@ -1,301 +0,0 @@
1/*
2 * Intel Wireless WiMAX Connection 2400m
3 * SDIO RX handling
4 *
5 *
6 * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * * Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * * Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
17 * distribution.
18 * * Neither the name of Intel Corporation nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 *
35 * Intel Corporation <linux-wimax@intel.com>
36 * Dirk Brandewie <dirk.j.brandewie@intel.com>
37 * - Initial implementation
38 *
39 *
40 * This handles the RX path on SDIO.
41 *
42 * The SDIO bus driver calls the "irq" routine when data is available.
43 * This is not a traditional interrupt routine since the SDIO bus
44 * driver calls us from its irq thread context. Because of this
45 * sleeping in the SDIO RX IRQ routine is okay.
46 *
47 * From there on, we obtain the size of the data that is available,
48 * allocate an skb, copy it and then pass it to the generic driver's
49 * RX routine [i2400m_rx()].
50 *
51 * ROADMAP
52 *
53 * i2400ms_irq()
54 * i2400ms_rx()
55 * __i2400ms_rx_get_size()
56 * i2400m_is_boot_barker()
57 * i2400m_rx()
58 *
59 * i2400ms_rx_setup()
60 *
61 * i2400ms_rx_release()
62 */
63#include <linux/workqueue.h>
64#include <linux/wait.h>
65#include <linux/skbuff.h>
66#include <linux/mmc/sdio.h>
67#include <linux/mmc/sdio_func.h>
68#include <linux/slab.h>
69#include "i2400m-sdio.h"
70
71#define D_SUBMODULE rx
72#include "sdio-debug-levels.h"
73
74static const __le32 i2400m_ACK_BARKER[4] = {
75 __constant_cpu_to_le32(I2400M_ACK_BARKER),
76 __constant_cpu_to_le32(I2400M_ACK_BARKER),
77 __constant_cpu_to_le32(I2400M_ACK_BARKER),
78 __constant_cpu_to_le32(I2400M_ACK_BARKER)
79};
80
81
82/*
83 * Read and return the amount of bytes available for RX
84 *
85 * The RX size has to be read like this: byte reads of three
86 * sequential locations; then glue'em together.
87 *
88 * sdio_readl() doesn't work.
89 */
90static ssize_t __i2400ms_rx_get_size(struct i2400ms *i2400ms)
91{
92 int ret, cnt, val;
93 ssize_t rx_size;
94 unsigned xfer_size_addr;
95 struct sdio_func *func = i2400ms->func;
96 struct device *dev = &i2400ms->func->dev;
97
98 d_fnstart(7, dev, "(i2400ms %p)\n", i2400ms);
99 xfer_size_addr = I2400MS_INTR_GET_SIZE_ADDR;
100 rx_size = 0;
101 for (cnt = 0; cnt < 3; cnt++) {
102 val = sdio_readb(func, xfer_size_addr + cnt, &ret);
103 if (ret < 0) {
104 dev_err(dev, "RX: Can't read byte %d of RX size from "
105 "0x%08x: %d\n", cnt, xfer_size_addr + cnt, ret);
106 rx_size = ret;
107 goto error_read;
108 }
109 rx_size = rx_size << 8 | (val & 0xff);
110 }
111 d_printf(6, dev, "RX: rx_size is %ld\n", (long) rx_size);
112error_read:
113 d_fnend(7, dev, "(i2400ms %p) = %ld\n", i2400ms, (long) rx_size);
114 return rx_size;
115}
116
117
118/*
119 * Read data from the device (when in normal)
120 *
121 * Allocate an SKB of the right size, read the data in and then
122 * deliver it to the generic layer.
123 *
124 * We also check for a reboot barker. That means the device died and
125 * we have to reboot it.
126 */
127static
128void i2400ms_rx(struct i2400ms *i2400ms)
129{
130 int ret;
131 struct sdio_func *func = i2400ms->func;
132 struct device *dev = &func->dev;
133 struct i2400m *i2400m = &i2400ms->i2400m;
134 struct sk_buff *skb;
135 ssize_t rx_size;
136
137 d_fnstart(7, dev, "(i2400ms %p)\n", i2400ms);
138 rx_size = __i2400ms_rx_get_size(i2400ms);
139 if (rx_size < 0) {
140 ret = rx_size;
141 goto error_get_size;
142 }
143 /*
144 * Hardware quirk: make sure to clear the INTR status register
145 * AFTER getting the data transfer size.
146 */
147 sdio_writeb(func, 1, I2400MS_INTR_CLEAR_ADDR, &ret);
148
149 ret = -ENOMEM;
150 skb = alloc_skb(rx_size, GFP_ATOMIC);
151 if (NULL == skb) {
152 dev_err(dev, "RX: unable to alloc skb\n");
153 goto error_alloc_skb;
154 }
155 ret = sdio_memcpy_fromio(func, skb->data,
156 I2400MS_DATA_ADDR, rx_size);
157 if (ret < 0) {
158 dev_err(dev, "RX: SDIO data read failed: %d\n", ret);
159 goto error_memcpy_fromio;
160 }
161
162 rmb(); /* make sure we get boot_mode from dev_reset_handle */
163 if (unlikely(i2400m->boot_mode == 1)) {
164 spin_lock(&i2400m->rx_lock);
165 i2400ms->bm_ack_size = rx_size;
166 spin_unlock(&i2400m->rx_lock);
167 memcpy(i2400m->bm_ack_buf, skb->data, rx_size);
168 wake_up(&i2400ms->bm_wfa_wq);
169 d_printf(5, dev, "RX: SDIO boot mode message\n");
170 kfree_skb(skb);
171 goto out;
172 }
173 ret = -EIO;
174 if (unlikely(rx_size < sizeof(__le32))) {
175 dev_err(dev, "HW BUG? only %zu bytes received\n", rx_size);
176 goto error_bad_size;
177 }
178 if (likely(i2400m_is_d2h_barker(skb->data))) {
179 skb_put(skb, rx_size);
180 i2400m_rx(i2400m, skb);
181 } else if (unlikely(i2400m_is_boot_barker(i2400m,
182 skb->data, rx_size))) {
183 ret = i2400m_dev_reset_handle(i2400m, "device rebooted");
184 dev_err(dev, "RX: SDIO reboot barker\n");
185 kfree_skb(skb);
186 } else {
187 i2400m_unknown_barker(i2400m, skb->data, rx_size);
188 kfree_skb(skb);
189 }
190out:
191 d_fnend(7, dev, "(i2400ms %p) = void\n", i2400ms);
192 return;
193
194error_memcpy_fromio:
195 kfree_skb(skb);
196error_alloc_skb:
197error_get_size:
198error_bad_size:
199 d_fnend(7, dev, "(i2400ms %p) = %d\n", i2400ms, ret);
200}
201
202
203/*
204 * Process an interrupt from the SDIO card
205 *
206 * FIXME: need to process other events that are not just ready-to-read
207 *
208 * Checks there is data ready and then proceeds to read it.
209 */
210static
211void i2400ms_irq(struct sdio_func *func)
212{
213 int ret;
214 struct i2400ms *i2400ms = sdio_get_drvdata(func);
215 struct device *dev = &func->dev;
216 int val;
217
218 d_fnstart(6, dev, "(i2400ms %p)\n", i2400ms);
219 val = sdio_readb(func, I2400MS_INTR_STATUS_ADDR, &ret);
220 if (ret < 0) {
221 dev_err(dev, "RX: Can't read interrupt status: %d\n", ret);
222 goto error_no_irq;
223 }
224 if (!val) {
225 dev_err(dev, "RX: BUG? got IRQ but no interrupt ready?\n");
226 goto error_no_irq;
227 }
228 i2400ms_rx(i2400ms);
229error_no_irq:
230 d_fnend(6, dev, "(i2400ms %p) = void\n", i2400ms);
231}
232
233
234/*
235 * Setup SDIO RX
236 *
237 * Hooks up the IRQ handler and then enables IRQs.
238 */
239int i2400ms_rx_setup(struct i2400ms *i2400ms)
240{
241 int result;
242 struct sdio_func *func = i2400ms->func;
243 struct device *dev = &func->dev;
244 struct i2400m *i2400m = &i2400ms->i2400m;
245
246 d_fnstart(5, dev, "(i2400ms %p)\n", i2400ms);
247
248 init_waitqueue_head(&i2400ms->bm_wfa_wq);
249 spin_lock(&i2400m->rx_lock);
250 i2400ms->bm_wait_result = -EINPROGRESS;
251 /*
252 * Before we are about to enable the RX interrupt, make sure
253 * bm_ack_size is cleared to -EINPROGRESS which indicates
254 * no RX interrupt happened yet or the previous interrupt
255 * has been handled, we are ready to take the new interrupt
256 */
257 i2400ms->bm_ack_size = -EINPROGRESS;
258 spin_unlock(&i2400m->rx_lock);
259
260 sdio_claim_host(func);
261 result = sdio_claim_irq(func, i2400ms_irq);
262 if (result < 0) {
263 dev_err(dev, "Cannot claim IRQ: %d\n", result);
264 goto error_irq_claim;
265 }
266 result = 0;
267 sdio_writeb(func, 1, I2400MS_INTR_ENABLE_ADDR, &result);
268 if (result < 0) {
269 sdio_release_irq(func);
270 dev_err(dev, "Failed to enable interrupts %d\n", result);
271 }
272error_irq_claim:
273 sdio_release_host(func);
274 d_fnend(5, dev, "(i2400ms %p) = %d\n", i2400ms, result);
275 return result;
276}
277
278
279/*
280 * Tear down SDIO RX
281 *
282 * Disables IRQs in the device and removes the IRQ handler.
283 */
284void i2400ms_rx_release(struct i2400ms *i2400ms)
285{
286 int result;
287 struct sdio_func *func = i2400ms->func;
288 struct device *dev = &func->dev;
289 struct i2400m *i2400m = &i2400ms->i2400m;
290
291 d_fnstart(5, dev, "(i2400ms %p)\n", i2400ms);
292 spin_lock(&i2400m->rx_lock);
293 i2400ms->bm_ack_size = -EINTR;
294 spin_unlock(&i2400m->rx_lock);
295 wake_up_all(&i2400ms->bm_wfa_wq);
296 sdio_claim_host(func);
297 sdio_writeb(func, 0, I2400MS_INTR_ENABLE_ADDR, &result);
298 sdio_release_irq(func);
299 sdio_release_host(func);
300 d_fnend(5, dev, "(i2400ms %p) = %d\n", i2400ms, result);
301}
diff --git a/drivers/net/wimax/i2400m/sdio-tx.c b/drivers/net/wimax/i2400m/sdio-tx.c
deleted file mode 100644
index b53cd1c80e3e..000000000000
--- a/drivers/net/wimax/i2400m/sdio-tx.c
+++ /dev/null
@@ -1,177 +0,0 @@
1/*
2 * Intel Wireless WiMAX Connection 2400m
3 * SDIO TX transaction backends
4 *
5 *
6 * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * * Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * * Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
17 * distribution.
18 * * Neither the name of Intel Corporation nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 *
35 * Intel Corporation <linux-wimax@intel.com>
36 * Dirk Brandewie <dirk.j.brandewie@intel.com>
37 * - Initial implementation
38 *
39 *
40 * Takes the TX messages in the i2400m's driver TX FIFO and sends them
41 * to the device until there are no more.
42 *
43 * If we fail sending the message, we just drop it. There isn't much
44 * we can do at this point. Most of the traffic is network, which has
45 * recovery methods for dropped packets.
46 *
47 * The SDIO functions are not atomic, so we can't run from the context
48 * where i2400m->bus_tx_kick() [i2400ms_bus_tx_kick()] is being called
49 * (some times atomic). Thus, the actual TX work is deferred to a
50 * workqueue.
51 *
52 * ROADMAP
53 *
54 * i2400ms_bus_tx_kick()
55 * i2400ms_tx_submit() [through workqueue]
56 *
57 * i2400m_tx_setup()
58 *
59 * i2400m_tx_release()
60 */
61#include <linux/mmc/sdio_func.h>
62#include "i2400m-sdio.h"
63
64#define D_SUBMODULE tx
65#include "sdio-debug-levels.h"
66
67
68/*
69 * Pull TX transations from the TX FIFO and send them to the device
70 * until there are no more.
71 */
72static
73void i2400ms_tx_submit(struct work_struct *ws)
74{
75 int result;
76 struct i2400ms *i2400ms = container_of(ws, struct i2400ms, tx_worker);
77 struct i2400m *i2400m = &i2400ms->i2400m;
78 struct sdio_func *func = i2400ms->func;
79 struct device *dev = &func->dev;
80 struct i2400m_msg_hdr *tx_msg;
81 size_t tx_msg_size;
82
83 d_fnstart(4, dev, "(i2400ms %p, i2400m %p)\n", i2400ms, i2400ms);
84
85 while (NULL != (tx_msg = i2400m_tx_msg_get(i2400m, &tx_msg_size))) {
86 d_printf(2, dev, "TX: submitting %zu bytes\n", tx_msg_size);
87 d_dump(5, dev, tx_msg, tx_msg_size);
88
89 sdio_claim_host(func);
90 result = sdio_memcpy_toio(func, 0, tx_msg, tx_msg_size);
91 sdio_release_host(func);
92
93 i2400m_tx_msg_sent(i2400m);
94
95 if (result < 0) {
96 dev_err(dev, "TX: cannot submit TX; tx_msg @%zu %zu B:"
97 " %d\n", (void *) tx_msg - i2400m->tx_buf,
98 tx_msg_size, result);
99 }
100
101 if (result == -ETIMEDOUT) {
102 i2400m_error_recovery(i2400m);
103 break;
104 }
105 d_printf(2, dev, "TX: %zub submitted\n", tx_msg_size);
106 }
107
108 d_fnend(4, dev, "(i2400ms %p) = void\n", i2400ms);
109}
110
111
112/*
113 * The generic driver notifies us that there is data ready for TX
114 *
115 * Schedule a run of i2400ms_tx_submit() to handle it.
116 */
117void i2400ms_bus_tx_kick(struct i2400m *i2400m)
118{
119 struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
120 struct device *dev = &i2400ms->func->dev;
121 unsigned long flags;
122
123 d_fnstart(3, dev, "(i2400m %p) = void\n", i2400m);
124
125 /* schedule tx work, this is because tx may block, therefore
126 * it has to run in a thread context.
127 */
128 spin_lock_irqsave(&i2400m->tx_lock, flags);
129 if (i2400ms->tx_workqueue != NULL)
130 queue_work(i2400ms->tx_workqueue, &i2400ms->tx_worker);
131 spin_unlock_irqrestore(&i2400m->tx_lock, flags);
132
133 d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
134}
135
136int i2400ms_tx_setup(struct i2400ms *i2400ms)
137{
138 int result;
139 struct device *dev = &i2400ms->func->dev;
140 struct i2400m *i2400m = &i2400ms->i2400m;
141 struct workqueue_struct *tx_workqueue;
142 unsigned long flags;
143
144 d_fnstart(5, dev, "(i2400ms %p)\n", i2400ms);
145
146 INIT_WORK(&i2400ms->tx_worker, i2400ms_tx_submit);
147 snprintf(i2400ms->tx_wq_name, sizeof(i2400ms->tx_wq_name),
148 "%s-tx", i2400m->wimax_dev.name);
149 tx_workqueue =
150 create_singlethread_workqueue(i2400ms->tx_wq_name);
151 if (tx_workqueue == NULL) {
152 dev_err(dev, "TX: failed to create workqueue\n");
153 result = -ENOMEM;
154 } else
155 result = 0;
156 spin_lock_irqsave(&i2400m->tx_lock, flags);
157 i2400ms->tx_workqueue = tx_workqueue;
158 spin_unlock_irqrestore(&i2400m->tx_lock, flags);
159 d_fnend(5, dev, "(i2400ms %p) = %d\n", i2400ms, result);
160 return result;
161}
162
163void i2400ms_tx_release(struct i2400ms *i2400ms)
164{
165 struct i2400m *i2400m = &i2400ms->i2400m;
166 struct workqueue_struct *tx_workqueue;
167 unsigned long flags;
168
169 tx_workqueue = i2400ms->tx_workqueue;
170
171 spin_lock_irqsave(&i2400m->tx_lock, flags);
172 i2400ms->tx_workqueue = NULL;
173 spin_unlock_irqrestore(&i2400m->tx_lock, flags);
174
175 if (tx_workqueue)
176 destroy_workqueue(tx_workqueue);
177}
diff --git a/drivers/net/wimax/i2400m/sdio.c b/drivers/net/wimax/i2400m/sdio.c
deleted file mode 100644
index 21a9edd6e75d..000000000000
--- a/drivers/net/wimax/i2400m/sdio.c
+++ /dev/null
@@ -1,602 +0,0 @@
1/*
2 * Intel Wireless WiMAX Connection 2400m
3 * Linux driver model glue for the SDIO device, reset & fw upload
4 *
5 *
6 * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com>
7 * Dirk Brandewie <dirk.j.brandewie@intel.com>
8 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
9 * Yanir Lubetkin <yanirx.lubetkin@intel.com>
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License version
13 * 2 as published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
23 * 02110-1301, USA.
24 *
25 *
26 * See i2400m-sdio.h for a general description of this driver.
27 *
28 * This file implements driver model glue, and hook ups for the
29 * generic driver to implement the bus-specific functions (device
30 * communication setup/tear down, firmware upload and resetting).
31 *
32 * ROADMAP
33 *
34 * i2400m_probe()
35 * alloc_netdev()
36 * i2400ms_netdev_setup()
37 * i2400ms_init()
38 * i2400m_netdev_setup()
39 * i2400ms_enable_function()
40 * i2400m_setup()
41 *
42 * i2400m_remove()
43 * i2400m_release()
44 * free_netdev(net_dev)
45 *
46 * i2400ms_bus_reset() Called by i2400m_reset
47 * __i2400ms_reset()
48 * __i2400ms_send_barker()
49 */
50
51#include <linux/slab.h>
52#include <linux/debugfs.h>
53#include <linux/mmc/sdio_ids.h>
54#include <linux/mmc/sdio.h>
55#include <linux/mmc/sdio_func.h>
56#include "i2400m-sdio.h"
57#include <linux/wimax/i2400m.h>
58#include <linux/module.h>
59
60#define D_SUBMODULE main
61#include "sdio-debug-levels.h"
62
/* IOE WiMAX function timeout in seconds */
static int ioe_timeout = 2;
module_param(ioe_timeout, int, 0);

/* "debug" module parameter string; parsed into D_LEVEL by
 * d_parse_params() in i2400ms_driver_init(). */
static char i2400ms_debug_params[128];
module_param_string(debug, i2400ms_debug_params, sizeof(i2400ms_debug_params),
		    0644);
MODULE_PARM_DESC(debug,
		 "String of space-separated NAME:VALUE pairs, where NAMEs "
		 "are the different debug submodules and VALUE are the "
		 "initial debug value to set.");

/* Our firmware file name list */
static const char *i2400ms_bus_fw_names[] = {
#define I2400MS_FW_FILE_NAME "i2400m-fw-sdio-1.3.sbcf"
	I2400MS_FW_FILE_NAME,
	NULL
};


/* Register pokes handed to the generic core via
 * i2400m->bus_bm_pokes_table in i2400ms_probe(); presumably applied
 * during boot-mode/firmware load -- confirm against i2400m_setup(). */
static const struct i2400m_poke_table i2400ms_pokes[] = {
	I2400M_FW_POKE(0x6BE260, 0x00000088),
	I2400M_FW_POKE(0x080550, 0x00000005),
	I2400M_FW_POKE(0xAE0000, 0x00000000),
	I2400M_FW_POKE(0x000000, 0x00000000), /* MUST be 0 terminated or bad
					       * things will happen */
};
90
91/*
92 * Enable the SDIO function
93 *
94 * Tries to enable the SDIO function; might fail if it is still not
95 * ready (in some hardware, the SDIO WiMAX function is only enabled
96 * when we ask it to explicitly doing). Tries until a timeout is
97 * reached.
98 *
99 * The @maxtries argument indicates how many times (at most) it should
100 * be tried to enable the function. 0 means forever. This acts along
101 * with the timeout (ie: it'll stop trying as soon as the maximum
102 * number of tries is reached _or_ as soon as the timeout is reached).
103 *
104 * The reverse of this is...sdio_disable_function()
105 *
106 * Returns: 0 if the SDIO function was enabled, < 0 errno code on
107 * error (-ENODEV when it was unable to enable the function).
108 */
static
int i2400ms_enable_function(struct i2400ms *i2400ms, unsigned maxtries)
{
	struct sdio_func *func = i2400ms->func;
	u64 timeout;
	int err;
	struct device *dev = &func->dev;
	unsigned tries = 0;	/* enable attempts so far (vs @maxtries) */

	d_fnstart(3, dev, "(func %p)\n", func);
	/* Setup timeout (FIXME: This needs to read the CIS table to
	 * get a real timeout) and then wait for the device to signal
	 * it is ready */
	timeout = get_jiffies_64() + ioe_timeout * HZ;
	err = -ENODEV;
	while (err != 0 && time_before64(get_jiffies_64(), timeout)) {
		sdio_claim_host(func);
		/*
		 * There is a sillicon bug on the IWMC3200, where the
		 * IOE timeout will cause problems on Moorestown
		 * platforms (system hang). We explicitly overwrite
		 * func->enable_timeout here to work around the issue.
		 */
		if (i2400ms->iwmc3200)
			func->enable_timeout = IWMC3200_IOR_TIMEOUT;
		err = sdio_enable_func(func);
		if (0 == err) {
			sdio_release_host(func);
			d_printf(2, dev, "SDIO function enabled\n");
			goto function_enabled;
		}
		d_printf(2, dev, "SDIO function failed to enable: %d\n", err);
		sdio_release_host(func);
		/* Retry budget is only enforced when maxtries > 0;
		 * maxtries == 0 means "retry until the deadline". */
		if (maxtries > 0 && ++tries >= maxtries) {
			err = -ETIME;
			break;
		}
		msleep(I2400MS_INIT_SLEEP_INTERVAL);
	}
	/* If timed out, device is not there yet -- get -ENODEV so
	 * the device driver core will retry later on. */
	if (err == -ETIME) {
		dev_err(dev, "Can't enable WiMAX function; "
			" has the function been enabled?\n");
		err = -ENODEV;
	}
function_enabled:
	d_fnend(3, dev, "(func %p) = %d\n", func, err);
	return err;
}
159
160
161/*
162 * Setup minimal device communication infrastructure needed to at
163 * least be able to update the firmware.
164 *
165 * Note the ugly trick: if we are in the probe path
166 * (i2400ms->debugfs_dentry == NULL), we only retry function
167 * enablement one, to avoid racing with the iwmc3200 top controller.
168 */
static
int i2400ms_bus_setup(struct i2400m *i2400m)
{
	int result;
	struct i2400ms *i2400ms =
		container_of(i2400m, struct i2400ms, i2400m);
	struct device *dev = i2400m_dev(i2400m);
	struct sdio_func *func = i2400ms->func;
	int retries;

	/* Set the SDIO block size first; TX/RX transfers are sized on
	 * I2400MS_BLK_SIZE. */
	sdio_claim_host(func);
	result = sdio_set_block_size(func, I2400MS_BLK_SIZE);
	sdio_release_host(func);
	if (result < 0) {
		dev_err(dev, "Failed to set block size: %d\n", result);
		goto error_set_blk_size;
	}

	/* iwmc3200 in the probe path (debugfs dentry not created yet):
	 * only one enable attempt, to avoid racing with the top
	 * controller -- see the comment above this function.
	 * retries == 0 means "retry until the internal timeout". */
	if (i2400ms->iwmc3200 && i2400ms->debugfs_dentry == NULL)
		retries = 1;
	else
		retries = 0;
	result = i2400ms_enable_function(i2400ms, retries);
	if (result < 0) {
		dev_err(dev, "Cannot enable SDIO function: %d\n", result);
		goto error_func_enable;
	}

	result = i2400ms_tx_setup(i2400ms);
	if (result < 0)
		goto error_tx_setup;
	result = i2400ms_rx_setup(i2400ms);
	if (result < 0)
		goto error_rx_setup;
	return 0;

	/* Error unwind: undo in reverse order of setup. */
error_rx_setup:
	i2400ms_tx_release(i2400ms);
error_tx_setup:
	sdio_claim_host(func);
	sdio_disable_func(func);
	sdio_release_host(func);
error_func_enable:
error_set_blk_size:
	return result;
}
215
216
217/*
218 * Tear down minimal device communication infrastructure needed to at
219 * least be able to update the firmware.
220 */
221static
222void i2400ms_bus_release(struct i2400m *i2400m)
223{
224 struct i2400ms *i2400ms =
225 container_of(i2400m, struct i2400ms, i2400m);
226 struct sdio_func *func = i2400ms->func;
227
228 i2400ms_rx_release(i2400ms);
229 i2400ms_tx_release(i2400ms);
230 sdio_claim_host(func);
231 sdio_disable_func(func);
232 sdio_release_host(func);
233}
234
235
236/*
237 * Setup driver resources needed to communicate with the device
238 *
239 * The fw needs some time to settle, and it was just uploaded,
240 * so give it a break first. I'd prefer to just wait for the device to
241 * send something, but seems the poking we do to enable SDIO stuff
242 * interferes with it, so just give it a break before starting...
243 */
244static
245int i2400ms_bus_dev_start(struct i2400m *i2400m)
246{
247 struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
248 struct sdio_func *func = i2400ms->func;
249 struct device *dev = &func->dev;
250
251 d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
252 msleep(200);
253 d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, 0);
254 return 0;
255}
256
257
258/*
259 * Sends a barker buffer to the device
260 *
261 * This helper will allocate a kmalloced buffer and use it to transmit
262 * (then free it). Reason for this is that the SDIO host controller
263 * expects alignment (unknown exactly which) which the stack won't
264 * really provide and certain arches/host-controller combinations
265 * cannot use stack/vmalloc/text areas for DMA transfers.
266 */
267static
268int __i2400ms_send_barker(struct i2400ms *i2400ms,
269 const __le32 *barker, size_t barker_size)
270{
271 int ret;
272 struct sdio_func *func = i2400ms->func;
273 struct device *dev = &func->dev;
274 void *buffer;
275
276 ret = -ENOMEM;
277 buffer = kmalloc(I2400MS_BLK_SIZE, GFP_KERNEL);
278 if (buffer == NULL)
279 goto error_kzalloc;
280
281 memcpy(buffer, barker, barker_size);
282 sdio_claim_host(func);
283 ret = sdio_memcpy_toio(func, 0, buffer, I2400MS_BLK_SIZE);
284 sdio_release_host(func);
285
286 if (ret < 0)
287 d_printf(0, dev, "E: barker error: %d\n", ret);
288
289 kfree(buffer);
290error_kzalloc:
291 return ret;
292}
293
294
295/*
296 * Reset a device at different levels (warm, cold or bus)
297 *
298 * @i2400ms: device descriptor
299 * @reset_type: soft, warm or bus reset (I2400M_RT_WARM/SOFT/BUS)
300 *
301 * FIXME: not tested -- need to confirm expected effects
302 *
303 * Warm and cold resets get an SDIO reset if they fail (unimplemented)
304 *
305 * Warm reset:
306 *
307 * The device will be fully reset internally, but won't be
308 * disconnected from the bus (so no reenumeration will
309 * happen). Firmware upload will be necessary.
310 *
311 * The device will send a reboot barker that will trigger the driver
312 * to reinitialize the state via __i2400m_dev_reset_handle.
313 *
314 *
315 * Cold and bus reset:
316 *
317 * The device will be fully reset internally, disconnected from the
318 * bus an a reenumeration will happen. Firmware upload will be
319 * necessary. Thus, we don't do any locking or struct
320 * reinitialization, as we are going to be fully disconnected and
321 * reenumerated.
322 *
323 * Note we need to return -ENODEV if a warm reset was requested and we
324 * had to resort to a bus reset. See i2400m_op_reset(), wimax_reset()
325 * and wimax_dev->op_reset.
326 *
327 * WARNING: no driver state saved/fixed
328 */
static
int i2400ms_bus_reset(struct i2400m *i2400m, enum i2400m_reset_type rt)
{
	int result = 0;
	struct i2400ms *i2400ms =
		container_of(i2400m, struct i2400ms, i2400m);
	struct device *dev = i2400m_dev(i2400m);
	/* Barker word repeated four times -- presumably to pad the
	 * message the device expects; confirm against the boot-mode
	 * protocol docs. */
	static const __le32 i2400m_WARM_BOOT_BARKER[4] = {
		cpu_to_le32(I2400M_WARM_RESET_BARKER),
		cpu_to_le32(I2400M_WARM_RESET_BARKER),
		cpu_to_le32(I2400M_WARM_RESET_BARKER),
		cpu_to_le32(I2400M_WARM_RESET_BARKER),
	};
	static const __le32 i2400m_COLD_BOOT_BARKER[4] = {
		cpu_to_le32(I2400M_COLD_RESET_BARKER),
		cpu_to_le32(I2400M_COLD_RESET_BARKER),
		cpu_to_le32(I2400M_COLD_RESET_BARKER),
		cpu_to_le32(I2400M_COLD_RESET_BARKER),
	};

	if (rt == I2400M_RT_WARM)
		result = __i2400ms_send_barker(i2400ms, i2400m_WARM_BOOT_BARKER,
					       sizeof(i2400m_WARM_BOOT_BARKER));
	else if (rt == I2400M_RT_COLD)
		result = __i2400ms_send_barker(i2400ms, i2400m_COLD_BOOT_BARKER,
					       sizeof(i2400m_COLD_BOOT_BARKER));
	else if (rt == I2400M_RT_BUS) {
		/* Bus reset = full teardown + rebuild of the SDIO side.
		 * Also the landing point of the fallback goto below when
		 * a warm/cold reset fails. */
do_bus_reset:

		i2400ms_bus_release(i2400m);

		/* Wait for the device to settle */
		msleep(40);

		result = i2400ms_bus_setup(i2400m);
	} else
		BUG();
	/* Warm/cold reset failed: escalate to a bus reset.
	 * NOTE(review): after the fallback, this returns
	 * i2400ms_bus_setup()'s result, which may be 0 -- the header
	 * comment above says -ENODEV should be returned when a warm
	 * reset escalates; confirm against i2400m_op_reset() /
	 * wimax_reset() expectations. */
	if (result < 0 && rt != I2400M_RT_BUS) {
		dev_err(dev, "%s reset failed (%d); trying SDIO reset\n",
			rt == I2400M_RT_WARM ? "warm" : "cold", result);
		rt = I2400M_RT_BUS;
		goto do_bus_reset;
	}
	return result;
}
374
375
376static
377void i2400ms_netdev_setup(struct net_device *net_dev)
378{
379 struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
380 struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
381 i2400ms_init(i2400ms);
382 i2400m_netdev_setup(net_dev);
383}
384
385
/*
 * Debug levels control; see debug.h
 *
 * One entry per debug submodule of this driver (main/tx/rx/fw).
 * Levels are seeded from the "debug" module parameter at init time
 * and exposed per-submodule in debugfs by i2400ms_debugfs_add().
 */
struct d_level D_LEVEL[] = {
	D_SUBMODULE_DEFINE(main),
	D_SUBMODULE_DEFINE(tx),
	D_SUBMODULE_DEFINE(rx),
	D_SUBMODULE_DEFINE(fw),
};
size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
396
397
/*
 * Register one submodule debug-level file in debugfs.
 *
 * WARNING: expands to a 'goto error' and an assignment to 'result',
 * so it may only be used inside a function that declares a local
 * 'result' and defines an 'error:' label (see i2400ms_debugfs_add()).
 */
#define __debugfs_register(prefix, name, parent)		\
do {								\
	result = d_level_register_debugfs(prefix, name, parent); \
	if (result < 0)						\
		goto error;					\
} while (0)
404
405
/*
 * Create the "i2400m-sdio" debugfs directory under the wimax device's
 * debugfs dentry and register the per-submodule debug level knobs.
 *
 * Returns: 0 on success, or when debugfs is not available (-ENODEV
 * from debugfs_create_dir() is treated as "no debugfs support");
 * negative errno otherwise.
 */
static
int i2400ms_debugfs_add(struct i2400ms *i2400ms)
{
	int result;
	struct dentry *dentry = i2400ms->i2400m.wimax_dev.debugfs_dentry;

	dentry = debugfs_create_dir("i2400m-sdio", dentry);
	result = PTR_ERR(dentry);
	/* NOTE(review): only ERR_PTR returns are handled; in this
	 * kernel era debugfs_create_dir() could also return NULL on
	 * allocation failure, which would fall through -- confirm. */
	if (IS_ERR(dentry)) {
		if (result == -ENODEV)
			result = 0;	/* No debugfs support */
		goto error;
	}
	i2400ms->debugfs_dentry = dentry;
	__debugfs_register("dl_", main, dentry);
	__debugfs_register("dl_", tx, dentry);
	__debugfs_register("dl_", rx, dentry);
	__debugfs_register("dl_", fw, dentry);

	return 0;

error:
	/* __debugfs_register() failures jump here too: remove whatever
	 * got created and clear our dentry (NULL-safe). */
	debugfs_remove_recursive(i2400ms->debugfs_dentry);
	i2400ms->debugfs_dentry = NULL;
	return result;
}
432
433
/* Device type set on the net_device in i2400ms_probe(); shows up as
 * "wimax" in sysfs. */
static struct device_type i2400ms_type = {
	.name	= "wimax",
};
437
438/*
439 * Probe a i2400m interface and register it
440 *
441 * @func: SDIO function
442 * @id: SDIO device ID
443 * @returns: 0 if ok, < 0 errno code on error.
444 *
445 * Alloc a net device, initialize the bus-specific details and then
446 * calls the bus-generic initialization routine. That will register
447 * the wimax and netdev devices, upload the firmware [using
448 * _bus_bm_*()], call _bus_dev_start() to finalize the setup of the
449 * communication with the device and then will start to talk to it to
450 * finnish setting it up.
451 *
452 * Initialization is tricky; some instances of the hw are packed with
453 * others in a way that requires a third driver that enables the WiMAX
454 * function. In those cases, we can't enable the SDIO function and
455 * we'll return with -ENODEV. When the driver that enables the WiMAX
456 * function does its thing, it has to do a bus_rescan_devices() on the
457 * SDIO bus so this driver is called again to enumerate the WiMAX
458 * function.
459 */
static
int i2400ms_probe(struct sdio_func *func,
		  const struct sdio_device_id *id)
{
	int result;
	struct net_device *net_dev;
	struct device *dev = &func->dev;
	struct i2400m *i2400m;
	struct i2400ms *i2400ms;

	/* Allocate instance [calls i2400m_netdev_setup() on it]. */
	result = -ENOMEM;
	net_dev = alloc_netdev(sizeof(*i2400ms), "wmx%d",
			       i2400ms_netdev_setup);
	if (net_dev == NULL) {
		dev_err(dev, "no memory for network device instance\n");
		goto error_alloc_netdev;
	}
	SET_NETDEV_DEV(net_dev, dev);
	SET_NETDEV_DEVTYPE(net_dev, &i2400ms_type);
	/* i2400ms embeds i2400m, which is the netdev's private area. */
	i2400m = net_dev_to_i2400m(net_dev);
	i2400ms = container_of(i2400m, struct i2400ms, i2400m);
	i2400m->wimax_dev.net_dev = net_dev;
	i2400ms->func = func;
	sdio_set_drvdata(func, i2400ms);

	i2400m->bus_tx_block_size = I2400MS_BLK_SIZE;
	/*
	 * Room required in the TX queue for SDIO message to accommodate
	 * a smallest payload while allocating header space is 224 bytes,
	 * which is the smallest message size(the block size 256 bytes)
	 * minus the smallest message header size(32 bytes).
	 */
	i2400m->bus_tx_room_min = I2400MS_BLK_SIZE - I2400M_PL_ALIGN * 2;
	i2400m->bus_pl_size_max = I2400MS_PL_SIZE_MAX;
	/* Hook the bus-generic i2400m core up to our SDIO-specific
	 * setup/start/release/TX-kick/reset/boot-mode implementations. */
	i2400m->bus_setup = i2400ms_bus_setup;
	i2400m->bus_dev_start = i2400ms_bus_dev_start;
	i2400m->bus_dev_stop = NULL;
	i2400m->bus_release = i2400ms_bus_release;
	i2400m->bus_tx_kick = i2400ms_bus_tx_kick;
	i2400m->bus_reset = i2400ms_bus_reset;
	/* The iwmc3200-wimax sometimes requires the driver to try
	 * hard when we paint it into a corner. */
	i2400m->bus_bm_retries = I2400M_SDIO_BOOT_RETRIES;
	i2400m->bus_bm_cmd_send = i2400ms_bus_bm_cmd_send;
	i2400m->bus_bm_wait_for_ack = i2400ms_bus_bm_wait_for_ack;
	i2400m->bus_fw_names = i2400ms_bus_fw_names;
	i2400m->bus_bm_mac_addr_impaired = 1;
	i2400m->bus_bm_pokes_table = &i2400ms_pokes[0];

	/* iwmc3200 variants need quirks elsewhere (IOE timeout
	 * override, single enable retry on probe); flag them here. */
	switch (func->device) {
	case SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX:
	case SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX_2G5:
		i2400ms->iwmc3200 = 1;
		break;
	default:
		i2400ms->iwmc3200 = 0;
	}

	result = i2400m_setup(i2400m, I2400M_BRI_NO_REBOOT);
	if (result < 0) {
		dev_err(dev, "cannot setup device: %d\n", result);
		goto error_setup;
	}

	result = i2400ms_debugfs_add(i2400ms);
	if (result < 0) {
		dev_err(dev, "cannot create SDIO debugfs: %d\n",
			result);
		goto error_debugfs_add;
	}
	return 0;

	/* Error unwind: undo in reverse order of setup. */
error_debugfs_add:
	i2400m_release(i2400m);
error_setup:
	sdio_set_drvdata(func, NULL);
	free_netdev(net_dev);
error_alloc_netdev:
	return result;
}
541
542
543static
544void i2400ms_remove(struct sdio_func *func)
545{
546 struct device *dev = &func->dev;
547 struct i2400ms *i2400ms = sdio_get_drvdata(func);
548 struct i2400m *i2400m = &i2400ms->i2400m;
549 struct net_device *net_dev = i2400m->wimax_dev.net_dev;
550
551 d_fnstart(3, dev, "SDIO func %p\n", func);
552 debugfs_remove_recursive(i2400ms->debugfs_dentry);
553 i2400ms->debugfs_dentry = NULL;
554 i2400m_release(i2400m);
555 sdio_set_drvdata(func, NULL);
556 free_netdev(net_dev);
557 d_fnend(3, dev, "SDIO func %p\n", func);
558}
559
/* SDIO device IDs this driver binds to: the two Intel iwmc3200 WiMAX
 * function variants. Keep in sync with the iwmc3200 quirk switch in
 * i2400ms_probe(). */
static
const struct sdio_device_id i2400ms_sdio_ids[] = {
	/* Intel: i2400m WiMAX (iwmc3200) over SDIO */
	{ SDIO_DEVICE(SDIO_VENDOR_ID_INTEL,
		      SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX) },
	{ SDIO_DEVICE(SDIO_VENDOR_ID_INTEL,
		      SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX_2G5) },
	{ /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(sdio, i2400ms_sdio_ids);
570
571
/* SDIO bus driver hookup; probe/remove do all the heavy lifting. */
static
struct sdio_driver i2400m_sdio_driver = {
	.name = KBUILD_MODNAME,
	.probe = i2400ms_probe,
	.remove = i2400ms_remove,
	.id_table = i2400ms_sdio_ids,
};
579
580
static
int __init i2400ms_driver_init(void)
{
	/* Seed the debug levels from the "debug" module parameter
	 * before registering, so early probes log at the right level. */
	d_parse_params(D_LEVEL, D_LEVEL_SIZE, i2400ms_debug_params,
		       "i2400m_sdio.debug");
	return sdio_register_driver(&i2400m_sdio_driver);
}
589
590
static
void __exit i2400ms_driver_exit(void)
{
	/* Undo i2400ms_driver_init(): detach from the SDIO core. */
	sdio_unregister_driver(&i2400m_sdio_driver);
}
597
598
599MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>");
600MODULE_DESCRIPTION("Intel 2400M WiMAX networking for SDIO");
601MODULE_LICENSE("GPL");
602MODULE_FIRMWARE(I2400MS_FW_FILE_NAME);
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 5f58fa53238c..6deaae18db57 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -276,7 +276,6 @@ source "drivers/net/wireless/hostap/Kconfig"
276source "drivers/net/wireless/ipw2x00/Kconfig" 276source "drivers/net/wireless/ipw2x00/Kconfig"
277source "drivers/net/wireless/iwlwifi/Kconfig" 277source "drivers/net/wireless/iwlwifi/Kconfig"
278source "drivers/net/wireless/iwlegacy/Kconfig" 278source "drivers/net/wireless/iwlegacy/Kconfig"
279source "drivers/net/wireless/iwmc3200wifi/Kconfig"
280source "drivers/net/wireless/libertas/Kconfig" 279source "drivers/net/wireless/libertas/Kconfig"
281source "drivers/net/wireless/orinoco/Kconfig" 280source "drivers/net/wireless/orinoco/Kconfig"
282source "drivers/net/wireless/p54/Kconfig" 281source "drivers/net/wireless/p54/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 0ce218b931d4..062dfdff6364 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -53,8 +53,6 @@ obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o
53 53
54obj-$(CONFIG_WL_TI) += ti/ 54obj-$(CONFIG_WL_TI) += ti/
55 55
56obj-$(CONFIG_IWM) += iwmc3200wifi/
57
58obj-$(CONFIG_MWIFIEX) += mwifiex/ 56obj-$(CONFIG_MWIFIEX) += mwifiex/
59 57
60obj-$(CONFIG_BRCMFMAC) += brcm80211/ 58obj-$(CONFIG_BRCMFMAC) += brcm80211/
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index 0ac09a2bd144..689a71c1af71 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1738,8 +1738,7 @@ static int adm8211_alloc_rings(struct ieee80211_hw *dev)
1738 return -ENOMEM; 1738 return -ENOMEM;
1739 } 1739 }
1740 1740
1741 priv->tx_ring = (struct adm8211_desc *)(priv->rx_ring + 1741 priv->tx_ring = priv->rx_ring + priv->rx_ring_size;
1742 priv->rx_ring_size);
1743 priv->tx_ring_dma = priv->rx_ring_dma + 1742 priv->tx_ring_dma = priv->rx_ring_dma +
1744 sizeof(struct adm8211_desc) * priv->rx_ring_size; 1743 sizeof(struct adm8211_desc) * priv->rx_ring_size;
1745 1744
@@ -1855,7 +1854,7 @@ static int __devinit adm8211_probe(struct pci_dev *pdev,
1855 if (!is_valid_ether_addr(perm_addr)) { 1854 if (!is_valid_ether_addr(perm_addr)) {
1856 printk(KERN_WARNING "%s (adm8211): Invalid hwaddr in EEPROM!\n", 1855 printk(KERN_WARNING "%s (adm8211): Invalid hwaddr in EEPROM!\n",
1857 pci_name(pdev)); 1856 pci_name(pdev));
1858 random_ether_addr(perm_addr); 1857 eth_random_addr(perm_addr);
1859 } 1858 }
1860 SET_IEEE80211_PERM_ADDR(dev, perm_addr); 1859 SET_IEEE80211_PERM_ADDR(dev, perm_addr);
1861 1860
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index a747c632597a..f9f15bb3f03a 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -1997,7 +1997,7 @@ static int mpi_send_packet (struct net_device *dev)
1997 * ------------------------------------------------ 1997 * ------------------------------------------------
1998 */ 1998 */
1999 1999
2000 memcpy((char *)ai->txfids[0].virtual_host_addr, 2000 memcpy(ai->txfids[0].virtual_host_addr,
2001 (char *)&wifictlhdr8023, sizeof(wifictlhdr8023)); 2001 (char *)&wifictlhdr8023, sizeof(wifictlhdr8023));
2002 2002
2003 payloadLen = (__le16 *)(ai->txfids[0].virtual_host_addr + 2003 payloadLen = (__le16 *)(ai->txfids[0].virtual_host_addr +
@@ -4212,7 +4212,7 @@ static int PC4500_writerid(struct airo_info *ai, u16 rid,
4212 airo_print_err(ai->dev->name, "%s: len=%d", __func__, len); 4212 airo_print_err(ai->dev->name, "%s: len=%d", __func__, len);
4213 rc = -1; 4213 rc = -1;
4214 } else { 4214 } else {
4215 memcpy((char *)ai->config_desc.virtual_host_addr, 4215 memcpy(ai->config_desc.virtual_host_addr,
4216 pBuf, len); 4216 pBuf, len);
4217 4217
4218 rc = issuecommand(ai, &cmd, &rsp); 4218 rc = issuecommand(ai, &cmd, &rsp);
diff --git a/drivers/net/wireless/ath/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig
index e18a9aa7b6ca..338c5c42357d 100644
--- a/drivers/net/wireless/ath/ath5k/Kconfig
+++ b/drivers/net/wireless/ath/ath5k/Kconfig
@@ -64,3 +64,11 @@ config ATH5K_PCI
64 ---help--- 64 ---help---
65 This adds support for PCI type chipsets of the 5xxx Atheros 65 This adds support for PCI type chipsets of the 5xxx Atheros
66 family. 66 family.
67
68config ATH5K_TEST_CHANNELS
69 bool "Enables testing channels on ath5k"
70 depends on ATH5K && CFG80211_CERTIFICATION_ONUS
71 ---help---
72 This enables non-standard IEEE 802.11 channels on ath5k, which
73 can be used for research purposes. This option should be disabled
74 unless doing research.
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 44ad6fe0278f..8c4c040a47b8 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -74,10 +74,6 @@ bool ath5k_modparam_nohwcrypt;
74module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO); 74module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
75MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); 75MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
76 76
77static bool modparam_all_channels;
78module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
79MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");
80
81static bool modparam_fastchanswitch; 77static bool modparam_fastchanswitch;
82module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO); 78module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
83MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios."); 79MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");
@@ -258,8 +254,15 @@ static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *re
258\********************/ 254\********************/
259 255
260/* 256/*
261 * Returns true for the channel numbers used without all_channels modparam. 257 * Returns true for the channel numbers used.
262 */ 258 */
259#ifdef CONFIG_ATH5K_TEST_CHANNELS
260static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
261{
262 return true;
263}
264
265#else
263static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band) 266static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
264{ 267{
265 if (band == IEEE80211_BAND_2GHZ && chan <= 14) 268 if (band == IEEE80211_BAND_2GHZ && chan <= 14)
@@ -276,6 +279,7 @@ static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
276 /* 802.11j 4.9GHz (20MHz) */ 279 /* 802.11j 4.9GHz (20MHz) */
277 (chan == 184 || chan == 188 || chan == 192 || chan == 196)); 280 (chan == 184 || chan == 188 || chan == 192 || chan == 196));
278} 281}
282#endif
279 283
280static unsigned int 284static unsigned int
281ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels, 285ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
@@ -316,8 +320,7 @@ ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
316 if (!ath5k_channel_ok(ah, &channels[count])) 320 if (!ath5k_channel_ok(ah, &channels[count]))
317 continue; 321 continue;
318 322
319 if (!modparam_all_channels && 323 if (!ath5k_is_standard_channel(ch, band))
320 !ath5k_is_standard_channel(ch, band))
321 continue; 324 continue;
322 325
323 count++; 326 count++;
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index b869a358ce43..aca1d2689e90 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -53,6 +53,11 @@
53 53
54#define DEFAULT_BG_SCAN_PERIOD 60 54#define DEFAULT_BG_SCAN_PERIOD 60
55 55
56struct ath6kl_cfg80211_match_probe_ssid {
57 struct cfg80211_ssid ssid;
58 u8 flag;
59};
60
56static struct ieee80211_rate ath6kl_rates[] = { 61static struct ieee80211_rate ath6kl_rates[] = {
57 RATETAB_ENT(10, 0x1, 0), 62 RATETAB_ENT(10, 0x1, 0),
58 RATETAB_ENT(20, 0x2, 0), 63 RATETAB_ENT(20, 0x2, 0),
@@ -576,6 +581,9 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
576 581
577 vif->nw_type = vif->next_mode; 582 vif->nw_type = vif->next_mode;
578 583
584 /* enable enhanced bmiss detection if applicable */
585 ath6kl_cfg80211_sta_bmiss_enhance(vif, true);
586
579 if (vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT) 587 if (vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)
580 nw_subtype = SUBTYPE_P2PCLIENT; 588 nw_subtype = SUBTYPE_P2PCLIENT;
581 589
@@ -852,20 +860,6 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
852 } 860 }
853 } 861 }
854 862
855 /*
856 * Send a disconnect command to target when a disconnect event is
857 * received with reason code other than 3 (DISCONNECT_CMD - disconnect
858 * request from host) to make the firmware stop trying to connect even
859 * after giving disconnect event. There will be one more disconnect
860 * event for this disconnect command with reason code DISCONNECT_CMD
861 * which will be notified to cfg80211.
862 */
863
864 if (reason != DISCONNECT_CMD) {
865 ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx);
866 return;
867 }
868
869 clear_bit(CONNECT_PEND, &vif->flags); 863 clear_bit(CONNECT_PEND, &vif->flags);
870 864
871 if (vif->sme_state == SME_CONNECTING) { 865 if (vif->sme_state == SME_CONNECTING) {
@@ -875,32 +869,96 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
875 WLAN_STATUS_UNSPECIFIED_FAILURE, 869 WLAN_STATUS_UNSPECIFIED_FAILURE,
876 GFP_KERNEL); 870 GFP_KERNEL);
877 } else if (vif->sme_state == SME_CONNECTED) { 871 } else if (vif->sme_state == SME_CONNECTED) {
878 cfg80211_disconnected(vif->ndev, reason, 872 cfg80211_disconnected(vif->ndev, proto_reason,
879 NULL, 0, GFP_KERNEL); 873 NULL, 0, GFP_KERNEL);
880 } 874 }
881 875
882 vif->sme_state = SME_DISCONNECTED; 876 vif->sme_state = SME_DISCONNECTED;
877
878 /*
879 * Send a disconnect command to target when a disconnect event is
880 * received with reason code other than 3 (DISCONNECT_CMD - disconnect
881 * request from host) to make the firmware stop trying to connect even
882 * after giving disconnect event. There will be one more disconnect
883 * event for this disconnect command with reason code DISCONNECT_CMD
884 * which won't be notified to cfg80211.
885 */
886 if (reason != DISCONNECT_CMD)
887 ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx);
883} 888}
884 889
885static int ath6kl_set_probed_ssids(struct ath6kl *ar, 890static int ath6kl_set_probed_ssids(struct ath6kl *ar,
886 struct ath6kl_vif *vif, 891 struct ath6kl_vif *vif,
887 struct cfg80211_ssid *ssids, int n_ssids) 892 struct cfg80211_ssid *ssids, int n_ssids,
893 struct cfg80211_match_set *match_set,
894 int n_match_ssid)
888{ 895{
889 u8 i; 896 u8 i, j, index_to_add, ssid_found = false;
897 struct ath6kl_cfg80211_match_probe_ssid ssid_list[MAX_PROBED_SSIDS];
898
899 memset(ssid_list, 0, sizeof(ssid_list));
890 900
891 if (n_ssids > MAX_PROBED_SSID_INDEX) 901 if (n_ssids > MAX_PROBED_SSIDS ||
902 n_match_ssid > MAX_PROBED_SSIDS)
892 return -EINVAL; 903 return -EINVAL;
893 904
894 for (i = 0; i < n_ssids; i++) { 905 for (i = 0; i < n_ssids; i++) {
906 memcpy(ssid_list[i].ssid.ssid,
907 ssids[i].ssid,
908 ssids[i].ssid_len);
909 ssid_list[i].ssid.ssid_len = ssids[i].ssid_len;
910
911 if (ssids[i].ssid_len)
912 ssid_list[i].flag = SPECIFIC_SSID_FLAG;
913 else
914 ssid_list[i].flag = ANY_SSID_FLAG;
915
916 if (n_match_ssid == 0)
917 ssid_list[i].flag |= MATCH_SSID_FLAG;
918 }
919
920 index_to_add = i;
921
922 for (i = 0; i < n_match_ssid; i++) {
923 ssid_found = false;
924
925 for (j = 0; j < n_ssids; j++) {
926 if ((match_set[i].ssid.ssid_len ==
927 ssid_list[j].ssid.ssid_len) &&
928 (!memcmp(ssid_list[j].ssid.ssid,
929 match_set[i].ssid.ssid,
930 match_set[i].ssid.ssid_len))) {
931 ssid_list[j].flag |= MATCH_SSID_FLAG;
932 ssid_found = true;
933 break;
934 }
935 }
936
937 if (ssid_found)
938 continue;
939
940 if (index_to_add >= MAX_PROBED_SSIDS)
941 continue;
942
943 ssid_list[index_to_add].ssid.ssid_len =
944 match_set[i].ssid.ssid_len;
945 memcpy(ssid_list[index_to_add].ssid.ssid,
946 match_set[i].ssid.ssid,
947 match_set[i].ssid.ssid_len);
948 ssid_list[index_to_add].flag |= MATCH_SSID_FLAG;
949 index_to_add++;
950 }
951
952 for (i = 0; i < index_to_add; i++) {
895 ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, i, 953 ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, i,
896 ssids[i].ssid_len ? 954 ssid_list[i].flag,
897 SPECIFIC_SSID_FLAG : ANY_SSID_FLAG, 955 ssid_list[i].ssid.ssid_len,
898 ssids[i].ssid_len, 956 ssid_list[i].ssid.ssid);
899 ssids[i].ssid); 957
900 } 958 }
901 959
902 /* Make sure no old entries are left behind */ 960 /* Make sure no old entries are left behind */
903 for (i = n_ssids; i < MAX_PROBED_SSID_INDEX; i++) { 961 for (i = index_to_add; i < MAX_PROBED_SSIDS; i++) {
904 ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, i, 962 ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, i,
905 DISABLE_SSID_FLAG, 0, NULL); 963 DISABLE_SSID_FLAG, 0, NULL);
906 } 964 }
@@ -934,7 +992,7 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
934 } 992 }
935 993
936 ret = ath6kl_set_probed_ssids(ar, vif, request->ssids, 994 ret = ath6kl_set_probed_ssids(ar, vif, request->ssids,
937 request->n_ssids); 995 request->n_ssids, NULL, 0);
938 if (ret < 0) 996 if (ret < 0)
939 return ret; 997 return ret;
940 998
@@ -943,7 +1001,7 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
943 WMI_FRAME_PROBE_REQ, 1001 WMI_FRAME_PROBE_REQ,
944 request->ie, request->ie_len); 1002 request->ie, request->ie_len);
945 if (ret) { 1003 if (ret) {
946 ath6kl_err("failed to set Probe Request appie for scan"); 1004 ath6kl_err("failed to set Probe Request appie for scan\n");
947 return ret; 1005 return ret;
948 } 1006 }
949 1007
@@ -1512,6 +1570,9 @@ static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy,
1512 } 1570 }
1513 } 1571 }
1514 1572
1573 /* need to clean up enhanced bmiss detection fw state */
1574 ath6kl_cfg80211_sta_bmiss_enhance(vif, false);
1575
1515set_iface_type: 1576set_iface_type:
1516 switch (type) { 1577 switch (type) {
1517 case NL80211_IFTYPE_STATION: 1578 case NL80211_IFTYPE_STATION:
@@ -2074,7 +2135,9 @@ static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
2074 if (wow && (wow->n_patterns > WOW_MAX_FILTERS_PER_LIST)) 2135 if (wow && (wow->n_patterns > WOW_MAX_FILTERS_PER_LIST))
2075 return -EINVAL; 2136 return -EINVAL;
2076 2137
2077 if (!test_bit(NETDEV_MCAST_ALL_ON, &vif->flags)) { 2138 if (!test_bit(NETDEV_MCAST_ALL_ON, &vif->flags) &&
2139 test_bit(ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER,
2140 ar->fw_capabilities)) {
2078 ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi, 2141 ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi,
2079 vif->fw_vif_idx, false); 2142 vif->fw_vif_idx, false);
2080 if (ret) 2143 if (ret)
@@ -2209,7 +2272,9 @@ static int ath6kl_wow_resume(struct ath6kl *ar)
2209 2272
2210 ar->state = ATH6KL_STATE_ON; 2273 ar->state = ATH6KL_STATE_ON;
2211 2274
2212 if (!test_bit(NETDEV_MCAST_ALL_OFF, &vif->flags)) { 2275 if (!test_bit(NETDEV_MCAST_ALL_OFF, &vif->flags) &&
2276 test_bit(ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER,
2277 ar->fw_capabilities)) {
2213 ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi, 2278 ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi,
2214 vif->fw_vif_idx, true); 2279 vif->fw_vif_idx, true);
2215 if (ret) 2280 if (ret)
@@ -2475,7 +2540,7 @@ void ath6kl_check_wow_status(struct ath6kl *ar)
2475static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum ieee80211_band band, 2540static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum ieee80211_band band,
2476 bool ht_enable) 2541 bool ht_enable)
2477{ 2542{
2478 struct ath6kl_htcap *htcap = &vif->htcap; 2543 struct ath6kl_htcap *htcap = &vif->htcap[band];
2479 2544
2480 if (htcap->ht_enable == ht_enable) 2545 if (htcap->ht_enable == ht_enable)
2481 return 0; 2546 return 0;
@@ -2585,33 +2650,28 @@ static int ath6kl_set_ies(struct ath6kl_vif *vif,
2585 return 0; 2650 return 0;
2586} 2651}
2587 2652
2588static int ath6kl_set_channel(struct wiphy *wiphy, struct net_device *dev, 2653void ath6kl_cfg80211_sta_bmiss_enhance(struct ath6kl_vif *vif, bool enable)
2589 struct ieee80211_channel *chan,
2590 enum nl80211_channel_type channel_type)
2591{ 2654{
2592 struct ath6kl_vif *vif; 2655 int err;
2593 2656
2594 /* 2657 if (WARN_ON(!test_bit(WMI_READY, &vif->ar->flag)))
2595 * 'dev' could be NULL if a channel change is required for the hardware 2658 return;
2596 * device itself, instead of a particular VIF.
2597 *
2598 * FIXME: To be handled properly when monitor mode is supported.
2599 */
2600 if (!dev)
2601 return -EBUSY;
2602 2659
2603 vif = netdev_priv(dev); 2660 if (vif->nw_type != INFRA_NETWORK)
2661 return;
2604 2662
2605 if (!ath6kl_cfg80211_ready(vif)) 2663 if (!test_bit(ATH6KL_FW_CAPABILITY_BMISS_ENHANCE,
2606 return -EIO; 2664 vif->ar->fw_capabilities))
2665 return;
2607 2666
2608 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: center_freq=%u hw_value=%u\n", 2667 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s fw bmiss enhance\n",
2609 __func__, chan->center_freq, chan->hw_value); 2668 enable ? "enable" : "disable");
2610 vif->next_chan = chan->center_freq;
2611 vif->next_ch_type = channel_type;
2612 vif->next_ch_band = chan->band;
2613 2669
2614 return 0; 2670 err = ath6kl_wmi_sta_bmiss_enhance_cmd(vif->ar->wmi,
2671 vif->fw_vif_idx, enable);
2672 if (err)
2673 ath6kl_err("failed to %s enhanced bmiss detection: %d\n",
2674 enable ? "enable" : "disable", err);
2615} 2675}
2616 2676
2617static int ath6kl_get_rsn_capab(struct cfg80211_beacon_data *beacon, 2677static int ath6kl_get_rsn_capab(struct cfg80211_beacon_data *beacon,
@@ -2694,9 +2754,15 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
2694 2754
2695 /* TODO: 2755 /* TODO:
2696 * info->interval 2756 * info->interval
2697 * info->dtim_period
2698 */ 2757 */
2699 2758
2759 ret = ath6kl_wmi_ap_set_dtim_cmd(ar->wmi, vif->fw_vif_idx,
2760 info->dtim_period);
2761
2762 /* ignore error, just print a warning and continue normally */
2763 if (ret)
2764 ath6kl_warn("Failed to set dtim_period in beacon: %d\n", ret);
2765
2700 if (info->beacon.head == NULL) 2766 if (info->beacon.head == NULL)
2701 return -EINVAL; 2767 return -EINVAL;
2702 mgmt = (struct ieee80211_mgmt *) info->beacon.head; 2768 mgmt = (struct ieee80211_mgmt *) info->beacon.head;
@@ -2791,7 +2857,7 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
2791 p.ssid_len = vif->ssid_len; 2857 p.ssid_len = vif->ssid_len;
2792 memcpy(p.ssid, vif->ssid, vif->ssid_len); 2858 memcpy(p.ssid, vif->ssid, vif->ssid_len);
2793 p.dot11_auth_mode = vif->dot11_auth_mode; 2859 p.dot11_auth_mode = vif->dot11_auth_mode;
2794 p.ch = cpu_to_le16(vif->next_chan); 2860 p.ch = cpu_to_le16(info->channel->center_freq);
2795 2861
2796 /* Enable uAPSD support by default */ 2862 /* Enable uAPSD support by default */
2797 res = ath6kl_wmi_ap_set_apsd(ar->wmi, vif->fw_vif_idx, true); 2863 res = ath6kl_wmi_ap_set_apsd(ar->wmi, vif->fw_vif_idx, true);
@@ -2815,8 +2881,8 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
2815 return res; 2881 return res;
2816 } 2882 }
2817 2883
2818 if (ath6kl_set_htcap(vif, vif->next_ch_band, 2884 if (ath6kl_set_htcap(vif, info->channel->band,
2819 vif->next_ch_type != NL80211_CHAN_NO_HT)) 2885 info->channel_type != NL80211_CHAN_NO_HT))
2820 return -EIO; 2886 return -EIO;
2821 2887
2822 /* 2888 /*
@@ -3160,10 +3226,24 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
3160 ath6kl_cfg80211_scan_complete_event(vif, true); 3226 ath6kl_cfg80211_scan_complete_event(vif, true);
3161 3227
3162 ret = ath6kl_set_probed_ssids(ar, vif, request->ssids, 3228 ret = ath6kl_set_probed_ssids(ar, vif, request->ssids,
3163 request->n_ssids); 3229 request->n_ssids,
3230 request->match_sets,
3231 request->n_match_sets);
3164 if (ret < 0) 3232 if (ret < 0)
3165 return ret; 3233 return ret;
3166 3234
3235 if (!request->n_match_sets) {
3236 ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
3237 ALL_BSS_FILTER, 0);
3238 if (ret < 0)
3239 return ret;
3240 } else {
3241 ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
3242 MATCHED_SSID_FILTER, 0);
3243 if (ret < 0)
3244 return ret;
3245 }
3246
3167 /* fw uses seconds, also make sure that it's >0 */ 3247 /* fw uses seconds, also make sure that it's >0 */
3168 interval = max_t(u16, 1, request->interval / 1000); 3248 interval = max_t(u16, 1, request->interval / 1000);
3169 3249
@@ -3185,7 +3265,7 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
3185 WMI_FRAME_PROBE_REQ, 3265 WMI_FRAME_PROBE_REQ,
3186 request->ie, request->ie_len); 3266 request->ie, request->ie_len);
3187 if (ret) { 3267 if (ret) {
3188 ath6kl_warn("Failed to set probe request IE for scheduled scan: %d", 3268 ath6kl_warn("Failed to set probe request IE for scheduled scan: %d\n",
3189 ret); 3269 ret);
3190 return ret; 3270 return ret;
3191 } 3271 }
@@ -3217,6 +3297,18 @@ static int ath6kl_cfg80211_sscan_stop(struct wiphy *wiphy,
3217 return 0; 3297 return 0;
3218} 3298}
3219 3299
3300static int ath6kl_cfg80211_set_bitrate(struct wiphy *wiphy,
3301 struct net_device *dev,
3302 const u8 *addr,
3303 const struct cfg80211_bitrate_mask *mask)
3304{
3305 struct ath6kl *ar = ath6kl_priv(dev);
3306 struct ath6kl_vif *vif = netdev_priv(dev);
3307
3308 return ath6kl_wmi_set_bitrate_mask(ar->wmi, vif->fw_vif_idx,
3309 mask);
3310}
3311
3220static const struct ieee80211_txrx_stypes 3312static const struct ieee80211_txrx_stypes
3221ath6kl_mgmt_stypes[NUM_NL80211_IFTYPES] = { 3313ath6kl_mgmt_stypes[NUM_NL80211_IFTYPES] = {
3222 [NL80211_IFTYPE_STATION] = { 3314 [NL80211_IFTYPE_STATION] = {
@@ -3271,7 +3363,6 @@ static struct cfg80211_ops ath6kl_cfg80211_ops = {
3271 .suspend = __ath6kl_cfg80211_suspend, 3363 .suspend = __ath6kl_cfg80211_suspend,
3272 .resume = __ath6kl_cfg80211_resume, 3364 .resume = __ath6kl_cfg80211_resume,
3273#endif 3365#endif
3274 .set_channel = ath6kl_set_channel,
3275 .start_ap = ath6kl_start_ap, 3366 .start_ap = ath6kl_start_ap,
3276 .change_beacon = ath6kl_change_beacon, 3367 .change_beacon = ath6kl_change_beacon,
3277 .stop_ap = ath6kl_stop_ap, 3368 .stop_ap = ath6kl_stop_ap,
@@ -3283,6 +3374,7 @@ static struct cfg80211_ops ath6kl_cfg80211_ops = {
3283 .mgmt_frame_register = ath6kl_mgmt_frame_register, 3374 .mgmt_frame_register = ath6kl_mgmt_frame_register,
3284 .sched_scan_start = ath6kl_cfg80211_sscan_start, 3375 .sched_scan_start = ath6kl_cfg80211_sscan_start,
3285 .sched_scan_stop = ath6kl_cfg80211_sscan_stop, 3376 .sched_scan_stop = ath6kl_cfg80211_sscan_stop,
3377 .set_bitrate_mask = ath6kl_cfg80211_set_bitrate,
3286}; 3378};
3287 3379
3288void ath6kl_cfg80211_stop(struct ath6kl_vif *vif) 3380void ath6kl_cfg80211_stop(struct ath6kl_vif *vif)
@@ -3410,7 +3502,8 @@ struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
3410 vif->listen_intvl_t = ATH6KL_DEFAULT_LISTEN_INTVAL; 3502 vif->listen_intvl_t = ATH6KL_DEFAULT_LISTEN_INTVAL;
3411 vif->bmiss_time_t = ATH6KL_DEFAULT_BMISS_TIME; 3503 vif->bmiss_time_t = ATH6KL_DEFAULT_BMISS_TIME;
3412 vif->bg_scan_period = 0; 3504 vif->bg_scan_period = 0;
3413 vif->htcap.ht_enable = true; 3505 vif->htcap[IEEE80211_BAND_2GHZ].ht_enable = true;
3506 vif->htcap[IEEE80211_BAND_5GHZ].ht_enable = true;
3414 3507
3415 memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN); 3508 memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
3416 if (fw_vif_idx != 0) 3509 if (fw_vif_idx != 0)
@@ -3470,7 +3563,13 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
3470 } 3563 }
3471 3564
3472 /* max num of ssids that can be probed during scanning */ 3565 /* max num of ssids that can be probed during scanning */
3473 wiphy->max_scan_ssids = MAX_PROBED_SSID_INDEX; 3566 wiphy->max_scan_ssids = MAX_PROBED_SSIDS;
3567
3568 /* max num of ssids that can be matched after scan */
3569 if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN_MATCH_LIST,
3570 ar->fw_capabilities))
3571 wiphy->max_match_sets = MAX_PROBED_SSIDS;
3572
3474 wiphy->max_scan_ie_len = 1000; /* FIX: what is correct limit? */ 3573 wiphy->max_scan_ie_len = 1000; /* FIX: what is correct limit? */
3475 switch (ar->hw.cap) { 3574 switch (ar->hw.cap) {
3476 case WMI_11AN_CAP: 3575 case WMI_11AN_CAP:
@@ -3507,6 +3606,17 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
3507 ath6kl_band_5ghz.ht_cap.cap = 0; 3606 ath6kl_band_5ghz.ht_cap.cap = 0;
3508 ath6kl_band_5ghz.ht_cap.ht_supported = false; 3607 ath6kl_band_5ghz.ht_cap.ht_supported = false;
3509 } 3608 }
3609
3610 if (ar->hw.flags & ATH6KL_HW_FLAG_64BIT_RATES) {
3611 ath6kl_band_2ghz.ht_cap.mcs.rx_mask[0] = 0xff;
3612 ath6kl_band_5ghz.ht_cap.mcs.rx_mask[0] = 0xff;
3613 ath6kl_band_2ghz.ht_cap.mcs.rx_mask[1] = 0xff;
3614 ath6kl_band_5ghz.ht_cap.mcs.rx_mask[1] = 0xff;
3615 } else {
3616 ath6kl_band_2ghz.ht_cap.mcs.rx_mask[0] = 0xff;
3617 ath6kl_band_5ghz.ht_cap.mcs.rx_mask[0] = 0xff;
3618 }
3619
3510 if (band_2gig) 3620 if (band_2gig)
3511 wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz; 3621 wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
3512 if (band_5gig) 3622 if (band_5gig)
@@ -3517,6 +3627,7 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
3517 wiphy->cipher_suites = cipher_suites; 3627 wiphy->cipher_suites = cipher_suites;
3518 wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); 3628 wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
3519 3629
3630#ifdef CONFIG_PM
3520 wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | 3631 wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
3521 WIPHY_WOWLAN_DISCONNECT | 3632 WIPHY_WOWLAN_DISCONNECT |
3522 WIPHY_WOWLAN_GTK_REKEY_FAILURE | 3633 WIPHY_WOWLAN_GTK_REKEY_FAILURE |
@@ -3526,8 +3637,9 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
3526 wiphy->wowlan.n_patterns = WOW_MAX_FILTERS_PER_LIST; 3637 wiphy->wowlan.n_patterns = WOW_MAX_FILTERS_PER_LIST;
3527 wiphy->wowlan.pattern_min_len = 1; 3638 wiphy->wowlan.pattern_min_len = 1;
3528 wiphy->wowlan.pattern_max_len = WOW_PATTERN_SIZE; 3639 wiphy->wowlan.pattern_max_len = WOW_PATTERN_SIZE;
3640#endif
3529 3641
3530 wiphy->max_sched_scan_ssids = MAX_PROBED_SSID_INDEX; 3642 wiphy->max_sched_scan_ssids = MAX_PROBED_SSIDS;
3531 3643
3532 ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM | 3644 ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM |
3533 WIPHY_FLAG_HAVE_AP_SME | 3645 WIPHY_FLAG_HAVE_AP_SME |
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.h b/drivers/net/wireless/ath/ath6kl/cfg80211.h
index 5ea8cbb79f43..b992046a1b0e 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.h
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.h
@@ -62,5 +62,7 @@ void ath6kl_cfg80211_cleanup(struct ath6kl *ar);
62 62
63struct ath6kl *ath6kl_cfg80211_create(void); 63struct ath6kl *ath6kl_cfg80211_create(void);
64void ath6kl_cfg80211_destroy(struct ath6kl *ar); 64void ath6kl_cfg80211_destroy(struct ath6kl *ar);
65/* TODO: remove this once ath6kl_vif_cleanup() is moved to cfg80211.c */
66void ath6kl_cfg80211_sta_bmiss_enhance(struct ath6kl_vif *vif, bool enable);
65 67
66#endif /* ATH6KL_CFG80211_H */ 68#endif /* ATH6KL_CFG80211_H */
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index 4d9c6f142698..d38a31de344c 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -100,6 +100,21 @@ enum ath6kl_fw_capability {
100 /* Firmware has support to override rsn cap of rsn ie */ 100 /* Firmware has support to override rsn cap of rsn ie */
101 ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE, 101 ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE,
102 102
103 /*
104 * Multicast support in WOW and host awake mode.
105 * Allow all multicast in host awake mode.
106 * Apply multicast filter in WOW mode.
107 */
108 ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER,
109
110 /* Firmware supports enhanced bmiss detection */
111 ATH6KL_FW_CAPABILITY_BMISS_ENHANCE,
112
113 /*
114 * FW supports matching of ssid in schedule scan
115 */
116 ATH6KL_FW_CAPABILITY_SCHED_SCAN_MATCH_LIST,
117
103 /* this needs to be last */ 118 /* this needs to be last */
104 ATH6KL_FW_CAPABILITY_MAX, 119 ATH6KL_FW_CAPABILITY_MAX,
105}; 120};
@@ -112,6 +127,10 @@ struct ath6kl_fw_ie {
112 u8 data[0]; 127 u8 data[0];
113}; 128};
114 129
130enum ath6kl_hw_flags {
131 ATH6KL_HW_FLAG_64BIT_RATES = BIT(0),
132};
133
115#define ATH6KL_FW_API2_FILE "fw-2.bin" 134#define ATH6KL_FW_API2_FILE "fw-2.bin"
116#define ATH6KL_FW_API3_FILE "fw-3.bin" 135#define ATH6KL_FW_API3_FILE "fw-3.bin"
117 136
@@ -196,7 +215,7 @@ struct ath6kl_fw_ie {
196 215
197#define AGGR_NUM_OF_FREE_NETBUFS 16 216#define AGGR_NUM_OF_FREE_NETBUFS 16
198 217
199#define AGGR_RX_TIMEOUT 400 /* in ms */ 218#define AGGR_RX_TIMEOUT 100 /* in ms */
200 219
201#define WMI_TIMEOUT (2 * HZ) 220#define WMI_TIMEOUT (2 * HZ)
202 221
@@ -245,7 +264,6 @@ struct skb_hold_q {
245 264
246struct rxtid { 265struct rxtid {
247 bool aggr; 266 bool aggr;
248 bool progress;
249 bool timer_mon; 267 bool timer_mon;
250 u16 win_sz; 268 u16 win_sz;
251 u16 seq_next; 269 u16 seq_next;
@@ -254,9 +272,15 @@ struct rxtid {
254 struct sk_buff_head q; 272 struct sk_buff_head q;
255 273
256 /* 274 /*
257 * FIXME: No clue what this should protect. Apparently it should 275 * lock mainly protects seq_next and hold_q. Movement of seq_next
258 * protect some of the fields above but they are also accessed 276 * needs to be protected between aggr_timeout() and
259 * without taking the lock. 277 * aggr_process_recv_frm(). hold_q will be holding the pending
278 * reorder frames and it's access should also be protected.
279 * Some of the other fields like hold_q_sz, win_sz and aggr are
280 * initialized/reset when receiving addba/delba req, also while
281 * deleting aggr state all the pending buffers are flushed before
282 * resetting these fields, so there should not be any race in accessing
283 * these fields.
260 */ 284 */
261 spinlock_t lock; 285 spinlock_t lock;
262}; 286};
@@ -541,7 +565,7 @@ struct ath6kl_vif {
541 struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1]; 565 struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1];
542 struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1]; 566 struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1];
543 struct aggr_info *aggr_cntxt; 567 struct aggr_info *aggr_cntxt;
544 struct ath6kl_htcap htcap; 568 struct ath6kl_htcap htcap[IEEE80211_NUM_BANDS];
545 569
546 struct timer_list disconnect_timer; 570 struct timer_list disconnect_timer;
547 struct timer_list sched_scan_timer; 571 struct timer_list sched_scan_timer;
@@ -553,9 +577,6 @@ struct ath6kl_vif {
553 u32 last_cancel_roc_id; 577 u32 last_cancel_roc_id;
554 u32 send_action_id; 578 u32 send_action_id;
555 bool probe_req_report; 579 bool probe_req_report;
556 u16 next_chan;
557 enum nl80211_channel_type next_ch_type;
558 enum ieee80211_band next_ch_band;
559 u16 assoc_bss_beacon_int; 580 u16 assoc_bss_beacon_int;
560 u16 listen_intvl_t; 581 u16 listen_intvl_t;
561 u16 bmiss_time_t; 582 u16 bmiss_time_t;
@@ -687,6 +708,8 @@ struct ath6kl {
687 u32 testscript_addr; 708 u32 testscript_addr;
688 enum wmi_phy_cap cap; 709 enum wmi_phy_cap cap;
689 710
711 u32 flags;
712
690 struct ath6kl_hw_fw { 713 struct ath6kl_hw_fw {
691 const char *dir; 714 const char *dir;
692 const char *otp; 715 const char *otp;
diff --git a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
index 2798624d3a9d..cd0e1ba410d6 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_mbox.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
@@ -1309,7 +1309,7 @@ static int ath6kl_htc_rx_packet(struct htc_target *target,
1309 } 1309 }
1310 1310
1311 ath6kl_dbg(ATH6KL_DBG_HTC, 1311 ath6kl_dbg(ATH6KL_DBG_HTC,
1312 "htc rx 0x%p hdr x%x len %d mbox 0x%x\n", 1312 "htc rx 0x%p hdr 0x%x len %d mbox 0x%x\n",
1313 packet, packet->info.rx.exp_hdr, 1313 packet, packet->info.rx.exp_hdr,
1314 padded_len, dev->ar->mbox_info.htc_addr); 1314 padded_len, dev->ar->mbox_info.htc_addr);
1315 1315
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 7eb0515f458a..f90b5db741cf 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -42,6 +42,7 @@ static const struct ath6kl_hw hw_list[] = {
42 .reserved_ram_size = 6912, 42 .reserved_ram_size = 6912,
43 .refclk_hz = 26000000, 43 .refclk_hz = 26000000,
44 .uarttx_pin = 8, 44 .uarttx_pin = 8,
45 .flags = 0,
45 46
46 /* hw2.0 needs override address hardcoded */ 47 /* hw2.0 needs override address hardcoded */
47 .app_start_override_addr = 0x944C00, 48 .app_start_override_addr = 0x944C00,
@@ -67,6 +68,7 @@ static const struct ath6kl_hw hw_list[] = {
67 .refclk_hz = 26000000, 68 .refclk_hz = 26000000,
68 .uarttx_pin = 8, 69 .uarttx_pin = 8,
69 .testscript_addr = 0x57ef74, 70 .testscript_addr = 0x57ef74,
71 .flags = 0,
70 72
71 .fw = { 73 .fw = {
72 .dir = AR6003_HW_2_1_1_FW_DIR, 74 .dir = AR6003_HW_2_1_1_FW_DIR,
@@ -91,6 +93,7 @@ static const struct ath6kl_hw hw_list[] = {
91 .board_addr = 0x433900, 93 .board_addr = 0x433900,
92 .refclk_hz = 26000000, 94 .refclk_hz = 26000000,
93 .uarttx_pin = 11, 95 .uarttx_pin = 11,
96 .flags = ATH6KL_HW_FLAG_64BIT_RATES,
94 97
95 .fw = { 98 .fw = {
96 .dir = AR6004_HW_1_0_FW_DIR, 99 .dir = AR6004_HW_1_0_FW_DIR,
@@ -110,6 +113,7 @@ static const struct ath6kl_hw hw_list[] = {
110 .board_addr = 0x43d400, 113 .board_addr = 0x43d400,
111 .refclk_hz = 40000000, 114 .refclk_hz = 40000000,
112 .uarttx_pin = 11, 115 .uarttx_pin = 11,
116 .flags = ATH6KL_HW_FLAG_64BIT_RATES,
113 117
114 .fw = { 118 .fw = {
115 .dir = AR6004_HW_1_1_FW_DIR, 119 .dir = AR6004_HW_1_1_FW_DIR,
@@ -129,6 +133,7 @@ static const struct ath6kl_hw hw_list[] = {
129 .board_addr = 0x435c00, 133 .board_addr = 0x435c00,
130 .refclk_hz = 40000000, 134 .refclk_hz = 40000000,
131 .uarttx_pin = 11, 135 .uarttx_pin = 11,
136 .flags = ATH6KL_HW_FLAG_64BIT_RATES,
132 137
133 .fw = { 138 .fw = {
134 .dir = AR6004_HW_1_2_FW_DIR, 139 .dir = AR6004_HW_1_2_FW_DIR,
@@ -938,6 +943,14 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name)
938 } 943 }
939 944
940 switch (ie_id) { 945 switch (ie_id) {
946 case ATH6KL_FW_IE_FW_VERSION:
947 strlcpy(ar->wiphy->fw_version, data,
948 sizeof(ar->wiphy->fw_version));
949
950 ath6kl_dbg(ATH6KL_DBG_BOOT,
951 "found fw version %s\n",
952 ar->wiphy->fw_version);
953 break;
941 case ATH6KL_FW_IE_OTP_IMAGE: 954 case ATH6KL_FW_IE_OTP_IMAGE:
942 ath6kl_dbg(ATH6KL_DBG_BOOT, "found otp image ie (%zd B)\n", 955 ath6kl_dbg(ATH6KL_DBG_BOOT, "found otp image ie (%zd B)\n",
943 ie_len); 956 ie_len);
@@ -991,9 +1004,6 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name)
991 ar->hw.reserved_ram_size); 1004 ar->hw.reserved_ram_size);
992 break; 1005 break;
993 case ATH6KL_FW_IE_CAPABILITIES: 1006 case ATH6KL_FW_IE_CAPABILITIES:
994 if (ie_len < DIV_ROUND_UP(ATH6KL_FW_CAPABILITY_MAX, 8))
995 break;
996
997 ath6kl_dbg(ATH6KL_DBG_BOOT, 1007 ath6kl_dbg(ATH6KL_DBG_BOOT,
998 "found firmware capabilities ie (%zd B)\n", 1008 "found firmware capabilities ie (%zd B)\n",
999 ie_len); 1009 ie_len);
@@ -1002,6 +1012,9 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name)
1002 index = i / 8; 1012 index = i / 8;
1003 bit = i % 8; 1013 bit = i % 8;
1004 1014
1015 if (index == ie_len)
1016 break;
1017
1005 if (data[index] & (1 << bit)) 1018 if (data[index] & (1 << bit))
1006 __set_bit(i, ar->fw_capabilities); 1019 __set_bit(i, ar->fw_capabilities);
1007 } 1020 }
@@ -1392,6 +1405,12 @@ static int ath6kl_init_upload(struct ath6kl *ar)
1392 ar->version.target_ver == AR6003_HW_2_1_1_VERSION) { 1405 ar->version.target_ver == AR6003_HW_2_1_1_VERSION) {
1393 ath6kl_err("temporary war to avoid sdio crc error\n"); 1406 ath6kl_err("temporary war to avoid sdio crc error\n");
1394 1407
1408 param = 0x28;
1409 address = GPIO_BASE_ADDRESS + GPIO_PIN9_ADDRESS;
1410 status = ath6kl_bmi_reg_write(ar, address, param);
1411 if (status)
1412 return status;
1413
1395 param = 0x20; 1414 param = 0x20;
1396 1415
1397 address = GPIO_BASE_ADDRESS + GPIO_PIN10_ADDRESS; 1416 address = GPIO_BASE_ADDRESS + GPIO_PIN10_ADDRESS;
@@ -1659,6 +1678,9 @@ void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready)
1659 cfg80211_scan_done(vif->scan_req, true); 1678 cfg80211_scan_done(vif->scan_req, true);
1660 vif->scan_req = NULL; 1679 vif->scan_req = NULL;
1661 } 1680 }
1681
1682 /* need to clean up enhanced bmiss detection fw state */
1683 ath6kl_cfg80211_sta_bmiss_enhance(vif, false);
1662} 1684}
1663 1685
1664void ath6kl_stop_txrx(struct ath6kl *ar) 1686void ath6kl_stop_txrx(struct ath6kl *ar)
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index e5524470529c..c189e28e86a9 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -554,20 +554,24 @@ void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver,
554 struct ath6kl *ar = devt; 554 struct ath6kl *ar = devt;
555 555
556 memcpy(ar->mac_addr, datap, ETH_ALEN); 556 memcpy(ar->mac_addr, datap, ETH_ALEN);
557 ath6kl_dbg(ATH6KL_DBG_TRC, "%s: mac addr = %pM\n", 557
558 __func__, ar->mac_addr); 558 ath6kl_dbg(ATH6KL_DBG_BOOT,
559 "ready event mac addr %pM sw_ver 0x%x abi_ver 0x%x cap 0x%x\n",
560 ar->mac_addr, sw_ver, abi_ver, cap);
559 561
560 ar->version.wlan_ver = sw_ver; 562 ar->version.wlan_ver = sw_ver;
561 ar->version.abi_ver = abi_ver; 563 ar->version.abi_ver = abi_ver;
562 ar->hw.cap = cap; 564 ar->hw.cap = cap;
563 565
564 snprintf(ar->wiphy->fw_version, 566 if (strlen(ar->wiphy->fw_version) == 0) {
565 sizeof(ar->wiphy->fw_version), 567 snprintf(ar->wiphy->fw_version,
566 "%u.%u.%u.%u", 568 sizeof(ar->wiphy->fw_version),
567 (ar->version.wlan_ver & 0xf0000000) >> 28, 569 "%u.%u.%u.%u",
568 (ar->version.wlan_ver & 0x0f000000) >> 24, 570 (ar->version.wlan_ver & 0xf0000000) >> 28,
569 (ar->version.wlan_ver & 0x00ff0000) >> 16, 571 (ar->version.wlan_ver & 0x0f000000) >> 24,
570 (ar->version.wlan_ver & 0x0000ffff)); 572 (ar->version.wlan_ver & 0x00ff0000) >> 16,
573 (ar->version.wlan_ver & 0x0000ffff));
574 }
571 575
572 /* indicate to the waiting thread that the ready event was received */ 576 /* indicate to the waiting thread that the ready event was received */
573 set_bit(WMI_READY, &ar->flag); 577 set_bit(WMI_READY, &ar->flag);
@@ -598,7 +602,6 @@ static int ath6kl_commit_ch_switch(struct ath6kl_vif *vif, u16 channel)
598 602
599 struct ath6kl *ar = vif->ar; 603 struct ath6kl *ar = vif->ar;
600 604
601 vif->next_chan = channel;
602 vif->profile.ch = cpu_to_le16(channel); 605 vif->profile.ch = cpu_to_le16(channel);
603 606
604 switch (vif->nw_type) { 607 switch (vif->nw_type) {
@@ -1167,7 +1170,10 @@ static void ath6kl_set_multicast_list(struct net_device *ndev)
1167 else 1170 else
1168 clear_bit(NETDEV_MCAST_ALL_ON, &vif->flags); 1171 clear_bit(NETDEV_MCAST_ALL_ON, &vif->flags);
1169 1172
1170 mc_all_on = mc_all_on || (vif->ar->state == ATH6KL_STATE_ON); 1173 if (test_bit(ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER,
1174 vif->ar->fw_capabilities)) {
1175 mc_all_on = mc_all_on || (vif->ar->state == ATH6KL_STATE_ON);
1176 }
1171 1177
1172 if (!(ndev->flags & IFF_MULTICAST)) { 1178 if (!(ndev->flags & IFF_MULTICAST)) {
1173 mc_all_on = false; 1179 mc_all_on = false;
diff --git a/drivers/net/wireless/ath/ath6kl/target.h b/drivers/net/wireless/ath/ath6kl/target.h
index 78e0ef4567a5..a98c12ba70c1 100644
--- a/drivers/net/wireless/ath/ath6kl/target.h
+++ b/drivers/net/wireless/ath/ath6kl/target.h
@@ -45,6 +45,7 @@
45#define LPO_CAL_ENABLE_S 20 45#define LPO_CAL_ENABLE_S 20
46#define LPO_CAL_ENABLE 0x00100000 46#define LPO_CAL_ENABLE 0x00100000
47 47
48#define GPIO_PIN9_ADDRESS 0x0000004c
48#define GPIO_PIN10_ADDRESS 0x00000050 49#define GPIO_PIN10_ADDRESS 0x00000050
49#define GPIO_PIN11_ADDRESS 0x00000054 50#define GPIO_PIN11_ADDRESS 0x00000054
50#define GPIO_PIN12_ADDRESS 0x00000058 51#define GPIO_PIN12_ADDRESS 0x00000058
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index 67206aedea6c..7dfa0fd86d7b 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -1036,6 +1036,7 @@ static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
1036 rxtid = &agg_conn->rx_tid[tid]; 1036 rxtid = &agg_conn->rx_tid[tid];
1037 stats = &agg_conn->stat[tid]; 1037 stats = &agg_conn->stat[tid];
1038 1038
1039 spin_lock_bh(&rxtid->lock);
1039 idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz); 1040 idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
1040 1041
1041 /* 1042 /*
@@ -1054,8 +1055,6 @@ static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
1054 seq_end = seq_no ? seq_no : rxtid->seq_next; 1055 seq_end = seq_no ? seq_no : rxtid->seq_next;
1055 idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz); 1056 idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);
1056 1057
1057 spin_lock_bh(&rxtid->lock);
1058
1059 do { 1058 do {
1060 node = &rxtid->hold_q[idx]; 1059 node = &rxtid->hold_q[idx];
1061 if ((order == 1) && (!node->skb)) 1060 if ((order == 1) && (!node->skb))
@@ -1127,11 +1126,13 @@ static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
1127 ((end > extended_end) && (cur > extended_end) && 1126 ((end > extended_end) && (cur > extended_end) &&
1128 (cur < end))) { 1127 (cur < end))) {
1129 aggr_deque_frms(agg_conn, tid, 0, 0); 1128 aggr_deque_frms(agg_conn, tid, 0, 0);
1129 spin_lock_bh(&rxtid->lock);
1130 if (cur >= rxtid->hold_q_sz - 1) 1130 if (cur >= rxtid->hold_q_sz - 1)
1131 rxtid->seq_next = cur - (rxtid->hold_q_sz - 1); 1131 rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
1132 else 1132 else
1133 rxtid->seq_next = ATH6KL_MAX_SEQ_NO - 1133 rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
1134 (rxtid->hold_q_sz - 2 - cur); 1134 (rxtid->hold_q_sz - 2 - cur);
1135 spin_unlock_bh(&rxtid->lock);
1135 } else { 1136 } else {
1136 /* 1137 /*
1137 * Dequeue only those frames that are outside the 1138 * Dequeue only those frames that are outside the
@@ -1185,25 +1186,25 @@ static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
1185 aggr_deque_frms(agg_conn, tid, 0, 1); 1186 aggr_deque_frms(agg_conn, tid, 0, 1);
1186 1187
1187 if (agg_conn->timer_scheduled) 1188 if (agg_conn->timer_scheduled)
1188 rxtid->progress = true; 1189 return is_queued;
1189 else 1190
1190 for (idx = 0 ; idx < rxtid->hold_q_sz; idx++) { 1191 spin_lock_bh(&rxtid->lock);
1191 if (rxtid->hold_q[idx].skb) { 1192 for (idx = 0 ; idx < rxtid->hold_q_sz; idx++) {
1192 /* 1193 if (rxtid->hold_q[idx].skb) {
1193 * There is a frame in the queue and no 1194 /*
1194 * timer so start a timer to ensure that 1195 * There is a frame in the queue and no
1195 * the frame doesn't remain stuck 1196 * timer so start a timer to ensure that
1196 * forever. 1197 * the frame doesn't remain stuck
1197 */ 1198 * forever.
1198 agg_conn->timer_scheduled = true; 1199 */
1199 mod_timer(&agg_conn->timer, 1200 agg_conn->timer_scheduled = true;
1200 (jiffies + 1201 mod_timer(&agg_conn->timer,
1201 HZ * (AGGR_RX_TIMEOUT) / 1000)); 1202 (jiffies + (HZ * AGGR_RX_TIMEOUT) / 1000));
1202 rxtid->progress = false; 1203 rxtid->timer_mon = true;
1203 rxtid->timer_mon = true; 1204 break;
1204 break;
1205 }
1206 } 1205 }
1206 }
1207 spin_unlock_bh(&rxtid->lock);
1207 1208
1208 return is_queued; 1209 return is_queued;
1209} 1210}
@@ -1608,7 +1609,7 @@ static void aggr_timeout(unsigned long arg)
1608 rxtid = &aggr_conn->rx_tid[i]; 1609 rxtid = &aggr_conn->rx_tid[i];
1609 stats = &aggr_conn->stat[i]; 1610 stats = &aggr_conn->stat[i];
1610 1611
1611 if (!rxtid->aggr || !rxtid->timer_mon || rxtid->progress) 1612 if (!rxtid->aggr || !rxtid->timer_mon)
1612 continue; 1613 continue;
1613 1614
1614 stats->num_timeouts++; 1615 stats->num_timeouts++;
@@ -1626,14 +1627,15 @@ static void aggr_timeout(unsigned long arg)
1626 rxtid = &aggr_conn->rx_tid[i]; 1627 rxtid = &aggr_conn->rx_tid[i];
1627 1628
1628 if (rxtid->aggr && rxtid->hold_q) { 1629 if (rxtid->aggr && rxtid->hold_q) {
1630 spin_lock_bh(&rxtid->lock);
1629 for (j = 0; j < rxtid->hold_q_sz; j++) { 1631 for (j = 0; j < rxtid->hold_q_sz; j++) {
1630 if (rxtid->hold_q[j].skb) { 1632 if (rxtid->hold_q[j].skb) {
1631 aggr_conn->timer_scheduled = true; 1633 aggr_conn->timer_scheduled = true;
1632 rxtid->timer_mon = true; 1634 rxtid->timer_mon = true;
1633 rxtid->progress = false;
1634 break; 1635 break;
1635 } 1636 }
1636 } 1637 }
1638 spin_unlock_bh(&rxtid->lock);
1637 1639
1638 if (j >= rxtid->hold_q_sz) 1640 if (j >= rxtid->hold_q_sz)
1639 rxtid->timer_mon = false; 1641 rxtid->timer_mon = false;
@@ -1660,7 +1662,6 @@ static void aggr_delete_tid_state(struct aggr_info_conn *aggr_conn, u8 tid)
1660 aggr_deque_frms(aggr_conn, tid, 0, 0); 1662 aggr_deque_frms(aggr_conn, tid, 0, 0);
1661 1663
1662 rxtid->aggr = false; 1664 rxtid->aggr = false;
1663 rxtid->progress = false;
1664 rxtid->timer_mon = false; 1665 rxtid->timer_mon = false;
1665 rxtid->win_sz = 0; 1666 rxtid->win_sz = 0;
1666 rxtid->seq_next = 0; 1667 rxtid->seq_next = 0;
@@ -1739,7 +1740,6 @@ void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
1739 for (i = 0; i < NUM_OF_TIDS; i++) { 1740 for (i = 0; i < NUM_OF_TIDS; i++) {
1740 rxtid = &aggr_conn->rx_tid[i]; 1741 rxtid = &aggr_conn->rx_tid[i];
1741 rxtid->aggr = false; 1742 rxtid->aggr = false;
1742 rxtid->progress = false;
1743 rxtid->timer_mon = false; 1743 rxtid->timer_mon = false;
1744 skb_queue_head_init(&rxtid->q); 1744 skb_queue_head_init(&rxtid->q);
1745 spin_lock_init(&rxtid->lock); 1745 spin_lock_init(&rxtid->lock);
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index ee8ec2394c2c..a6caa673e8ad 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -743,7 +743,6 @@ int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid)
743 return -ENOMEM; 743 return -ENOMEM;
744 744
745 cmd = (struct roam_ctrl_cmd *) skb->data; 745 cmd = (struct roam_ctrl_cmd *) skb->data;
746 memset(cmd, 0, sizeof(*cmd));
747 746
748 memcpy(cmd->info.bssid, bssid, ETH_ALEN); 747 memcpy(cmd->info.bssid, bssid, ETH_ALEN);
749 cmd->roam_ctrl = WMI_FORCE_ROAM; 748 cmd->roam_ctrl = WMI_FORCE_ROAM;
@@ -753,6 +752,22 @@ int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid)
753 NO_SYNC_WMIFLAG); 752 NO_SYNC_WMIFLAG);
754} 753}
755 754
755int ath6kl_wmi_ap_set_dtim_cmd(struct wmi *wmi, u8 if_idx, u32 dtim_period)
756{
757 struct sk_buff *skb;
758 struct set_dtim_cmd *cmd;
759
760 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
761 if (!skb)
762 return -ENOMEM;
763
764 cmd = (struct set_dtim_cmd *) skb->data;
765
766 cmd->dtim_period = cpu_to_le32(dtim_period);
767 return ath6kl_wmi_cmd_send(wmi, if_idx, skb,
768 WMI_AP_SET_DTIM_CMDID, NO_SYNC_WMIFLAG);
769}
770
756int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode) 771int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode)
757{ 772{
758 struct sk_buff *skb; 773 struct sk_buff *skb;
@@ -763,7 +778,6 @@ int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode)
763 return -ENOMEM; 778 return -ENOMEM;
764 779
765 cmd = (struct roam_ctrl_cmd *) skb->data; 780 cmd = (struct roam_ctrl_cmd *) skb->data;
766 memset(cmd, 0, sizeof(*cmd));
767 781
768 cmd->info.roam_mode = mode; 782 cmd->info.roam_mode = mode;
769 cmd->roam_ctrl = WMI_SET_ROAM_MODE; 783 cmd->roam_ctrl = WMI_SET_ROAM_MODE;
@@ -1995,7 +2009,7 @@ int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 if_idx, u8 index, u8 flag,
1995 struct wmi_probed_ssid_cmd *cmd; 2009 struct wmi_probed_ssid_cmd *cmd;
1996 int ret; 2010 int ret;
1997 2011
1998 if (index > MAX_PROBED_SSID_INDEX) 2012 if (index >= MAX_PROBED_SSIDS)
1999 return -EINVAL; 2013 return -EINVAL;
2000 2014
2001 if (ssid_len > sizeof(cmd->ssid)) 2015 if (ssid_len > sizeof(cmd->ssid))
@@ -2599,6 +2613,115 @@ static void ath6kl_wmi_relinquish_implicit_pstream_credits(struct wmi *wmi)
2599 spin_unlock_bh(&wmi->lock); 2613 spin_unlock_bh(&wmi->lock);
2600} 2614}
2601 2615
2616static int ath6kl_set_bitrate_mask64(struct wmi *wmi, u8 if_idx,
2617 const struct cfg80211_bitrate_mask *mask)
2618{
2619 struct sk_buff *skb;
2620 int ret, mode, band;
2621 u64 mcsrate, ratemask[IEEE80211_NUM_BANDS];
2622 struct wmi_set_tx_select_rates64_cmd *cmd;
2623
2624 memset(&ratemask, 0, sizeof(ratemask));
2625 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
2626 /* copy legacy rate mask */
2627 ratemask[band] = mask->control[band].legacy;
2628 if (band == IEEE80211_BAND_5GHZ)
2629 ratemask[band] =
2630 mask->control[band].legacy << 4;
2631
2632 /* copy mcs rate mask */
2633 mcsrate = mask->control[band].mcs[1];
2634 mcsrate <<= 8;
2635 mcsrate |= mask->control[band].mcs[0];
2636 ratemask[band] |= mcsrate << 12;
2637 ratemask[band] |= mcsrate << 28;
2638 }
2639
2640 ath6kl_dbg(ATH6KL_DBG_WMI,
2641 "Ratemask 64 bit: 2.4:%llx 5:%llx\n",
2642 ratemask[0], ratemask[1]);
2643
2644 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd) * WMI_RATES_MODE_MAX);
2645 if (!skb)
2646 return -ENOMEM;
2647
2648 cmd = (struct wmi_set_tx_select_rates64_cmd *) skb->data;
2649 for (mode = 0; mode < WMI_RATES_MODE_MAX; mode++) {
2650 /* A mode operate in 5GHZ band */
2651 if (mode == WMI_RATES_MODE_11A ||
2652 mode == WMI_RATES_MODE_11A_HT20 ||
2653 mode == WMI_RATES_MODE_11A_HT40)
2654 band = IEEE80211_BAND_5GHZ;
2655 else
2656 band = IEEE80211_BAND_2GHZ;
2657 cmd->ratemask[mode] = cpu_to_le64(ratemask[band]);
2658 }
2659
2660 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
2661 WMI_SET_TX_SELECT_RATES_CMDID,
2662 NO_SYNC_WMIFLAG);
2663 return ret;
2664}
2665
2666static int ath6kl_set_bitrate_mask32(struct wmi *wmi, u8 if_idx,
2667 const struct cfg80211_bitrate_mask *mask)
2668{
2669 struct sk_buff *skb;
2670 int ret, mode, band;
2671 u32 mcsrate, ratemask[IEEE80211_NUM_BANDS];
2672 struct wmi_set_tx_select_rates32_cmd *cmd;
2673
2674 memset(&ratemask, 0, sizeof(ratemask));
2675 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
2676 /* copy legacy rate mask */
2677 ratemask[band] = mask->control[band].legacy;
2678 if (band == IEEE80211_BAND_5GHZ)
2679 ratemask[band] =
2680 mask->control[band].legacy << 4;
2681
2682 /* copy mcs rate mask */
2683 mcsrate = mask->control[band].mcs[0];
2684 ratemask[band] |= mcsrate << 12;
2685 ratemask[band] |= mcsrate << 20;
2686 }
2687
2688 ath6kl_dbg(ATH6KL_DBG_WMI,
2689 "Ratemask 32 bit: 2.4:%x 5:%x\n",
2690 ratemask[0], ratemask[1]);
2691
2692 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd) * WMI_RATES_MODE_MAX);
2693 if (!skb)
2694 return -ENOMEM;
2695
2696 cmd = (struct wmi_set_tx_select_rates32_cmd *) skb->data;
2697 for (mode = 0; mode < WMI_RATES_MODE_MAX; mode++) {
2698 /* A mode operate in 5GHZ band */
2699 if (mode == WMI_RATES_MODE_11A ||
2700 mode == WMI_RATES_MODE_11A_HT20 ||
2701 mode == WMI_RATES_MODE_11A_HT40)
2702 band = IEEE80211_BAND_5GHZ;
2703 else
2704 band = IEEE80211_BAND_2GHZ;
2705 cmd->ratemask[mode] = cpu_to_le32(ratemask[band]);
2706 }
2707
2708 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
2709 WMI_SET_TX_SELECT_RATES_CMDID,
2710 NO_SYNC_WMIFLAG);
2711 return ret;
2712}
2713
2714int ath6kl_wmi_set_bitrate_mask(struct wmi *wmi, u8 if_idx,
2715 const struct cfg80211_bitrate_mask *mask)
2716{
2717 struct ath6kl *ar = wmi->parent_dev;
2718
2719 if (ar->hw.flags & ATH6KL_HW_FLAG_64BIT_RATES)
2720 return ath6kl_set_bitrate_mask64(wmi, if_idx, mask);
2721 else
2722 return ath6kl_set_bitrate_mask32(wmi, if_idx, mask);
2723}
2724
2602int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx, 2725int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
2603 enum ath6kl_host_mode host_mode) 2726 enum ath6kl_host_mode host_mode)
2604{ 2727{
@@ -2997,6 +3120,25 @@ int ath6kl_wmi_add_del_mcast_filter_cmd(struct wmi *wmi, u8 if_idx,
2997 return ret; 3120 return ret;
2998} 3121}
2999 3122
3123int ath6kl_wmi_sta_bmiss_enhance_cmd(struct wmi *wmi, u8 if_idx, bool enhance)
3124{
3125 struct sk_buff *skb;
3126 struct wmi_sta_bmiss_enhance_cmd *cmd;
3127 int ret;
3128
3129 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
3130 if (!skb)
3131 return -ENOMEM;
3132
3133 cmd = (struct wmi_sta_bmiss_enhance_cmd *) skb->data;
3134 cmd->enable = enhance ? 1 : 0;
3135
3136 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
3137 WMI_STA_BMISS_ENHANCE_CMDID,
3138 NO_SYNC_WMIFLAG);
3139 return ret;
3140}
3141
3000s32 ath6kl_wmi_get_rate(s8 rate_index) 3142s32 ath6kl_wmi_get_rate(s8 rate_index)
3001{ 3143{
3002 if (rate_index == RATE_AUTO) 3144 if (rate_index == RATE_AUTO)
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index 9076bec3a2ba..43339aca585d 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -624,6 +624,10 @@ enum wmi_cmd_id {
624 WMI_SEND_MGMT_CMDID, 624 WMI_SEND_MGMT_CMDID,
625 WMI_BEGIN_SCAN_CMDID, 625 WMI_BEGIN_SCAN_CMDID,
626 626
627 WMI_SET_BLACK_LIST,
628 WMI_SET_MCASTRATE,
629
630 WMI_STA_BMISS_ENHANCE_CMDID,
627}; 631};
628 632
629enum wmi_mgmt_frame_type { 633enum wmi_mgmt_frame_type {
@@ -960,6 +964,9 @@ enum wmi_bss_filter {
960 /* beacons matching probed ssid */ 964 /* beacons matching probed ssid */
961 PROBED_SSID_FILTER, 965 PROBED_SSID_FILTER,
962 966
967 /* beacons matching matched ssid */
968 MATCHED_SSID_FILTER,
969
963 /* marker only */ 970 /* marker only */
964 LAST_BSS_FILTER, 971 LAST_BSS_FILTER,
965}; 972};
@@ -978,7 +985,7 @@ struct wmi_bss_filter_cmd {
978} __packed; 985} __packed;
979 986
980/* WMI_SET_PROBED_SSID_CMDID */ 987/* WMI_SET_PROBED_SSID_CMDID */
981#define MAX_PROBED_SSID_INDEX 9 988#define MAX_PROBED_SSIDS 16
982 989
983enum wmi_ssid_flag { 990enum wmi_ssid_flag {
984 /* disables entry */ 991 /* disables entry */
@@ -989,10 +996,13 @@ enum wmi_ssid_flag {
989 996
990 /* probes for any ssid */ 997 /* probes for any ssid */
991 ANY_SSID_FLAG = 0x02, 998 ANY_SSID_FLAG = 0x02,
999
1000 /* match for ssid */
1001 MATCH_SSID_FLAG = 0x08,
992}; 1002};
993 1003
994struct wmi_probed_ssid_cmd { 1004struct wmi_probed_ssid_cmd {
995 /* 0 to MAX_PROBED_SSID_INDEX */ 1005 /* 0 to MAX_PROBED_SSIDS - 1 */
996 u8 entry_index; 1006 u8 entry_index;
997 1007
998 /* see, enum wmi_ssid_flg */ 1008 /* see, enum wmi_ssid_flg */
@@ -1017,6 +1027,11 @@ struct wmi_bmiss_time_cmd {
1017 __le16 num_beacons; 1027 __le16 num_beacons;
1018}; 1028};
1019 1029
1030/* WMI_STA_ENHANCE_BMISS_CMDID */
1031struct wmi_sta_bmiss_enhance_cmd {
1032 u8 enable;
1033} __packed;
1034
1020/* WMI_SET_POWER_MODE_CMDID */ 1035/* WMI_SET_POWER_MODE_CMDID */
1021enum wmi_power_mode { 1036enum wmi_power_mode {
1022 REC_POWER = 0x01, 1037 REC_POWER = 0x01,
@@ -1048,6 +1063,36 @@ struct wmi_power_params_cmd {
1048 __le16 ps_fail_event_policy; 1063 __le16 ps_fail_event_policy;
1049} __packed; 1064} __packed;
1050 1065
1066/*
1067 * Ratemask for below modes should be passed
1068 * to WMI_SET_TX_SELECT_RATES_CMDID.
1069 * AR6003 has 32 bit mask for each modes.
1070 * First 12 bits for legacy rates, 13 to 20
1071 * bits for HT 20 rates and 21 to 28 bits for
1072 * HT 40 rates
1073 */
1074enum wmi_mode_phy {
1075 WMI_RATES_MODE_11A = 0,
1076 WMI_RATES_MODE_11G,
1077 WMI_RATES_MODE_11B,
1078 WMI_RATES_MODE_11GONLY,
1079 WMI_RATES_MODE_11A_HT20,
1080 WMI_RATES_MODE_11G_HT20,
1081 WMI_RATES_MODE_11A_HT40,
1082 WMI_RATES_MODE_11G_HT40,
1083 WMI_RATES_MODE_MAX
1084};
1085
1086/* WMI_SET_TX_SELECT_RATES_CMDID */
1087struct wmi_set_tx_select_rates32_cmd {
1088 __le32 ratemask[WMI_RATES_MODE_MAX];
1089} __packed;
1090
1091/* WMI_SET_TX_SELECT_RATES_CMDID */
1092struct wmi_set_tx_select_rates64_cmd {
1093 __le64 ratemask[WMI_RATES_MODE_MAX];
1094} __packed;
1095
1051/* WMI_SET_DISC_TIMEOUT_CMDID */ 1096/* WMI_SET_DISC_TIMEOUT_CMDID */
1052struct wmi_disc_timeout_cmd { 1097struct wmi_disc_timeout_cmd {
1053 /* seconds */ 1098 /* seconds */
@@ -1572,6 +1617,10 @@ struct roam_ctrl_cmd {
1572 u8 roam_ctrl; 1617 u8 roam_ctrl;
1573} __packed; 1618} __packed;
1574 1619
1620struct set_dtim_cmd {
1621 __le32 dtim_period;
1622} __packed;
1623
1575/* BSS INFO HDR version 2.0 */ 1624/* BSS INFO HDR version 2.0 */
1576struct wmi_bss_info_hdr2 { 1625struct wmi_bss_info_hdr2 {
1577 __le16 ch; /* frequency in MHz */ 1626 __le16 ch; /* frequency in MHz */
@@ -2532,6 +2581,8 @@ int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, u8 if_idx,
2532 __be32 ips0, __be32 ips1); 2581 __be32 ips0, __be32 ips1);
2533int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx, 2582int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
2534 enum ath6kl_host_mode host_mode); 2583 enum ath6kl_host_mode host_mode);
2584int ath6kl_wmi_set_bitrate_mask(struct wmi *wmi, u8 if_idx,
2585 const struct cfg80211_bitrate_mask *mask);
2535int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx, 2586int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx,
2536 enum ath6kl_wow_mode wow_mode, 2587 enum ath6kl_wow_mode wow_mode,
2537 u32 filter, u16 host_req_delay); 2588 u32 filter, u16 host_req_delay);
@@ -2542,11 +2593,14 @@ int ath6kl_wmi_add_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
2542int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx, 2593int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
2543 u16 list_id, u16 filter_id); 2594 u16 list_id, u16 filter_id);
2544int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi); 2595int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi);
2596int ath6kl_wmi_ap_set_dtim_cmd(struct wmi *wmi, u8 if_idx, u32 dtim_period);
2545int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid); 2597int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid);
2546int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode); 2598int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode);
2547int ath6kl_wmi_mcast_filter_cmd(struct wmi *wmi, u8 if_idx, bool mc_all_on); 2599int ath6kl_wmi_mcast_filter_cmd(struct wmi *wmi, u8 if_idx, bool mc_all_on);
2548int ath6kl_wmi_add_del_mcast_filter_cmd(struct wmi *wmi, u8 if_idx, 2600int ath6kl_wmi_add_del_mcast_filter_cmd(struct wmi *wmi, u8 if_idx,
2549 u8 *filter, bool add_filter); 2601 u8 *filter, bool add_filter);
2602int ath6kl_wmi_sta_bmiss_enhance_cmd(struct wmi *wmi, u8 if_idx, bool enable);
2603
2550/* AP mode uAPSD */ 2604/* AP mode uAPSD */
2551int ath6kl_wmi_ap_set_apsd(struct wmi *wmi, u8 if_idx, u8 enable); 2605int ath6kl_wmi_ap_set_apsd(struct wmi *wmi, u8 if_idx, u8 enable);
2552 2606
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 3f0b84723789..9c41232b0cd0 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -3,7 +3,9 @@ ath9k-y += beacon.o \
3 init.o \ 3 init.o \
4 main.o \ 4 main.o \
5 recv.o \ 5 recv.o \
6 xmit.o 6 xmit.o \
7 link.o \
8 antenna.o
7 9
8ath9k-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += mci.o 10ath9k-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += mci.o
9ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o 11ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 5e47ca6d16a8..3a69804f4c16 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -35,6 +35,10 @@ static const struct platform_device_id ath9k_platform_id_table[] = {
35 .name = "ar934x_wmac", 35 .name = "ar934x_wmac",
36 .driver_data = AR9300_DEVID_AR9340, 36 .driver_data = AR9300_DEVID_AR9340,
37 }, 37 },
38 {
39 .name = "qca955x_wmac",
40 .driver_data = AR9300_DEVID_QCA955X,
41 },
38 {}, 42 {},
39}; 43};
40 44
@@ -126,7 +130,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
126 sc->irq = irq; 130 sc->irq = irq;
127 131
128 /* Will be cleared in ath9k_start() */ 132 /* Will be cleared in ath9k_start() */
129 sc->sc_flags |= SC_OP_INVALID; 133 set_bit(SC_OP_INVALID, &sc->sc_flags);
130 134
131 ret = request_irq(irq, ath_isr, IRQF_SHARED, "ath9k", sc); 135 ret = request_irq(irq, ath_isr, IRQF_SHARED, "ath9k", sc);
132 if (ret) { 136 if (ret) {
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index b4c77f9d7470..ff007f500feb 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -104,11 +104,6 @@ static const struct ani_cck_level_entry cck_level_table[] = {
104#define ATH9K_ANI_CCK_DEF_LEVEL \ 104#define ATH9K_ANI_CCK_DEF_LEVEL \
105 2 /* default level - matches the INI settings */ 105 2 /* default level - matches the INI settings */
106 106
107static bool use_new_ani(struct ath_hw *ah)
108{
109 return AR_SREV_9300_20_OR_LATER(ah) || modparam_force_new_ani;
110}
111
112static void ath9k_hw_update_mibstats(struct ath_hw *ah, 107static void ath9k_hw_update_mibstats(struct ath_hw *ah,
113 struct ath9k_mib_stats *stats) 108 struct ath9k_mib_stats *stats)
114{ 109{
@@ -122,8 +117,6 @@ static void ath9k_hw_update_mibstats(struct ath_hw *ah,
122static void ath9k_ani_restart(struct ath_hw *ah) 117static void ath9k_ani_restart(struct ath_hw *ah)
123{ 118{
124 struct ar5416AniState *aniState; 119 struct ar5416AniState *aniState;
125 struct ath_common *common = ath9k_hw_common(ah);
126 u32 ofdm_base = 0, cck_base = 0;
127 120
128 if (!DO_ANI(ah)) 121 if (!DO_ANI(ah))
129 return; 122 return;
@@ -131,18 +124,10 @@ static void ath9k_ani_restart(struct ath_hw *ah)
131 aniState = &ah->curchan->ani; 124 aniState = &ah->curchan->ani;
132 aniState->listenTime = 0; 125 aniState->listenTime = 0;
133 126
134 if (!use_new_ani(ah)) {
135 ofdm_base = AR_PHY_COUNTMAX - ah->config.ofdm_trig_high;
136 cck_base = AR_PHY_COUNTMAX - ah->config.cck_trig_high;
137 }
138
139 ath_dbg(common, ANI, "Writing ofdmbase=%u cckbase=%u\n",
140 ofdm_base, cck_base);
141
142 ENABLE_REGWRITE_BUFFER(ah); 127 ENABLE_REGWRITE_BUFFER(ah);
143 128
144 REG_WRITE(ah, AR_PHY_ERR_1, ofdm_base); 129 REG_WRITE(ah, AR_PHY_ERR_1, 0);
145 REG_WRITE(ah, AR_PHY_ERR_2, cck_base); 130 REG_WRITE(ah, AR_PHY_ERR_2, 0);
146 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING); 131 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
147 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING); 132 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
148 133
@@ -154,129 +139,23 @@ static void ath9k_ani_restart(struct ath_hw *ah)
154 aniState->cckPhyErrCount = 0; 139 aniState->cckPhyErrCount = 0;
155} 140}
156 141
157static void ath9k_hw_ani_ofdm_err_trigger_old(struct ath_hw *ah)
158{
159 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
160 struct ar5416AniState *aniState;
161 int32_t rssi;
162
163 aniState = &ah->curchan->ani;
164
165 if (aniState->noiseImmunityLevel < HAL_NOISE_IMMUNE_MAX) {
166 if (ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
167 aniState->noiseImmunityLevel + 1)) {
168 return;
169 }
170 }
171
172 if (aniState->spurImmunityLevel < HAL_SPUR_IMMUNE_MAX) {
173 if (ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
174 aniState->spurImmunityLevel + 1)) {
175 return;
176 }
177 }
178
179 if (ah->opmode == NL80211_IFTYPE_AP) {
180 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) {
181 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
182 aniState->firstepLevel + 1);
183 }
184 return;
185 }
186 rssi = BEACON_RSSI(ah);
187 if (rssi > aniState->rssiThrHigh) {
188 if (!aniState->ofdmWeakSigDetectOff) {
189 if (ath9k_hw_ani_control(ah,
190 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
191 false)) {
192 ath9k_hw_ani_control(ah,
193 ATH9K_ANI_SPUR_IMMUNITY_LEVEL, 0);
194 return;
195 }
196 }
197 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) {
198 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
199 aniState->firstepLevel + 1);
200 return;
201 }
202 } else if (rssi > aniState->rssiThrLow) {
203 if (aniState->ofdmWeakSigDetectOff)
204 ath9k_hw_ani_control(ah,
205 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
206 true);
207 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX)
208 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
209 aniState->firstepLevel + 1);
210 return;
211 } else {
212 if ((conf->channel->band == IEEE80211_BAND_2GHZ) &&
213 !conf_is_ht(conf)) {
214 if (!aniState->ofdmWeakSigDetectOff)
215 ath9k_hw_ani_control(ah,
216 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
217 false);
218 if (aniState->firstepLevel > 0)
219 ath9k_hw_ani_control(ah,
220 ATH9K_ANI_FIRSTEP_LEVEL, 0);
221 return;
222 }
223 }
224}
225
226static void ath9k_hw_ani_cck_err_trigger_old(struct ath_hw *ah)
227{
228 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
229 struct ar5416AniState *aniState;
230 int32_t rssi;
231
232 aniState = &ah->curchan->ani;
233 if (aniState->noiseImmunityLevel < HAL_NOISE_IMMUNE_MAX) {
234 if (ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
235 aniState->noiseImmunityLevel + 1)) {
236 return;
237 }
238 }
239 if (ah->opmode == NL80211_IFTYPE_AP) {
240 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) {
241 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
242 aniState->firstepLevel + 1);
243 }
244 return;
245 }
246 rssi = BEACON_RSSI(ah);
247 if (rssi > aniState->rssiThrLow) {
248 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX)
249 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
250 aniState->firstepLevel + 1);
251 } else {
252 if ((conf->channel->band == IEEE80211_BAND_2GHZ) &&
253 !conf_is_ht(conf)) {
254 if (aniState->firstepLevel > 0)
255 ath9k_hw_ani_control(ah,
256 ATH9K_ANI_FIRSTEP_LEVEL, 0);
257 }
258 }
259}
260
261/* Adjust the OFDM Noise Immunity Level */ 142/* Adjust the OFDM Noise Immunity Level */
262static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel) 143static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel,
144 bool scan)
263{ 145{
264 struct ar5416AniState *aniState = &ah->curchan->ani; 146 struct ar5416AniState *aniState = &ah->curchan->ani;
265 struct ath_common *common = ath9k_hw_common(ah); 147 struct ath_common *common = ath9k_hw_common(ah);
266 const struct ani_ofdm_level_entry *entry_ofdm; 148 const struct ani_ofdm_level_entry *entry_ofdm;
267 const struct ani_cck_level_entry *entry_cck; 149 const struct ani_cck_level_entry *entry_cck;
268 150 bool weak_sig;
269 aniState->noiseFloor = BEACON_RSSI(ah);
270 151
271 ath_dbg(common, ANI, "**** ofdmlevel %d=>%d, rssi=%d[lo=%d hi=%d]\n", 152 ath_dbg(common, ANI, "**** ofdmlevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
272 aniState->ofdmNoiseImmunityLevel, 153 aniState->ofdmNoiseImmunityLevel,
273 immunityLevel, aniState->noiseFloor, 154 immunityLevel, BEACON_RSSI(ah),
274 aniState->rssiThrLow, aniState->rssiThrHigh); 155 aniState->rssiThrLow, aniState->rssiThrHigh);
275 156
276 if (aniState->update_ani) 157 if (!scan)
277 aniState->ofdmNoiseImmunityLevel = 158 aniState->ofdmNoiseImmunityLevel = immunityLevel;
278 (immunityLevel > ATH9K_ANI_OFDM_DEF_LEVEL) ?
279 immunityLevel : ATH9K_ANI_OFDM_DEF_LEVEL;
280 159
281 entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel]; 160 entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel];
282 entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel]; 161 entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel];
@@ -292,12 +171,22 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel)
292 ATH9K_ANI_FIRSTEP_LEVEL, 171 ATH9K_ANI_FIRSTEP_LEVEL,
293 entry_ofdm->fir_step_level); 172 entry_ofdm->fir_step_level);
294 173
295 if ((aniState->noiseFloor >= aniState->rssiThrHigh) && 174 weak_sig = entry_ofdm->ofdm_weak_signal_on;
296 (!aniState->ofdmWeakSigDetectOff != 175 if (ah->opmode == NL80211_IFTYPE_STATION &&
297 entry_ofdm->ofdm_weak_signal_on)) { 176 BEACON_RSSI(ah) <= aniState->rssiThrHigh)
177 weak_sig = true;
178
179 if (aniState->ofdmWeakSigDetect != weak_sig)
298 ath9k_hw_ani_control(ah, 180 ath9k_hw_ani_control(ah,
299 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION, 181 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
300 entry_ofdm->ofdm_weak_signal_on); 182 entry_ofdm->ofdm_weak_signal_on);
183
184 if (aniState->ofdmNoiseImmunityLevel >= ATH9K_ANI_OFDM_DEF_LEVEL) {
185 ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH;
186 ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI;
187 } else {
188 ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI;
189 ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW;
301 } 190 }
302} 191}
303 192
@@ -308,43 +197,35 @@ static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah)
308 if (!DO_ANI(ah)) 197 if (!DO_ANI(ah))
309 return; 198 return;
310 199
311 if (!use_new_ani(ah)) {
312 ath9k_hw_ani_ofdm_err_trigger_old(ah);
313 return;
314 }
315
316 aniState = &ah->curchan->ani; 200 aniState = &ah->curchan->ani;
317 201
318 if (aniState->ofdmNoiseImmunityLevel < ATH9K_ANI_OFDM_MAX_LEVEL) 202 if (aniState->ofdmNoiseImmunityLevel < ATH9K_ANI_OFDM_MAX_LEVEL)
319 ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel + 1); 203 ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel + 1, false);
320} 204}
321 205
322/* 206/*
323 * Set the ANI settings to match an CCK level. 207 * Set the ANI settings to match an CCK level.
324 */ 208 */
325static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel) 209static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel,
210 bool scan)
326{ 211{
327 struct ar5416AniState *aniState = &ah->curchan->ani; 212 struct ar5416AniState *aniState = &ah->curchan->ani;
328 struct ath_common *common = ath9k_hw_common(ah); 213 struct ath_common *common = ath9k_hw_common(ah);
329 const struct ani_ofdm_level_entry *entry_ofdm; 214 const struct ani_ofdm_level_entry *entry_ofdm;
330 const struct ani_cck_level_entry *entry_cck; 215 const struct ani_cck_level_entry *entry_cck;
331 216
332 aniState->noiseFloor = BEACON_RSSI(ah);
333 ath_dbg(common, ANI, "**** ccklevel %d=>%d, rssi=%d[lo=%d hi=%d]\n", 217 ath_dbg(common, ANI, "**** ccklevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
334 aniState->cckNoiseImmunityLevel, immunityLevel, 218 aniState->cckNoiseImmunityLevel, immunityLevel,
335 aniState->noiseFloor, aniState->rssiThrLow, 219 BEACON_RSSI(ah), aniState->rssiThrLow,
336 aniState->rssiThrHigh); 220 aniState->rssiThrHigh);
337 221
338 if ((ah->opmode == NL80211_IFTYPE_STATION || 222 if (ah->opmode == NL80211_IFTYPE_STATION &&
339 ah->opmode == NL80211_IFTYPE_ADHOC) && 223 BEACON_RSSI(ah) <= aniState->rssiThrLow &&
340 aniState->noiseFloor <= aniState->rssiThrLow &&
341 immunityLevel > ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI) 224 immunityLevel > ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI)
342 immunityLevel = ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI; 225 immunityLevel = ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI;
343 226
344 if (aniState->update_ani) 227 if (!scan)
345 aniState->cckNoiseImmunityLevel = 228 aniState->cckNoiseImmunityLevel = immunityLevel;
346 (immunityLevel > ATH9K_ANI_CCK_DEF_LEVEL) ?
347 immunityLevel : ATH9K_ANI_CCK_DEF_LEVEL;
348 229
349 entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel]; 230 entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel];
350 entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel]; 231 entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel];
@@ -359,7 +240,7 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel)
359 if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9485(ah)) 240 if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9485(ah))
360 return; 241 return;
361 242
362 if (aniState->mrcCCKOff == entry_cck->mrc_cck_on) 243 if (aniState->mrcCCK != entry_cck->mrc_cck_on)
363 ath9k_hw_ani_control(ah, 244 ath9k_hw_ani_control(ah,
364 ATH9K_ANI_MRC_CCK, 245 ATH9K_ANI_MRC_CCK,
365 entry_cck->mrc_cck_on); 246 entry_cck->mrc_cck_on);
@@ -372,68 +253,11 @@ static void ath9k_hw_ani_cck_err_trigger(struct ath_hw *ah)
372 if (!DO_ANI(ah)) 253 if (!DO_ANI(ah))
373 return; 254 return;
374 255
375 if (!use_new_ani(ah)) {
376 ath9k_hw_ani_cck_err_trigger_old(ah);
377 return;
378 }
379
380 aniState = &ah->curchan->ani; 256 aniState = &ah->curchan->ani;
381 257
382 if (aniState->cckNoiseImmunityLevel < ATH9K_ANI_CCK_MAX_LEVEL) 258 if (aniState->cckNoiseImmunityLevel < ATH9K_ANI_CCK_MAX_LEVEL)
383 ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel + 1); 259 ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel + 1,
384} 260 false);
385
386static void ath9k_hw_ani_lower_immunity_old(struct ath_hw *ah)
387{
388 struct ar5416AniState *aniState;
389 int32_t rssi;
390
391 aniState = &ah->curchan->ani;
392
393 if (ah->opmode == NL80211_IFTYPE_AP) {
394 if (aniState->firstepLevel > 0) {
395 if (ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
396 aniState->firstepLevel - 1))
397 return;
398 }
399 } else {
400 rssi = BEACON_RSSI(ah);
401 if (rssi > aniState->rssiThrHigh) {
402 /* XXX: Handle me */
403 } else if (rssi > aniState->rssiThrLow) {
404 if (aniState->ofdmWeakSigDetectOff) {
405 if (ath9k_hw_ani_control(ah,
406 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
407 true))
408 return;
409 }
410 if (aniState->firstepLevel > 0) {
411 if (ath9k_hw_ani_control(ah,
412 ATH9K_ANI_FIRSTEP_LEVEL,
413 aniState->firstepLevel - 1))
414 return;
415 }
416 } else {
417 if (aniState->firstepLevel > 0) {
418 if (ath9k_hw_ani_control(ah,
419 ATH9K_ANI_FIRSTEP_LEVEL,
420 aniState->firstepLevel - 1))
421 return;
422 }
423 }
424 }
425
426 if (aniState->spurImmunityLevel > 0) {
427 if (ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
428 aniState->spurImmunityLevel - 1))
429 return;
430 }
431
432 if (aniState->noiseImmunityLevel > 0) {
433 ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
434 aniState->noiseImmunityLevel - 1);
435 return;
436 }
437} 261}
438 262
439/* 263/*
@@ -446,87 +270,18 @@ static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah)
446 270
447 aniState = &ah->curchan->ani; 271 aniState = &ah->curchan->ani;
448 272
449 if (!use_new_ani(ah)) {
450 ath9k_hw_ani_lower_immunity_old(ah);
451 return;
452 }
453
454 /* lower OFDM noise immunity */ 273 /* lower OFDM noise immunity */
455 if (aniState->ofdmNoiseImmunityLevel > 0 && 274 if (aniState->ofdmNoiseImmunityLevel > 0 &&
456 (aniState->ofdmsTurn || aniState->cckNoiseImmunityLevel == 0)) { 275 (aniState->ofdmsTurn || aniState->cckNoiseImmunityLevel == 0)) {
457 ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel - 1); 276 ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel - 1,
277 false);
458 return; 278 return;
459 } 279 }
460 280
461 /* lower CCK noise immunity */ 281 /* lower CCK noise immunity */
462 if (aniState->cckNoiseImmunityLevel > 0) 282 if (aniState->cckNoiseImmunityLevel > 0)
463 ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel - 1); 283 ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel - 1,
464} 284 false);
465
466static void ath9k_ani_reset_old(struct ath_hw *ah, bool is_scanning)
467{
468 struct ar5416AniState *aniState;
469 struct ath9k_channel *chan = ah->curchan;
470 struct ath_common *common = ath9k_hw_common(ah);
471
472 if (!DO_ANI(ah))
473 return;
474
475 aniState = &ah->curchan->ani;
476
477 if (ah->opmode != NL80211_IFTYPE_STATION
478 && ah->opmode != NL80211_IFTYPE_ADHOC) {
479 ath_dbg(common, ANI, "Reset ANI state opmode %u\n", ah->opmode);
480 ah->stats.ast_ani_reset++;
481
482 if (ah->opmode == NL80211_IFTYPE_AP) {
483 /*
484 * ath9k_hw_ani_control() will only process items set on
485 * ah->ani_function
486 */
487 if (IS_CHAN_2GHZ(chan))
488 ah->ani_function = (ATH9K_ANI_SPUR_IMMUNITY_LEVEL |
489 ATH9K_ANI_FIRSTEP_LEVEL);
490 else
491 ah->ani_function = 0;
492 }
493
494 ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL, 0);
495 ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL, 0);
496 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, 0);
497 ath9k_hw_ani_control(ah, ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
498 !ATH9K_ANI_USE_OFDM_WEAK_SIG);
499 ath9k_hw_ani_control(ah, ATH9K_ANI_CCK_WEAK_SIGNAL_THR,
500 ATH9K_ANI_CCK_WEAK_SIG_THR);
501
502 ath9k_ani_restart(ah);
503 return;
504 }
505
506 if (aniState->noiseImmunityLevel != 0)
507 ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
508 aniState->noiseImmunityLevel);
509 if (aniState->spurImmunityLevel != 0)
510 ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
511 aniState->spurImmunityLevel);
512 if (aniState->ofdmWeakSigDetectOff)
513 ath9k_hw_ani_control(ah, ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
514 !aniState->ofdmWeakSigDetectOff);
515 if (aniState->cckWeakSigThreshold)
516 ath9k_hw_ani_control(ah, ATH9K_ANI_CCK_WEAK_SIGNAL_THR,
517 aniState->cckWeakSigThreshold);
518 if (aniState->firstepLevel != 0)
519 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
520 aniState->firstepLevel);
521
522 ath9k_ani_restart(ah);
523
524 ENABLE_REGWRITE_BUFFER(ah);
525
526 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
527 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
528
529 REGWRITE_BUFFER_FLUSH(ah);
530} 285}
531 286
532/* 287/*
@@ -539,13 +294,11 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
539 struct ar5416AniState *aniState = &ah->curchan->ani; 294 struct ar5416AniState *aniState = &ah->curchan->ani;
540 struct ath9k_channel *chan = ah->curchan; 295 struct ath9k_channel *chan = ah->curchan;
541 struct ath_common *common = ath9k_hw_common(ah); 296 struct ath_common *common = ath9k_hw_common(ah);
297 int ofdm_nil, cck_nil;
542 298
543 if (!DO_ANI(ah)) 299 if (!DO_ANI(ah))
544 return; 300 return;
545 301
546 if (!use_new_ani(ah))
547 return ath9k_ani_reset_old(ah, is_scanning);
548
549 BUG_ON(aniState == NULL); 302 BUG_ON(aniState == NULL);
550 ah->stats.ast_ani_reset++; 303 ah->stats.ast_ani_reset++;
551 304
@@ -563,6 +316,11 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
563 /* always allow mode (on/off) to be controlled */ 316 /* always allow mode (on/off) to be controlled */
564 ah->ani_function |= ATH9K_ANI_MODE; 317 ah->ani_function |= ATH9K_ANI_MODE;
565 318
319 ofdm_nil = max_t(int, ATH9K_ANI_OFDM_DEF_LEVEL,
320 aniState->ofdmNoiseImmunityLevel);
321 cck_nil = max_t(int, ATH9K_ANI_CCK_DEF_LEVEL,
322 aniState->cckNoiseImmunityLevel);
323
566 if (is_scanning || 324 if (is_scanning ||
567 (ah->opmode != NL80211_IFTYPE_STATION && 325 (ah->opmode != NL80211_IFTYPE_STATION &&
568 ah->opmode != NL80211_IFTYPE_ADHOC)) { 326 ah->opmode != NL80211_IFTYPE_ADHOC)) {
@@ -585,9 +343,8 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
585 aniState->ofdmNoiseImmunityLevel, 343 aniState->ofdmNoiseImmunityLevel,
586 aniState->cckNoiseImmunityLevel); 344 aniState->cckNoiseImmunityLevel);
587 345
588 aniState->update_ani = false; 346 ofdm_nil = ATH9K_ANI_OFDM_DEF_LEVEL;
589 ath9k_hw_set_ofdm_nil(ah, ATH9K_ANI_OFDM_DEF_LEVEL); 347 cck_nil = ATH9K_ANI_CCK_DEF_LEVEL;
590 ath9k_hw_set_cck_nil(ah, ATH9K_ANI_CCK_DEF_LEVEL);
591 } 348 }
592 } else { 349 } else {
593 /* 350 /*
@@ -601,13 +358,9 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
601 is_scanning, 358 is_scanning,
602 aniState->ofdmNoiseImmunityLevel, 359 aniState->ofdmNoiseImmunityLevel,
603 aniState->cckNoiseImmunityLevel); 360 aniState->cckNoiseImmunityLevel);
604
605 aniState->update_ani = true;
606 ath9k_hw_set_ofdm_nil(ah,
607 aniState->ofdmNoiseImmunityLevel);
608 ath9k_hw_set_cck_nil(ah,
609 aniState->cckNoiseImmunityLevel);
610 } 361 }
362 ath9k_hw_set_ofdm_nil(ah, ofdm_nil, is_scanning);
363 ath9k_hw_set_cck_nil(ah, cck_nil, is_scanning);
611 364
612 /* 365 /*
613 * enable phy counters if hw supports or if not, enable phy 366 * enable phy counters if hw supports or if not, enable phy
@@ -627,9 +380,6 @@ static bool ath9k_hw_ani_read_counters(struct ath_hw *ah)
627{ 380{
628 struct ath_common *common = ath9k_hw_common(ah); 381 struct ath_common *common = ath9k_hw_common(ah);
629 struct ar5416AniState *aniState = &ah->curchan->ani; 382 struct ar5416AniState *aniState = &ah->curchan->ani;
630 u32 ofdm_base = 0;
631 u32 cck_base = 0;
632 u32 ofdmPhyErrCnt, cckPhyErrCnt;
633 u32 phyCnt1, phyCnt2; 383 u32 phyCnt1, phyCnt2;
634 int32_t listenTime; 384 int32_t listenTime;
635 385
@@ -642,11 +392,6 @@ static bool ath9k_hw_ani_read_counters(struct ath_hw *ah)
642 return false; 392 return false;
643 } 393 }
644 394
645 if (!use_new_ani(ah)) {
646 ofdm_base = AR_PHY_COUNTMAX - ah->config.ofdm_trig_high;
647 cck_base = AR_PHY_COUNTMAX - ah->config.cck_trig_high;
648 }
649
650 aniState->listenTime += listenTime; 395 aniState->listenTime += listenTime;
651 396
652 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats); 397 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
@@ -654,35 +399,12 @@ static bool ath9k_hw_ani_read_counters(struct ath_hw *ah)
654 phyCnt1 = REG_READ(ah, AR_PHY_ERR_1); 399 phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
655 phyCnt2 = REG_READ(ah, AR_PHY_ERR_2); 400 phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
656 401
657 if (!use_new_ani(ah) && (phyCnt1 < ofdm_base || phyCnt2 < cck_base)) { 402 ah->stats.ast_ani_ofdmerrs += phyCnt1 - aniState->ofdmPhyErrCount;
658 if (phyCnt1 < ofdm_base) { 403 aniState->ofdmPhyErrCount = phyCnt1;
659 ath_dbg(common, ANI,
660 "phyCnt1 0x%x, resetting counter value to 0x%x\n",
661 phyCnt1, ofdm_base);
662 REG_WRITE(ah, AR_PHY_ERR_1, ofdm_base);
663 REG_WRITE(ah, AR_PHY_ERR_MASK_1,
664 AR_PHY_ERR_OFDM_TIMING);
665 }
666 if (phyCnt2 < cck_base) {
667 ath_dbg(common, ANI,
668 "phyCnt2 0x%x, resetting counter value to 0x%x\n",
669 phyCnt2, cck_base);
670 REG_WRITE(ah, AR_PHY_ERR_2, cck_base);
671 REG_WRITE(ah, AR_PHY_ERR_MASK_2,
672 AR_PHY_ERR_CCK_TIMING);
673 }
674 return false;
675 }
676 404
677 ofdmPhyErrCnt = phyCnt1 - ofdm_base; 405 ah->stats.ast_ani_cckerrs += phyCnt2 - aniState->cckPhyErrCount;
678 ah->stats.ast_ani_ofdmerrs += 406 aniState->cckPhyErrCount = phyCnt2;
679 ofdmPhyErrCnt - aniState->ofdmPhyErrCount;
680 aniState->ofdmPhyErrCount = ofdmPhyErrCnt;
681 407
682 cckPhyErrCnt = phyCnt2 - cck_base;
683 ah->stats.ast_ani_cckerrs +=
684 cckPhyErrCnt - aniState->cckPhyErrCount;
685 aniState->cckPhyErrCount = cckPhyErrCnt;
686 return true; 408 return true;
687} 409}
688 410
@@ -716,21 +438,10 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan)
716 438
717 if (aniState->listenTime > ah->aniperiod) { 439 if (aniState->listenTime > ah->aniperiod) {
718 if (cckPhyErrRate < ah->config.cck_trig_low && 440 if (cckPhyErrRate < ah->config.cck_trig_low &&
719 ((ofdmPhyErrRate < ah->config.ofdm_trig_low && 441 ofdmPhyErrRate < ah->config.ofdm_trig_low) {
720 aniState->ofdmNoiseImmunityLevel <
721 ATH9K_ANI_OFDM_DEF_LEVEL) ||
722 (ofdmPhyErrRate < ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI &&
723 aniState->ofdmNoiseImmunityLevel >=
724 ATH9K_ANI_OFDM_DEF_LEVEL))) {
725 ath9k_hw_ani_lower_immunity(ah); 442 ath9k_hw_ani_lower_immunity(ah);
726 aniState->ofdmsTurn = !aniState->ofdmsTurn; 443 aniState->ofdmsTurn = !aniState->ofdmsTurn;
727 } else if ((ofdmPhyErrRate > ah->config.ofdm_trig_high && 444 } else if (ofdmPhyErrRate > ah->config.ofdm_trig_high) {
728 aniState->ofdmNoiseImmunityLevel >=
729 ATH9K_ANI_OFDM_DEF_LEVEL) ||
730 (ofdmPhyErrRate >
731 ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI &&
732 aniState->ofdmNoiseImmunityLevel <
733 ATH9K_ANI_OFDM_DEF_LEVEL)) {
734 ath9k_hw_ani_ofdm_err_trigger(ah); 445 ath9k_hw_ani_ofdm_err_trigger(ah);
735 aniState->ofdmsTurn = false; 446 aniState->ofdmsTurn = false;
736 } else if (cckPhyErrRate > ah->config.cck_trig_high) { 447 } else if (cckPhyErrRate > ah->config.cck_trig_high) {
@@ -778,49 +489,6 @@ void ath9k_hw_disable_mib_counters(struct ath_hw *ah)
778} 489}
779EXPORT_SYMBOL(ath9k_hw_disable_mib_counters); 490EXPORT_SYMBOL(ath9k_hw_disable_mib_counters);
780 491
781/*
782 * Process a MIB interrupt. We may potentially be invoked because
783 * any of the MIB counters overflow/trigger so don't assume we're
784 * here because a PHY error counter triggered.
785 */
786void ath9k_hw_proc_mib_event(struct ath_hw *ah)
787{
788 u32 phyCnt1, phyCnt2;
789
790 /* Reset these counters regardless */
791 REG_WRITE(ah, AR_FILT_OFDM, 0);
792 REG_WRITE(ah, AR_FILT_CCK, 0);
793 if (!(REG_READ(ah, AR_SLP_MIB_CTRL) & AR_SLP_MIB_PENDING))
794 REG_WRITE(ah, AR_SLP_MIB_CTRL, AR_SLP_MIB_CLEAR);
795
796 /* Clear the mib counters and save them in the stats */
797 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
798
799 if (!DO_ANI(ah)) {
800 /*
801 * We must always clear the interrupt cause by
802 * resetting the phy error regs.
803 */
804 REG_WRITE(ah, AR_PHY_ERR_1, 0);
805 REG_WRITE(ah, AR_PHY_ERR_2, 0);
806 return;
807 }
808
809 /* NB: these are not reset-on-read */
810 phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
811 phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
812 if (((phyCnt1 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK) ||
813 ((phyCnt2 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK)) {
814
815 if (!use_new_ani(ah))
816 ath9k_hw_ani_read_counters(ah);
817
818 /* NB: always restart to insure the h/w counters are reset */
819 ath9k_ani_restart(ah);
820 }
821}
822EXPORT_SYMBOL(ath9k_hw_proc_mib_event);
823
824void ath9k_hw_ani_setup(struct ath_hw *ah) 492void ath9k_hw_ani_setup(struct ath_hw *ah)
825{ 493{
826 int i; 494 int i;
@@ -845,66 +513,37 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
845 513
846 ath_dbg(common, ANI, "Initialize ANI\n"); 514 ath_dbg(common, ANI, "Initialize ANI\n");
847 515
848 if (use_new_ani(ah)) { 516 ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH;
849 ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH_NEW; 517 ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW;
850 ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW_NEW;
851 518
852 ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH_NEW; 519 ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH;
853 ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW_NEW; 520 ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW;
854 } else {
855 ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH_OLD;
856 ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW_OLD;
857
858 ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH_OLD;
859 ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW_OLD;
860 }
861 521
862 for (i = 0; i < ARRAY_SIZE(ah->channels); i++) { 522 for (i = 0; i < ARRAY_SIZE(ah->channels); i++) {
863 struct ath9k_channel *chan = &ah->channels[i]; 523 struct ath9k_channel *chan = &ah->channels[i];
864 struct ar5416AniState *ani = &chan->ani; 524 struct ar5416AniState *ani = &chan->ani;
865 525
866 if (use_new_ani(ah)) { 526 ani->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
867 ani->spurImmunityLevel =
868 ATH9K_ANI_SPUR_IMMUNE_LVL_NEW;
869 527
870 ani->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW; 528 ani->firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
871 529
872 if (AR_SREV_9300_20_OR_LATER(ah)) 530 ani->mrcCCK = AR_SREV_9300_20_OR_LATER(ah) ? true : false;
873 ani->mrcCCKOff = 531
874 !ATH9K_ANI_ENABLE_MRC_CCK; 532 ani->ofdmsTurn = true;
875 else
876 ani->mrcCCKOff = true;
877
878 ani->ofdmsTurn = true;
879 } else {
880 ani->spurImmunityLevel =
881 ATH9K_ANI_SPUR_IMMUNE_LVL_OLD;
882 ani->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_OLD;
883
884 ani->cckWeakSigThreshold =
885 ATH9K_ANI_CCK_WEAK_SIG_THR;
886 }
887 533
888 ani->rssiThrHigh = ATH9K_ANI_RSSI_THR_HIGH; 534 ani->rssiThrHigh = ATH9K_ANI_RSSI_THR_HIGH;
889 ani->rssiThrLow = ATH9K_ANI_RSSI_THR_LOW; 535 ani->rssiThrLow = ATH9K_ANI_RSSI_THR_LOW;
890 ani->ofdmWeakSigDetectOff = 536 ani->ofdmWeakSigDetect = ATH9K_ANI_USE_OFDM_WEAK_SIG;
891 !ATH9K_ANI_USE_OFDM_WEAK_SIG;
892 ani->cckNoiseImmunityLevel = ATH9K_ANI_CCK_DEF_LEVEL; 537 ani->cckNoiseImmunityLevel = ATH9K_ANI_CCK_DEF_LEVEL;
893 ani->ofdmNoiseImmunityLevel = ATH9K_ANI_OFDM_DEF_LEVEL; 538 ani->ofdmNoiseImmunityLevel = ATH9K_ANI_OFDM_DEF_LEVEL;
894 ani->update_ani = false;
895 } 539 }
896 540
897 /* 541 /*
898 * since we expect some ongoing maintenance on the tables, let's sanity 542 * since we expect some ongoing maintenance on the tables, let's sanity
899 * check here default level should not modify INI setting. 543 * check here default level should not modify INI setting.
900 */ 544 */
901 if (use_new_ani(ah)) { 545 ah->aniperiod = ATH9K_ANI_PERIOD;
902 ah->aniperiod = ATH9K_ANI_PERIOD_NEW; 546 ah->config.ani_poll_interval = ATH9K_ANI_POLLINTERVAL;
903 ah->config.ani_poll_interval = ATH9K_ANI_POLLINTERVAL_NEW;
904 } else {
905 ah->aniperiod = ATH9K_ANI_PERIOD_OLD;
906 ah->config.ani_poll_interval = ATH9K_ANI_POLLINTERVAL_OLD;
907 }
908 547
909 if (ah->config.enable_ani) 548 if (ah->config.enable_ani)
910 ah->proc_phyerr |= HAL_PROCESS_ANI; 549 ah->proc_phyerr |= HAL_PROCESS_ANI;
diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h
index 72e2b874e179..1485bf5e3518 100644
--- a/drivers/net/wireless/ath/ath9k/ani.h
+++ b/drivers/net/wireless/ath/ath9k/ani.h
@@ -24,42 +24,34 @@
24#define BEACON_RSSI(ahp) (ahp->stats.avgbrssi) 24#define BEACON_RSSI(ahp) (ahp->stats.avgbrssi)
25 25
26/* units are errors per second */ 26/* units are errors per second */
27#define ATH9K_ANI_OFDM_TRIG_HIGH_OLD 500 27#define ATH9K_ANI_OFDM_TRIG_HIGH 3500
28#define ATH9K_ANI_OFDM_TRIG_HIGH_NEW 3500
29#define ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI 1000 28#define ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI 1000
30 29
31/* units are errors per second */ 30/* units are errors per second */
32#define ATH9K_ANI_OFDM_TRIG_LOW_OLD 200 31#define ATH9K_ANI_OFDM_TRIG_LOW 400
33#define ATH9K_ANI_OFDM_TRIG_LOW_NEW 400
34#define ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI 900 32#define ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI 900
35 33
36/* units are errors per second */ 34/* units are errors per second */
37#define ATH9K_ANI_CCK_TRIG_HIGH_OLD 200 35#define ATH9K_ANI_CCK_TRIG_HIGH 600
38#define ATH9K_ANI_CCK_TRIG_HIGH_NEW 600
39 36
40/* units are errors per second */ 37/* units are errors per second */
41#define ATH9K_ANI_CCK_TRIG_LOW_OLD 100 38#define ATH9K_ANI_CCK_TRIG_LOW 300
42#define ATH9K_ANI_CCK_TRIG_LOW_NEW 300
43 39
44#define ATH9K_ANI_NOISE_IMMUNE_LVL 4 40#define ATH9K_ANI_NOISE_IMMUNE_LVL 4
45#define ATH9K_ANI_USE_OFDM_WEAK_SIG true 41#define ATH9K_ANI_USE_OFDM_WEAK_SIG true
46#define ATH9K_ANI_CCK_WEAK_SIG_THR false 42#define ATH9K_ANI_CCK_WEAK_SIG_THR false
47 43
48#define ATH9K_ANI_SPUR_IMMUNE_LVL_OLD 7 44#define ATH9K_ANI_SPUR_IMMUNE_LVL 3
49#define ATH9K_ANI_SPUR_IMMUNE_LVL_NEW 3
50 45
51#define ATH9K_ANI_FIRSTEP_LVL_OLD 0 46#define ATH9K_ANI_FIRSTEP_LVL 2
52#define ATH9K_ANI_FIRSTEP_LVL_NEW 2
53 47
54#define ATH9K_ANI_RSSI_THR_HIGH 40 48#define ATH9K_ANI_RSSI_THR_HIGH 40
55#define ATH9K_ANI_RSSI_THR_LOW 7 49#define ATH9K_ANI_RSSI_THR_LOW 7
56 50
57#define ATH9K_ANI_PERIOD_OLD 100 51#define ATH9K_ANI_PERIOD 300
58#define ATH9K_ANI_PERIOD_NEW 300
59 52
60/* in ms */ 53/* in ms */
61#define ATH9K_ANI_POLLINTERVAL_OLD 100 54#define ATH9K_ANI_POLLINTERVAL 1000
62#define ATH9K_ANI_POLLINTERVAL_NEW 1000
63 55
64#define HAL_NOISE_IMMUNE_MAX 4 56#define HAL_NOISE_IMMUNE_MAX 4
65#define HAL_SPUR_IMMUNE_MAX 7 57#define HAL_SPUR_IMMUNE_MAX 7
@@ -70,8 +62,6 @@
70#define ATH9K_SIG_SPUR_IMM_SETTING_MIN 0 62#define ATH9K_SIG_SPUR_IMM_SETTING_MIN 0
71#define ATH9K_SIG_SPUR_IMM_SETTING_MAX 22 63#define ATH9K_SIG_SPUR_IMM_SETTING_MAX 22
72 64
73#define ATH9K_ANI_ENABLE_MRC_CCK true
74
75/* values here are relative to the INI */ 65/* values here are relative to the INI */
76 66
77enum ath9k_ani_cmd { 67enum ath9k_ani_cmd {
@@ -119,16 +109,14 @@ struct ar5416AniState {
119 u8 ofdmNoiseImmunityLevel; 109 u8 ofdmNoiseImmunityLevel;
120 u8 cckNoiseImmunityLevel; 110 u8 cckNoiseImmunityLevel;
121 bool ofdmsTurn; 111 bool ofdmsTurn;
122 u8 mrcCCKOff; 112 u8 mrcCCK;
123 u8 spurImmunityLevel; 113 u8 spurImmunityLevel;
124 u8 firstepLevel; 114 u8 firstepLevel;
125 u8 ofdmWeakSigDetectOff; 115 u8 ofdmWeakSigDetect;
126 u8 cckWeakSigThreshold; 116 u8 cckWeakSigThreshold;
127 bool update_ani;
128 u32 listenTime; 117 u32 listenTime;
129 int32_t rssiThrLow; 118 int32_t rssiThrLow;
130 int32_t rssiThrHigh; 119 int32_t rssiThrHigh;
131 u32 noiseFloor;
132 u32 ofdmPhyErrCount; 120 u32 ofdmPhyErrCount;
133 u32 cckPhyErrCount; 121 u32 cckPhyErrCount;
134 int16_t pktRssi[2]; 122 int16_t pktRssi[2];
diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c
new file mode 100644
index 000000000000..bbcfeb3b2a60
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/antenna.c
@@ -0,0 +1,776 @@
1/*
2 * Copyright (c) 2012 Qualcomm Atheros, Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "ath9k.h"
18
19static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
20 int mindelta, int main_rssi_avg,
21 int alt_rssi_avg, int pkt_count)
22{
23 return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
24 (alt_rssi_avg > main_rssi_avg + maxdelta)) ||
25 (alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50);
26}
27
28static inline bool ath_ant_div_comb_alt_check(u8 div_group, int alt_ratio,
29 int curr_main_set, int curr_alt_set,
30 int alt_rssi_avg, int main_rssi_avg)
31{
32 bool result = false;
33 switch (div_group) {
34 case 0:
35 if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
36 result = true;
37 break;
38 case 1:
39 case 2:
40 if ((((curr_main_set == ATH_ANT_DIV_COMB_LNA2) &&
41 (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) &&
42 (alt_rssi_avg >= (main_rssi_avg - 5))) ||
43 ((curr_main_set == ATH_ANT_DIV_COMB_LNA1) &&
44 (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) &&
45 (alt_rssi_avg >= (main_rssi_avg - 2)))) &&
46 (alt_rssi_avg >= 4))
47 result = true;
48 else
49 result = false;
50 break;
51 }
52
53 return result;
54}
55
56static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
57 struct ath_hw_antcomb_conf ant_conf,
58 int main_rssi_avg)
59{
60 antcomb->quick_scan_cnt = 0;
61
62 if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
63 antcomb->rssi_lna2 = main_rssi_avg;
64 else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1)
65 antcomb->rssi_lna1 = main_rssi_avg;
66
67 switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) {
68 case 0x10: /* LNA2 A-B */
69 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
70 antcomb->first_quick_scan_conf =
71 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
72 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
73 break;
74 case 0x20: /* LNA1 A-B */
75 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
76 antcomb->first_quick_scan_conf =
77 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
78 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
79 break;
80 case 0x21: /* LNA1 LNA2 */
81 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2;
82 antcomb->first_quick_scan_conf =
83 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
84 antcomb->second_quick_scan_conf =
85 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
86 break;
87 case 0x12: /* LNA2 LNA1 */
88 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1;
89 antcomb->first_quick_scan_conf =
90 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
91 antcomb->second_quick_scan_conf =
92 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
93 break;
94 case 0x13: /* LNA2 A+B */
95 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
96 antcomb->first_quick_scan_conf =
97 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
98 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
99 break;
100 case 0x23: /* LNA1 A+B */
101 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
102 antcomb->first_quick_scan_conf =
103 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
104 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
105 break;
106 default:
107 break;
108 }
109}
110
/*
 * Advance the three-step antenna-diversity quick scan. Each call handles
 * one value of antcomb->quick_scan_cnt:
 *   0 - program the first probe configuration,
 *   1 - record step-0 results (rssi_first/rssi_second), judge them
 *       (first_ratio) and program the second probe configuration,
 *   2 - record step-1 results (rssi_third), judge them (second_ratio),
 *       end the scan and commit the best main/alt LNA configuration.
 * NOTE(review): several if/else ladders below are brace-less and rely on
 * C's dangling-else binding; do not reformat without adding braces first.
 */
static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
				       struct ath_hw_antcomb_conf *div_ant_conf,
				       int main_rssi_avg, int alt_rssi_avg,
				       int alt_ratio)
{
	/* alt_good */
	switch (antcomb->quick_scan_cnt) {
	case 0:
		/* set alt to main, and alt to first conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf;
		break;
	case 1:
		/* set alt to main, and alt to first conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_second = alt_rssi_avg;

		/*
		 * Judge the first probe: the RSSI margin required of the
		 * alternate depends on which LNA the main antenna uses.
		 */
		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			/* main is LNA1 */
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else {
			/* main is a combined (A+B / A-B) configuration */
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			    (alt_rssi_avg > main_rssi_avg +
			    ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			    (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		}
		break;
	case 2:
		/* Final step: the scan is over after this call. */
		antcomb->alt_good = false;
		antcomb->scan_not_start = false;
		antcomb->scan = false;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_third = alt_rssi_avg;

		/* Propagate the probed RSSI into the per-LNA book-keeping. */
		if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
			antcomb->rssi_lna1 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA2)
			antcomb->rssi_lna2 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
			if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
				antcomb->rssi_lna2 = main_rssi_avg;
			else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
				antcomb->rssi_lna1 = main_rssi_avg;
		}

		/* Pick the main LNA: prefer LNA2 only on a clear margin. */
		if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
		    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
		else
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;

		/* Judge the second probe, mirroring the case-1 logic. */
		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			    (alt_rssi_avg > main_rssi_avg +
			    ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			    (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		}

		/* set alt to the conf with maximum ratio */
		if (antcomb->first_ratio && antcomb->second_ratio) {
			/* Both probes beat main: keep the stronger of the two. */
			if (antcomb->rssi_second > antcomb->rssi_third) {
				/* first alt */
				if ((antcomb->first_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA1) ||
				    (antcomb->first_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA2))
					/* Set alt LNA1 or LNA2 */
					if (div_ant_conf->main_lna_conf ==
					    ATH_ANT_DIV_COMB_LNA2)
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA1;
					else
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA2;
				else
					/* Set alt to A+B or A-B */
					div_ant_conf->alt_lna_conf =
						antcomb->first_quick_scan_conf;
			} else if ((antcomb->second_quick_scan_conf ==
				   ATH_ANT_DIV_COMB_LNA1) ||
				   (antcomb->second_quick_scan_conf ==
				   ATH_ANT_DIV_COMB_LNA2)) {
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			} else {
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
			}
		} else if (antcomb->first_ratio) {
			/* first alt */
			if ((antcomb->first_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->first_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->first_quick_scan_conf;
		} else if (antcomb->second_ratio) {
			/* second alt */
			if ((antcomb->second_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->second_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
		} else {
			/* main is largest */
			if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf = antcomb->main_conf;
		}
		break;
	default:
		break;
	}
}
309
/*
 * Select the fast-diversity bias (and, for the newer diversity groups,
 * the main/alt gain-table indices) for the given main/alt LNA pairing.
 * The switch keys are (main_lna_conf << 4) | alt_lna_conf; the case
 * comments read as "main alt". The bias values are hardware-tuned
 * constants - presumably register programming values; do not "simplify"
 * or reorder them (NOTE(review): meanings not derivable from this file).
 */
static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
					  struct ath_ant_comb *antcomb,
					  int alt_ratio)
{
	if (ant_conf->div_group == 0) {
		/* Adjust the fast_div_bias based on main and alt lna conf */
		switch ((ant_conf->main_lna_conf << 4) |
			ant_conf->alt_lna_conf) {
		case 0x01: /* A-B LNA2 */
			ant_conf->fast_div_bias = 0x3b;
			break;
		case 0x02: /* A-B LNA1 */
			ant_conf->fast_div_bias = 0x3d;
			break;
		case 0x03: /* A-B A+B */
			ant_conf->fast_div_bias = 0x1;
			break;
		case 0x10: /* LNA2 A-B */
			ant_conf->fast_div_bias = 0x7;
			break;
		case 0x12: /* LNA2 LNA1 */
			ant_conf->fast_div_bias = 0x2;
			break;
		case 0x13: /* LNA2 A+B */
			ant_conf->fast_div_bias = 0x7;
			break;
		case 0x20: /* LNA1 A-B */
			ant_conf->fast_div_bias = 0x6;
			break;
		case 0x21: /* LNA1 LNA2 */
			ant_conf->fast_div_bias = 0x0;
			break;
		case 0x23: /* LNA1 A+B */
			ant_conf->fast_div_bias = 0x6;
			break;
		case 0x30: /* A+B A-B */
			ant_conf->fast_div_bias = 0x1;
			break;
		case 0x31: /* A+B LNA2 */
			ant_conf->fast_div_bias = 0x3b;
			break;
		case 0x32: /* A+B LNA1 */
			ant_conf->fast_div_bias = 0x3d;
			break;
		default:
			break;
		}
	} else if (ant_conf->div_group == 1) {
		/*
		 * Group 1: bias 0x3f is used only when not scanning and the
		 * alternate antenna already receives a good share of frames;
		 * gain tables are always reset to 0.
		 */
		/* Adjust the fast_div_bias based on main and alt_lna_conf */
		switch ((ant_conf->main_lna_conf << 4) |
			ant_conf->alt_lna_conf) {
		case 0x01: /* A-B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x02: /* A-B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x03: /* A-B A+B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x10: /* LNA2 A-B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x12: /* LNA2 LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x13: /* LNA2 A+B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x20: /* LNA1 A-B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x21: /* LNA1 LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x23: /* LNA1 A+B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x30: /* A+B A-B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x31: /* A+B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x32: /* A+B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		default:
			break;
		}
	} else if (ant_conf->div_group == 2) {
		/*
		 * Group 2: same structure as group 1 but with bias 0x1/0x2
		 * instead of 0x3f/0x1 in the conditional cases.
		 */
		/* Adjust the fast_div_bias based on main and alt_lna_conf */
		switch ((ant_conf->main_lna_conf << 4) |
			ant_conf->alt_lna_conf) {
		case 0x01: /* A-B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x02: /* A-B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x03: /* A-B A+B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x10: /* LNA2 A-B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x12: /* LNA2 LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x13: /* LNA2 A+B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x20: /* LNA1 A-B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x21: /* LNA1 LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x23: /* LNA1 A+B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x30: /* A+B A-B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x31: /* A+B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x32: /* A+B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		default:
			break;
		}
	}
}
525
/*
 * ath_ant_comb_scan() - per-RX-frame antenna diversity/combining state
 * machine.  Accumulates RSSI statistics for the main and alt antennas from
 * each received frame and, once enough packets have been seen (or a short
 * scan terminates early), selects the LNA configuration (LNA1, LNA2,
 * LNA1+LNA2 or LNA1-LNA2) to program as main/alt via
 * ath9k_hw_antdiv_comb_conf_set().
 *
 * @sc: driver-private softc holding the ant_comb state
 * @rs: RX status of the just-received frame (rs_rssi_ctl0/1/2, rs_moreaggr)
 */
526void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
527{
528	struct ath_hw_antcomb_conf div_ant_conf;
529	struct ath_ant_comb *antcomb = &sc->ant_comb;
530	int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
531	int curr_main_set;
532	int main_rssi = rs->rs_rssi_ctl0;
533	int alt_rssi = rs->rs_rssi_ctl1;
534	int rx_ant_conf, main_ant_conf;
535	bool short_scan = false;
536
	/* Antenna config this frame arrived on, and the currently programmed
	 * main antenna, both packed into rs_rssi_ctl2 by the hardware. */
537	rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
538		      ATH_ANT_RX_MASK;
539	main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
540			ATH_ANT_RX_MASK;
541
542	/* Record packet only when both main_rssi and alt_rssi is positive */
543	if (main_rssi > 0 && alt_rssi > 0) {
544		antcomb->total_pkt_count++;
545		antcomb->main_total_rssi += main_rssi;
546		antcomb->alt_total_rssi += alt_rssi;
547		if (main_ant_conf == rx_ant_conf)
548			antcomb->main_recv_cnt++;
549		else
550			antcomb->alt_recv_cnt++;
551	}
552
553	/* Short scan check */
	/* While scanning a previously-good alt antenna, cut the scan short
	 * either on timeout or, after SHORT_SCAN_PKTCOUNT packets, when the
	 * alt antenna's receive share has dropped below the good threshold. */
554	if (antcomb->scan && antcomb->alt_good) {
555		if (time_after(jiffies, antcomb->scan_start_time +
556		    msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
557			short_scan = true;
558		else
559			if (antcomb->total_pkt_count ==
560			    ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
561				alt_ratio = ((antcomb->alt_recv_cnt * 100) /
562					     antcomb->total_pkt_count);
563				if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
564					short_scan = true;
565			}
566	}
567
	/* Defer the decision until enough packets have accumulated and the
	 * current A-MPDU burst has ended, unless a short scan forces it. */
568	if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
569	    rs->rs_moreaggr) && !short_scan)
570		return;
571
	/* Averages over the accumulation window; guarded against a zero
	 * packet count (possible on the short-scan timeout path). */
572	if (antcomb->total_pkt_count) {
573		alt_ratio = ((antcomb->alt_recv_cnt * 100) /
574			     antcomb->total_pkt_count);
575		main_rssi_avg = (antcomb->main_total_rssi /
576				 antcomb->total_pkt_count);
577		alt_rssi_avg = (antcomb->alt_total_rssi /
578				antcomb->total_pkt_count);
579	}
580
581
582	ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
583	curr_alt_set = div_ant_conf.alt_lna_conf;
584	curr_main_set = div_ant_conf.main_lna_conf;
585
586	antcomb->count++;
587
	/* Every ATH_ANT_DIV_COMB_MAX_COUNT decision windows, restart a scan
	 * cycle and record whether the alt antenna had a good receive share. */
588	if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
589		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
590			ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
591						  main_rssi_avg);
592			antcomb->alt_good = true;
593		} else {
594			antcomb->alt_good = false;
595		}
596
597		antcomb->count = 0;
598		antcomb->scan = true;
599		antcomb->scan_not_start = true;
600	}
601
	/* Steady-state (not scanning): only act when the alt antenna proves
	 * clearly better, or when alt is parked on a combined config. */
602	if (!antcomb->scan) {
603		if (ath_ant_div_comb_alt_check(div_ant_conf.div_group,
604					alt_ratio, curr_main_set, curr_alt_set,
605					alt_rssi_avg, main_rssi_avg)) {
606			if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
607				/* Switch main and alt LNA */
608				div_ant_conf.main_lna_conf =
609						ATH_ANT_DIV_COMB_LNA2;
610				div_ant_conf.alt_lna_conf =
611						ATH_ANT_DIV_COMB_LNA1;
612			} else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
613				div_ant_conf.main_lna_conf =
614						ATH_ANT_DIV_COMB_LNA1;
615				div_ant_conf.alt_lna_conf =
616						ATH_ANT_DIV_COMB_LNA2;
617			}
618
619			goto div_comb_done;
620		} else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
621			   (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
622			/* Set alt to another LNA */
623			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
624				div_ant_conf.alt_lna_conf =
625						ATH_ANT_DIV_COMB_LNA1;
626			else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
627				div_ant_conf.alt_lna_conf =
628						ATH_ANT_DIV_COMB_LNA2;
629
630			goto div_comb_done;
631		}
632
		/* Alt is not far enough above main to justify a full
		 * quick-scan pass; keep the current configuration. */
633		if ((alt_rssi_avg < (main_rssi_avg +
634		    div_ant_conf.lna1_lna2_delta)))
635			goto div_comb_done;
636	}
637
	/* Quick-scan sequence: step alt through LNA2 -> LNA1 -> A+B -> A-B,
	 * caching the average alt RSSI seen on each config, then pick the
	 * best main/alt pairing once all four measurements are in. */
638	if (!antcomb->scan_not_start) {
639		switch (curr_alt_set) {
640		case ATH_ANT_DIV_COMB_LNA2:
641			antcomb->rssi_lna2 = alt_rssi_avg;
642			antcomb->rssi_lna1 = main_rssi_avg;
643			antcomb->scan = true;
644			/* set to A+B */
645			div_ant_conf.main_lna_conf =
646				ATH_ANT_DIV_COMB_LNA1;
647			div_ant_conf.alt_lna_conf =
648				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
649			break;
650		case ATH_ANT_DIV_COMB_LNA1:
651			antcomb->rssi_lna1 = alt_rssi_avg;
652			antcomb->rssi_lna2 = main_rssi_avg;
653			antcomb->scan = true;
654			/* set to A+B */
655			div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
656			div_ant_conf.alt_lna_conf =
657				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
658			break;
659		case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
660			antcomb->rssi_add = alt_rssi_avg;
661			antcomb->scan = true;
662			/* set to A-B */
663			div_ant_conf.alt_lna_conf =
664				ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
665			break;
666		case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
			/* Final scan step: all four measurements collected,
			 * choose the winning main LNA and alt combination. */
667			antcomb->rssi_sub = alt_rssi_avg;
668			antcomb->scan = false;
669			if (antcomb->rssi_lna2 >
670			    (antcomb->rssi_lna1 +
671			    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
672				/* use LNA2 as main LNA */
673				if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
674				    (antcomb->rssi_add > antcomb->rssi_sub)) {
675					/* set to A+B */
676					div_ant_conf.main_lna_conf =
677						ATH_ANT_DIV_COMB_LNA2;
678					div_ant_conf.alt_lna_conf =
679						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
680				} else if (antcomb->rssi_sub >
681					   antcomb->rssi_lna1) {
682					/* set to A-B */
683					div_ant_conf.main_lna_conf =
684						ATH_ANT_DIV_COMB_LNA2;
685					div_ant_conf.alt_lna_conf =
686						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
687				} else {
688					/* set to LNA1 */
689					div_ant_conf.main_lna_conf =
690						ATH_ANT_DIV_COMB_LNA2;
691					div_ant_conf.alt_lna_conf =
692						ATH_ANT_DIV_COMB_LNA1;
693				}
694			} else {
695				/* use LNA1 as main LNA */
696				if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
697				    (antcomb->rssi_add > antcomb->rssi_sub)) {
698					/* set to A+B */
699					div_ant_conf.main_lna_conf =
700						ATH_ANT_DIV_COMB_LNA1;
701					div_ant_conf.alt_lna_conf =
702						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
703				} else if (antcomb->rssi_sub >
704					   antcomb->rssi_lna1) {
705					/* set to A-B */
706					div_ant_conf.main_lna_conf =
707						ATH_ANT_DIV_COMB_LNA1;
708					div_ant_conf.alt_lna_conf =
709						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
710				} else {
711					/* set to LNA2 */
712					div_ant_conf.main_lna_conf =
713						ATH_ANT_DIV_COMB_LNA1;
714					div_ant_conf.alt_lna_conf =
715						ATH_ANT_DIV_COMB_LNA2;
716				}
717			}
718			break;
719		default:
720			break;
721		}
722	} else {
		/* First decision after a scan restart: if the alt antenna
		 * was not good, just swap alt to the other LNA and stop. */
723		if (!antcomb->alt_good) {
724			antcomb->scan_not_start = false;
725			/* Set alt to another LNA */
726			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
727				div_ant_conf.main_lna_conf =
728					ATH_ANT_DIV_COMB_LNA2;
729				div_ant_conf.alt_lna_conf =
730					ATH_ANT_DIV_COMB_LNA1;
731			} else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
732				div_ant_conf.main_lna_conf =
733					ATH_ANT_DIV_COMB_LNA1;
734				div_ant_conf.alt_lna_conf =
735					ATH_ANT_DIV_COMB_LNA2;
736			}
737			goto div_comb_done;
738		}
739	}
740
741	ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
742					   main_rssi_avg, alt_rssi_avg,
743					   alt_ratio);
744
745	antcomb->quick_scan_cnt++;
746
747div_comb_done:
	/* Program the chosen configuration and reset the accumulation
	 * window for the next decision. */
748	ath_ant_div_conf_fast_divbias(&div_ant_conf, antcomb, alt_ratio);
749	ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);
750
751	antcomb->scan_start_time = jiffies;
752	antcomb->total_pkt_count = 0;
753	antcomb->main_total_rssi = 0;
754	antcomb->alt_total_rssi = 0;
755	antcomb->main_recv_cnt = 0;
756	antcomb->alt_recv_cnt = 0;
757}
758
/*
 * ath_ant_comb_update() - pin reception to a single antenna.
 *
 * Reads the current antdiv configuration, then forces both the main and
 * alt LNA configuration to the antenna selected in sc->ant_rx
 * (1 => LNA1, any other value => LNA2), effectively disabling diversity.
 */
759void ath_ant_comb_update(struct ath_softc *sc)
760{
761	struct ath_hw *ah = sc->sc_ah;
762	struct ath_hw_antcomb_conf div_ant_conf;
763	u8 lna_conf;
764
765	ath9k_hw_antdiv_comb_conf_get(ah, &div_ant_conf);
766
767	if (sc->ant_rx == 1)
768		lna_conf = ATH_ANT_DIV_COMB_LNA1;
769	else
770		lna_conf = ATH_ANT_DIV_COMB_LNA2;
771
	/* Same LNA on both slots: no alternate antenna to diverge to. */
772	div_ant_conf.main_lna_conf = lna_conf;
773	div_ant_conf.alt_lna_conf = lna_conf;
774
775	ath9k_hw_antdiv_comb_conf_set(ah, &div_ant_conf);
776}
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index c7492c6a2519..874186bfda41 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -995,141 +995,6 @@ static u32 ar5008_hw_compute_pll_control(struct ath_hw *ah,
995 return pll; 995 return pll;
996} 996}
997 997
998static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
999 enum ath9k_ani_cmd cmd,
1000 int param)
1001{
1002 struct ar5416AniState *aniState = &ah->curchan->ani;
1003 struct ath_common *common = ath9k_hw_common(ah);
1004
1005 switch (cmd & ah->ani_function) {
1006 case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
1007 u32 level = param;
1008
1009 if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
1010 ath_dbg(common, ANI, "level out of range (%u > %zu)\n",
1011 level, ARRAY_SIZE(ah->totalSizeDesired));
1012 return false;
1013 }
1014
1015 REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
1016 AR_PHY_DESIRED_SZ_TOT_DES,
1017 ah->totalSizeDesired[level]);
1018 REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
1019 AR_PHY_AGC_CTL1_COARSE_LOW,
1020 ah->coarse_low[level]);
1021 REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
1022 AR_PHY_AGC_CTL1_COARSE_HIGH,
1023 ah->coarse_high[level]);
1024 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
1025 AR_PHY_FIND_SIG_FIRPWR,
1026 ah->firpwr[level]);
1027
1028 if (level > aniState->noiseImmunityLevel)
1029 ah->stats.ast_ani_niup++;
1030 else if (level < aniState->noiseImmunityLevel)
1031 ah->stats.ast_ani_nidown++;
1032 aniState->noiseImmunityLevel = level;
1033 break;
1034 }
1035 case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
1036 u32 on = param ? 1 : 0;
1037
1038 if (on)
1039 REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
1040 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
1041 else
1042 REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
1043 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
1044
1045 if (!on != aniState->ofdmWeakSigDetectOff) {
1046 if (on)
1047 ah->stats.ast_ani_ofdmon++;
1048 else
1049 ah->stats.ast_ani_ofdmoff++;
1050 aniState->ofdmWeakSigDetectOff = !on;
1051 }
1052 break;
1053 }
1054 case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
1055 static const int weakSigThrCck[] = { 8, 6 };
1056 u32 high = param ? 1 : 0;
1057
1058 REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
1059 AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK,
1060 weakSigThrCck[high]);
1061 if (high != aniState->cckWeakSigThreshold) {
1062 if (high)
1063 ah->stats.ast_ani_cckhigh++;
1064 else
1065 ah->stats.ast_ani_ccklow++;
1066 aniState->cckWeakSigThreshold = high;
1067 }
1068 break;
1069 }
1070 case ATH9K_ANI_FIRSTEP_LEVEL:{
1071 static const int firstep[] = { 0, 4, 8 };
1072 u32 level = param;
1073
1074 if (level >= ARRAY_SIZE(firstep)) {
1075 ath_dbg(common, ANI, "level out of range (%u > %zu)\n",
1076 level, ARRAY_SIZE(firstep));
1077 return false;
1078 }
1079 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
1080 AR_PHY_FIND_SIG_FIRSTEP,
1081 firstep[level]);
1082 if (level > aniState->firstepLevel)
1083 ah->stats.ast_ani_stepup++;
1084 else if (level < aniState->firstepLevel)
1085 ah->stats.ast_ani_stepdown++;
1086 aniState->firstepLevel = level;
1087 break;
1088 }
1089 case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
1090 static const int cycpwrThr1[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
1091 u32 level = param;
1092
1093 if (level >= ARRAY_SIZE(cycpwrThr1)) {
1094 ath_dbg(common, ANI, "level out of range (%u > %zu)\n",
1095 level, ARRAY_SIZE(cycpwrThr1));
1096 return false;
1097 }
1098 REG_RMW_FIELD(ah, AR_PHY_TIMING5,
1099 AR_PHY_TIMING5_CYCPWR_THR1,
1100 cycpwrThr1[level]);
1101 if (level > aniState->spurImmunityLevel)
1102 ah->stats.ast_ani_spurup++;
1103 else if (level < aniState->spurImmunityLevel)
1104 ah->stats.ast_ani_spurdown++;
1105 aniState->spurImmunityLevel = level;
1106 break;
1107 }
1108 case ATH9K_ANI_PRESENT:
1109 break;
1110 default:
1111 ath_dbg(common, ANI, "invalid cmd %u\n", cmd);
1112 return false;
1113 }
1114
1115 ath_dbg(common, ANI, "ANI parameters:\n");
1116 ath_dbg(common, ANI,
1117 "noiseImmunityLevel=%d, spurImmunityLevel=%d, ofdmWeakSigDetectOff=%d\n",
1118 aniState->noiseImmunityLevel,
1119 aniState->spurImmunityLevel,
1120 !aniState->ofdmWeakSigDetectOff);
1121 ath_dbg(common, ANI,
1122 "cckWeakSigThreshold=%d, firstepLevel=%d, listenTime=%d\n",
1123 aniState->cckWeakSigThreshold,
1124 aniState->firstepLevel,
1125 aniState->listenTime);
1126 ath_dbg(common, ANI, "ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
1127 aniState->ofdmPhyErrCount,
1128 aniState->cckPhyErrCount);
1129
1130 return true;
1131}
1132
1133static bool ar5008_hw_ani_control_new(struct ath_hw *ah, 998static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1134 enum ath9k_ani_cmd cmd, 999 enum ath9k_ani_cmd cmd,
1135 int param) 1000 int param)
@@ -1206,18 +1071,18 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1206 REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW, 1071 REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
1207 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW); 1072 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
1208 1073
1209 if (!on != aniState->ofdmWeakSigDetectOff) { 1074 if (on != aniState->ofdmWeakSigDetect) {
1210 ath_dbg(common, ANI, 1075 ath_dbg(common, ANI,
1211 "** ch %d: ofdm weak signal: %s=>%s\n", 1076 "** ch %d: ofdm weak signal: %s=>%s\n",
1212 chan->channel, 1077 chan->channel,
1213 !aniState->ofdmWeakSigDetectOff ? 1078 aniState->ofdmWeakSigDetect ?
1214 "on" : "off", 1079 "on" : "off",
1215 on ? "on" : "off"); 1080 on ? "on" : "off");
1216 if (on) 1081 if (on)
1217 ah->stats.ast_ani_ofdmon++; 1082 ah->stats.ast_ani_ofdmon++;
1218 else 1083 else
1219 ah->stats.ast_ani_ofdmoff++; 1084 ah->stats.ast_ani_ofdmoff++;
1220 aniState->ofdmWeakSigDetectOff = !on; 1085 aniState->ofdmWeakSigDetect = on;
1221 } 1086 }
1222 break; 1087 break;
1223 } 1088 }
@@ -1236,7 +1101,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1236 * from INI file & cap value 1101 * from INI file & cap value
1237 */ 1102 */
1238 value = firstep_table[level] - 1103 value = firstep_table[level] -
1239 firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] + 1104 firstep_table[ATH9K_ANI_FIRSTEP_LVL] +
1240 aniState->iniDef.firstep; 1105 aniState->iniDef.firstep;
1241 if (value < ATH9K_SIG_FIRSTEP_SETTING_MIN) 1106 if (value < ATH9K_SIG_FIRSTEP_SETTING_MIN)
1242 value = ATH9K_SIG_FIRSTEP_SETTING_MIN; 1107 value = ATH9K_SIG_FIRSTEP_SETTING_MIN;
@@ -1251,7 +1116,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1251 * from INI file & cap value 1116 * from INI file & cap value
1252 */ 1117 */
1253 value2 = firstep_table[level] - 1118 value2 = firstep_table[level] -
1254 firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] + 1119 firstep_table[ATH9K_ANI_FIRSTEP_LVL] +
1255 aniState->iniDef.firstepLow; 1120 aniState->iniDef.firstepLow;
1256 if (value2 < ATH9K_SIG_FIRSTEP_SETTING_MIN) 1121 if (value2 < ATH9K_SIG_FIRSTEP_SETTING_MIN)
1257 value2 = ATH9K_SIG_FIRSTEP_SETTING_MIN; 1122 value2 = ATH9K_SIG_FIRSTEP_SETTING_MIN;
@@ -1267,7 +1132,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1267 chan->channel, 1132 chan->channel,
1268 aniState->firstepLevel, 1133 aniState->firstepLevel,
1269 level, 1134 level,
1270 ATH9K_ANI_FIRSTEP_LVL_NEW, 1135 ATH9K_ANI_FIRSTEP_LVL,
1271 value, 1136 value,
1272 aniState->iniDef.firstep); 1137 aniState->iniDef.firstep);
1273 ath_dbg(common, ANI, 1138 ath_dbg(common, ANI,
@@ -1275,7 +1140,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1275 chan->channel, 1140 chan->channel,
1276 aniState->firstepLevel, 1141 aniState->firstepLevel,
1277 level, 1142 level,
1278 ATH9K_ANI_FIRSTEP_LVL_NEW, 1143 ATH9K_ANI_FIRSTEP_LVL,
1279 value2, 1144 value2,
1280 aniState->iniDef.firstepLow); 1145 aniState->iniDef.firstepLow);
1281 if (level > aniState->firstepLevel) 1146 if (level > aniState->firstepLevel)
@@ -1300,7 +1165,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1300 * from INI file & cap value 1165 * from INI file & cap value
1301 */ 1166 */
1302 value = cycpwrThr1_table[level] - 1167 value = cycpwrThr1_table[level] -
1303 cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] + 1168 cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL] +
1304 aniState->iniDef.cycpwrThr1; 1169 aniState->iniDef.cycpwrThr1;
1305 if (value < ATH9K_SIG_SPUR_IMM_SETTING_MIN) 1170 if (value < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
1306 value = ATH9K_SIG_SPUR_IMM_SETTING_MIN; 1171 value = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
@@ -1316,7 +1181,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1316 * from INI file & cap value 1181 * from INI file & cap value
1317 */ 1182 */
1318 value2 = cycpwrThr1_table[level] - 1183 value2 = cycpwrThr1_table[level] -
1319 cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] + 1184 cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL] +
1320 aniState->iniDef.cycpwrThr1Ext; 1185 aniState->iniDef.cycpwrThr1Ext;
1321 if (value2 < ATH9K_SIG_SPUR_IMM_SETTING_MIN) 1186 if (value2 < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
1322 value2 = ATH9K_SIG_SPUR_IMM_SETTING_MIN; 1187 value2 = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
@@ -1331,7 +1196,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1331 chan->channel, 1196 chan->channel,
1332 aniState->spurImmunityLevel, 1197 aniState->spurImmunityLevel,
1333 level, 1198 level,
1334 ATH9K_ANI_SPUR_IMMUNE_LVL_NEW, 1199 ATH9K_ANI_SPUR_IMMUNE_LVL,
1335 value, 1200 value,
1336 aniState->iniDef.cycpwrThr1); 1201 aniState->iniDef.cycpwrThr1);
1337 ath_dbg(common, ANI, 1202 ath_dbg(common, ANI,
@@ -1339,7 +1204,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1339 chan->channel, 1204 chan->channel,
1340 aniState->spurImmunityLevel, 1205 aniState->spurImmunityLevel,
1341 level, 1206 level,
1342 ATH9K_ANI_SPUR_IMMUNE_LVL_NEW, 1207 ATH9K_ANI_SPUR_IMMUNE_LVL,
1343 value2, 1208 value2,
1344 aniState->iniDef.cycpwrThr1Ext); 1209 aniState->iniDef.cycpwrThr1Ext);
1345 if (level > aniState->spurImmunityLevel) 1210 if (level > aniState->spurImmunityLevel)
@@ -1367,9 +1232,9 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1367 ath_dbg(common, ANI, 1232 ath_dbg(common, ANI,
1368 "ANI parameters: SI=%d, ofdmWS=%s FS=%d MRCcck=%s listenTime=%d ofdmErrs=%d cckErrs=%d\n", 1233 "ANI parameters: SI=%d, ofdmWS=%s FS=%d MRCcck=%s listenTime=%d ofdmErrs=%d cckErrs=%d\n",
1369 aniState->spurImmunityLevel, 1234 aniState->spurImmunityLevel,
1370 !aniState->ofdmWeakSigDetectOff ? "on" : "off", 1235 aniState->ofdmWeakSigDetect ? "on" : "off",
1371 aniState->firstepLevel, 1236 aniState->firstepLevel,
1372 !aniState->mrcCCKOff ? "on" : "off", 1237 aniState->mrcCCK ? "on" : "off",
1373 aniState->listenTime, 1238 aniState->listenTime,
1374 aniState->ofdmPhyErrCount, 1239 aniState->ofdmPhyErrCount,
1375 aniState->cckPhyErrCount); 1240 aniState->cckPhyErrCount);
@@ -1454,10 +1319,10 @@ static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah)
1454 AR_PHY_EXT_TIMING5_CYCPWR_THR1); 1319 AR_PHY_EXT_TIMING5_CYCPWR_THR1);
1455 1320
1456 /* these levels just got reset to defaults by the INI */ 1321 /* these levels just got reset to defaults by the INI */
1457 aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL_NEW; 1322 aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
1458 aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW; 1323 aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
1459 aniState->ofdmWeakSigDetectOff = !ATH9K_ANI_USE_OFDM_WEAK_SIG; 1324 aniState->ofdmWeakSigDetect = ATH9K_ANI_USE_OFDM_WEAK_SIG;
1460 aniState->mrcCCKOff = true; /* not available on pre AR9003 */ 1325 aniState->mrcCCK = false; /* not available on pre AR9003 */
1461} 1326}
1462 1327
1463static void ar5008_hw_set_nf_limits(struct ath_hw *ah) 1328static void ar5008_hw_set_nf_limits(struct ath_hw *ah)
@@ -1545,11 +1410,8 @@ void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
1545 priv_ops->do_getnf = ar5008_hw_do_getnf; 1410 priv_ops->do_getnf = ar5008_hw_do_getnf;
1546 priv_ops->set_radar_params = ar5008_hw_set_radar_params; 1411 priv_ops->set_radar_params = ar5008_hw_set_radar_params;
1547 1412
1548 if (modparam_force_new_ani) { 1413 priv_ops->ani_control = ar5008_hw_ani_control_new;
1549 priv_ops->ani_control = ar5008_hw_ani_control_new; 1414 priv_ops->ani_cache_ini_regs = ar5008_hw_ani_cache_ini_regs;
1550 priv_ops->ani_cache_ini_regs = ar5008_hw_ani_cache_ini_regs;
1551 } else
1552 priv_ops->ani_control = ar5008_hw_ani_control_old;
1553 1415
1554 if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) 1416 if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah))
1555 priv_ops->compute_pll_control = ar9160_hw_compute_pll_control; 1417 priv_ops->compute_pll_control = ar9160_hw_compute_pll_control;
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index d9a69fc470cd..edf21ea4fe93 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -21,10 +21,6 @@
21#include "ar9002_initvals.h" 21#include "ar9002_initvals.h"
22#include "ar9002_phy.h" 22#include "ar9002_phy.h"
23 23
24int modparam_force_new_ani;
25module_param_named(force_new_ani, modparam_force_new_ani, int, 0444);
26MODULE_PARM_DESC(force_new_ani, "Force new ANI for AR5008, AR9001, AR9002");
27
28/* General hardware code for the A5008/AR9001/AR9002 hadware families */ 24/* General hardware code for the A5008/AR9001/AR9002 hadware families */
29 25
30static void ar9002_hw_init_mode_regs(struct ath_hw *ah) 26static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index 952cb2b4656b..89bf94d4d8a1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2010-2011 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
3 * 4 *
4 * Permission to use, copy, modify, and/or distribute this software for any 5 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 6 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 9fdd70fcaf5b..84b558d126ca 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -159,14 +159,11 @@ static bool ar9003_hw_calibrate(struct ath_hw *ah,
159 } 159 }
160 } 160 }
161 161
162 /* Do NF cal only at longer intervals */ 162 /*
163 if (longcal) { 163 * Do NF cal only at longer intervals. Get the value from
164 /* 164 * the previous NF cal and update history buffer.
165 * Get the value from the previous NF cal and update 165 */
166 * history buffer. 166 if (longcal && ath9k_hw_getnf(ah, chan)) {
167 */
168 ath9k_hw_getnf(ah, chan);
169
170 /* 167 /*
171 * Load the NF from history buffer of the current channel. 168 * Load the NF from history buffer of the current channel.
172 * NF is slow time-variant, so it is OK to use a historical 169 * NF is slow time-variant, so it is OK to use a historical
@@ -653,7 +650,6 @@ static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
653} 650}
654 651
655static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah, 652static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
656 u8 num_chains,
657 struct coeff *coeff, 653 struct coeff *coeff,
658 bool is_reusable) 654 bool is_reusable)
659{ 655{
@@ -677,7 +673,9 @@ static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
677 } 673 }
678 674
679 /* Load the average of 2 passes */ 675 /* Load the average of 2 passes */
680 for (i = 0; i < num_chains; i++) { 676 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
677 if (!(ah->txchainmask & (1 << i)))
678 continue;
681 nmeasurement = REG_READ_FIELD(ah, 679 nmeasurement = REG_READ_FIELD(ah,
682 AR_PHY_TX_IQCAL_STATUS_B0, 680 AR_PHY_TX_IQCAL_STATUS_B0,
683 AR_PHY_CALIBRATED_GAINS_0); 681 AR_PHY_CALIBRATED_GAINS_0);
@@ -767,16 +765,13 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
767 }; 765 };
768 struct coeff coeff; 766 struct coeff coeff;
769 s32 iq_res[6]; 767 s32 iq_res[6];
770 u8 num_chains = 0;
771 int i, im, j; 768 int i, im, j;
772 int nmeasurement; 769 int nmeasurement;
773 770
774 for (i = 0; i < AR9300_MAX_CHAINS; i++) { 771 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
775 if (ah->txchainmask & (1 << i)) 772 if (!(ah->txchainmask & (1 << i)))
776 num_chains++; 773 continue;
777 }
778 774
779 for (i = 0; i < num_chains; i++) {
780 nmeasurement = REG_READ_FIELD(ah, 775 nmeasurement = REG_READ_FIELD(ah,
781 AR_PHY_TX_IQCAL_STATUS_B0, 776 AR_PHY_TX_IQCAL_STATUS_B0,
782 AR_PHY_CALIBRATED_GAINS_0); 777 AR_PHY_CALIBRATED_GAINS_0);
@@ -839,8 +834,7 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
839 coeff.phs_coeff[i][im] -= 128; 834 coeff.phs_coeff[i][im] -= 128;
840 } 835 }
841 } 836 }
842 ar9003_hw_tx_iqcal_load_avg_2_passes(ah, num_chains, 837 ar9003_hw_tx_iqcal_load_avg_2_passes(ah, &coeff, is_reusable);
843 &coeff, is_reusable);
844 838
845 return; 839 return;
846 840
@@ -901,7 +895,6 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
901 bool is_reusable = true, status = true; 895 bool is_reusable = true, status = true;
902 bool run_rtt_cal = false, run_agc_cal; 896 bool run_rtt_cal = false, run_agc_cal;
903 bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT); 897 bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT);
904 bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
905 u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL | 898 u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL |
906 AR_PHY_AGC_CONTROL_FLTR_CAL | 899 AR_PHY_AGC_CONTROL_FLTR_CAL |
907 AR_PHY_AGC_CONTROL_PKDET_CAL; 900 AR_PHY_AGC_CONTROL_PKDET_CAL;
@@ -970,7 +963,7 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
970 } else if (caldata && !caldata->done_txiqcal_once) 963 } else if (caldata && !caldata->done_txiqcal_once)
971 run_agc_cal = true; 964 run_agc_cal = true;
972 965
973 if (mci && IS_CHAN_2GHZ(chan) && run_agc_cal) 966 if (ath9k_hw_mci_is_enabled(ah) && IS_CHAN_2GHZ(chan) && run_agc_cal)
974 ar9003_mci_init_cal_req(ah, &is_reusable); 967 ar9003_mci_init_cal_req(ah, &is_reusable);
975 968
976 if (!(IS_CHAN_HALF_RATE(chan) || IS_CHAN_QUARTER_RATE(chan))) { 969 if (!(IS_CHAN_HALF_RATE(chan) || IS_CHAN_QUARTER_RATE(chan))) {
@@ -993,7 +986,7 @@ skip_tx_iqcal:
993 0, AH_WAIT_TIMEOUT); 986 0, AH_WAIT_TIMEOUT);
994 } 987 }
995 988
996 if (mci && IS_CHAN_2GHZ(chan) && run_agc_cal) 989 if (ath9k_hw_mci_is_enabled(ah) && IS_CHAN_2GHZ(chan) && run_agc_cal)
997 ar9003_mci_init_cal_done(ah); 990 ar9003_mci_init_cal_done(ah);
998 991
999 if (rtt && !run_rtt_cal) { 992 if (rtt && !run_rtt_cal) {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index dfb0441f406c..ab2bfcb3bed2 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3178,7 +3178,7 @@ static int ar9300_compress_decision(struct ath_hw *ah,
3178 mdata_size, length); 3178 mdata_size, length);
3179 return -1; 3179 return -1;
3180 } 3180 }
3181 memcpy(mptr, (u8 *) (word + COMP_HDR_LEN), length); 3181 memcpy(mptr, word + COMP_HDR_LEN, length);
3182 ath_dbg(common, EEPROM, 3182 ath_dbg(common, EEPROM,
3183 "restored eeprom %d: uncompressed, length %d\n", 3183 "restored eeprom %d: uncompressed, length %d\n",
3184 it, length); 3184 it, length);
@@ -3199,7 +3199,7 @@ static int ar9300_compress_decision(struct ath_hw *ah,
3199 "restore eeprom %d: block, reference %d, length %d\n", 3199 "restore eeprom %d: block, reference %d, length %d\n",
3200 it, reference, length); 3200 it, reference, length);
3201 ar9300_uncompress_block(ah, mptr, mdata_size, 3201 ar9300_uncompress_block(ah, mptr, mdata_size,
3202 (u8 *) (word + COMP_HDR_LEN), length); 3202 (word + COMP_HDR_LEN), length);
3203 break; 3203 break;
3204 default: 3204 default:
3205 ath_dbg(common, EEPROM, "unknown compression code %d\n", code); 3205 ath_dbg(common, EEPROM, "unknown compression code %d\n", code);
@@ -3412,11 +3412,11 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
3412 if (!dump_base_hdr) { 3412 if (!dump_base_hdr) {
3413 len += snprintf(buf + len, size - len, 3413 len += snprintf(buf + len, size - len,
3414 "%20s :\n", "2GHz modal Header"); 3414 "%20s :\n", "2GHz modal Header");
3415 len += ar9003_dump_modal_eeprom(buf, len, size, 3415 len = ar9003_dump_modal_eeprom(buf, len, size,
3416 &eep->modalHeader2G); 3416 &eep->modalHeader2G);
3417 len += snprintf(buf + len, size - len, 3417 len += snprintf(buf + len, size - len,
3418 "%20s :\n", "5GHz modal Header"); 3418 "%20s :\n", "5GHz modal Header");
3419 len += ar9003_dump_modal_eeprom(buf, len, size, 3419 len = ar9003_dump_modal_eeprom(buf, len, size,
3420 &eep->modalHeader5G); 3420 &eep->modalHeader5G);
3421 goto out; 3421 goto out;
3422 } 3422 }
@@ -3509,7 +3509,7 @@ static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
3509 3509
3510 if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah)) 3510 if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah))
3511 REG_RMW_FIELD(ah, AR_CH0_TOP2, AR_CH0_TOP2_XPABIASLVL, bias); 3511 REG_RMW_FIELD(ah, AR_CH0_TOP2, AR_CH0_TOP2_XPABIASLVL, bias);
3512 else if (AR_SREV_9462(ah)) 3512 else if (AR_SREV_9462(ah) || AR_SREV_9550(ah))
3513 REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias); 3513 REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
3514 else { 3514 else {
3515 REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias); 3515 REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
@@ -3591,6 +3591,9 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
3591 if (AR_SREV_9462(ah)) { 3591 if (AR_SREV_9462(ah)) {
3592 REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM, 3592 REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
3593 AR_SWITCH_TABLE_COM_AR9462_ALL, value); 3593 AR_SWITCH_TABLE_COM_AR9462_ALL, value);
3594 } else if (AR_SREV_9550(ah)) {
3595 REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
3596 AR_SWITCH_TABLE_COM_AR9550_ALL, value);
3594 } else 3597 } else
3595 REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM, 3598 REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
3596 AR_SWITCH_TABLE_COM_ALL, value); 3599 AR_SWITCH_TABLE_COM_ALL, value);
@@ -3613,6 +3616,7 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
3613 value = ar9003_switch_com_spdt_get(ah, is2ghz); 3616 value = ar9003_switch_com_spdt_get(ah, is2ghz);
3614 REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL, 3617 REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL,
3615 AR_SWITCH_TABLE_COM_SPDT_ALL, value); 3618 AR_SWITCH_TABLE_COM_SPDT_ALL, value);
3619 REG_SET_BIT(ah, AR_PHY_GLB_CONTROL, AR_BTCOEX_CTRL_SPDT_ENABLE);
3616 } 3620 }
3617 3621
3618 value = ar9003_hw_ant_ctrl_common_2_get(ah, is2ghz); 3622 value = ar9003_hw_ant_ctrl_common_2_get(ah, is2ghz);
@@ -3956,7 +3960,7 @@ static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
3956 ar9003_hw_drive_strength_apply(ah); 3960 ar9003_hw_drive_strength_apply(ah);
3957 ar9003_hw_atten_apply(ah, chan); 3961 ar9003_hw_atten_apply(ah, chan);
3958 ar9003_hw_quick_drop_apply(ah, chan->channel); 3962 ar9003_hw_quick_drop_apply(ah, chan->channel);
3959 if (!AR_SREV_9330(ah) && !AR_SREV_9340(ah)) 3963 if (!AR_SREV_9330(ah) && !AR_SREV_9340(ah) && !AR_SREV_9550(ah))
3960 ar9003_hw_internal_regulator_apply(ah); 3964 ar9003_hw_internal_regulator_apply(ah);
3961 if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah)) 3965 if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah))
3962 ar9003_hw_apply_tuning_caps(ah); 3966 ar9003_hw_apply_tuning_caps(ah);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index a0e3394b10dc..41e88c660e48 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -21,6 +21,7 @@
21#include "ar9340_initvals.h" 21#include "ar9340_initvals.h"
22#include "ar9330_1p1_initvals.h" 22#include "ar9330_1p1_initvals.h"
23#include "ar9330_1p2_initvals.h" 23#include "ar9330_1p2_initvals.h"
24#include "ar955x_1p0_initvals.h"
24#include "ar9580_1p0_initvals.h" 25#include "ar9580_1p0_initvals.h"
25#include "ar9462_2p0_initvals.h" 26#include "ar9462_2p0_initvals.h"
26 27
@@ -327,7 +328,61 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
327 328
328 INIT_INI_ARRAY(&ah->ini_japan2484, AR9462_BBC_TXIFR_COEFFJ, 329 INIT_INI_ARRAY(&ah->ini_japan2484, AR9462_BBC_TXIFR_COEFFJ,
329 ARRAY_SIZE(AR9462_BBC_TXIFR_COEFFJ), 2); 330 ARRAY_SIZE(AR9462_BBC_TXIFR_COEFFJ), 2);
331 } else if (AR_SREV_9550(ah)) {
332 /* mac */
333 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
334 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
335 ar955x_1p0_mac_core,
336 ARRAY_SIZE(ar955x_1p0_mac_core), 2);
337 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
338 ar955x_1p0_mac_postamble,
339 ARRAY_SIZE(ar955x_1p0_mac_postamble), 5);
340
341 /* bb */
342 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
343 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
344 ar955x_1p0_baseband_core,
345 ARRAY_SIZE(ar955x_1p0_baseband_core), 2);
346 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
347 ar955x_1p0_baseband_postamble,
348 ARRAY_SIZE(ar955x_1p0_baseband_postamble), 5);
349
350 /* radio */
351 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
352 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
353 ar955x_1p0_radio_core,
354 ARRAY_SIZE(ar955x_1p0_radio_core), 2);
355 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
356 ar955x_1p0_radio_postamble,
357 ARRAY_SIZE(ar955x_1p0_radio_postamble), 5);
358
359 /* soc */
360 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
361 ar955x_1p0_soc_preamble,
362 ARRAY_SIZE(ar955x_1p0_soc_preamble), 2);
363 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
364 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
365 ar955x_1p0_soc_postamble,
366 ARRAY_SIZE(ar955x_1p0_soc_postamble), 5);
330 367
368 /* rx/tx gain */
369 INIT_INI_ARRAY(&ah->iniModesRxGain,
370 ar955x_1p0_common_wo_xlna_rx_gain_table,
371 ARRAY_SIZE(ar955x_1p0_common_wo_xlna_rx_gain_table),
372 2);
373 INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
374 ar955x_1p0_common_wo_xlna_rx_gain_bounds,
375 ARRAY_SIZE(ar955x_1p0_common_wo_xlna_rx_gain_bounds),
376 5);
377 INIT_INI_ARRAY(&ah->iniModesTxGain,
378 ar955x_1p0_modes_xpa_tx_gain_table,
379 ARRAY_SIZE(ar955x_1p0_modes_xpa_tx_gain_table),
380 9);
381
382 /* Fast clock modal settings */
383 INIT_INI_ARRAY(&ah->iniModesFastClock,
384 ar955x_1p0_modes_fast_clock,
385 ARRAY_SIZE(ar955x_1p0_modes_fast_clock), 3);
331 } else if (AR_SREV_9580(ah)) { 386 } else if (AR_SREV_9580(ah)) {
332 /* mac */ 387 /* mac */
333 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0); 388 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
@@ -470,6 +525,11 @@ static void ar9003_tx_gain_table_mode0(struct ath_hw *ah)
470 ar9485_modes_lowest_ob_db_tx_gain_1_1, 525 ar9485_modes_lowest_ob_db_tx_gain_1_1,
471 ARRAY_SIZE(ar9485_modes_lowest_ob_db_tx_gain_1_1), 526 ARRAY_SIZE(ar9485_modes_lowest_ob_db_tx_gain_1_1),
472 5); 527 5);
528 else if (AR_SREV_9550(ah))
529 INIT_INI_ARRAY(&ah->iniModesTxGain,
530 ar955x_1p0_modes_xpa_tx_gain_table,
531 ARRAY_SIZE(ar955x_1p0_modes_xpa_tx_gain_table),
532 9);
473 else if (AR_SREV_9580(ah)) 533 else if (AR_SREV_9580(ah))
474 INIT_INI_ARRAY(&ah->iniModesTxGain, 534 INIT_INI_ARRAY(&ah->iniModesTxGain,
475 ar9580_1p0_lowest_ob_db_tx_gain_table, 535 ar9580_1p0_lowest_ob_db_tx_gain_table,
@@ -514,6 +574,11 @@ static void ar9003_tx_gain_table_mode1(struct ath_hw *ah)
514 ar9580_1p0_high_ob_db_tx_gain_table, 574 ar9580_1p0_high_ob_db_tx_gain_table,
515 ARRAY_SIZE(ar9580_1p0_high_ob_db_tx_gain_table), 575 ARRAY_SIZE(ar9580_1p0_high_ob_db_tx_gain_table),
516 5); 576 5);
577 else if (AR_SREV_9550(ah))
578 INIT_INI_ARRAY(&ah->iniModesTxGain,
579 ar955x_1p0_modes_no_xpa_tx_gain_table,
580 ARRAY_SIZE(ar955x_1p0_modes_no_xpa_tx_gain_table),
581 9);
517 else if (AR_SREV_9462_20(ah)) 582 else if (AR_SREV_9462_20(ah))
518 INIT_INI_ARRAY(&ah->iniModesTxGain, 583 INIT_INI_ARRAY(&ah->iniModesTxGain,
519 ar9462_modes_high_ob_db_tx_gain_table_2p0, 584 ar9462_modes_high_ob_db_tx_gain_table_2p0,
@@ -635,7 +700,16 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
635 ar9485Common_wo_xlna_rx_gain_1_1, 700 ar9485Common_wo_xlna_rx_gain_1_1,
636 ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_1), 701 ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_1),
637 2); 702 2);
638 else if (AR_SREV_9580(ah)) 703 else if (AR_SREV_9550(ah)) {
704 INIT_INI_ARRAY(&ah->iniModesRxGain,
705 ar955x_1p0_common_rx_gain_table,
706 ARRAY_SIZE(ar955x_1p0_common_rx_gain_table),
707 2);
708 INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
709 ar955x_1p0_common_rx_gain_bounds,
710 ARRAY_SIZE(ar955x_1p0_common_rx_gain_bounds),
711 5);
712 } else if (AR_SREV_9580(ah))
639 INIT_INI_ARRAY(&ah->iniModesRxGain, 713 INIT_INI_ARRAY(&ah->iniModesRxGain,
640 ar9580_1p0_rx_gain_table, 714 ar9580_1p0_rx_gain_table,
641 ARRAY_SIZE(ar9580_1p0_rx_gain_table), 715 ARRAY_SIZE(ar9580_1p0_rx_gain_table),
@@ -679,7 +753,16 @@ static void ar9003_rx_gain_table_mode1(struct ath_hw *ah)
679 ar9462_common_wo_xlna_rx_gain_table_2p0, 753 ar9462_common_wo_xlna_rx_gain_table_2p0,
680 ARRAY_SIZE(ar9462_common_wo_xlna_rx_gain_table_2p0), 754 ARRAY_SIZE(ar9462_common_wo_xlna_rx_gain_table_2p0),
681 2); 755 2);
682 else if (AR_SREV_9580(ah)) 756 else if (AR_SREV_9550(ah)) {
757 INIT_INI_ARRAY(&ah->iniModesRxGain,
758 ar955x_1p0_common_wo_xlna_rx_gain_table,
759 ARRAY_SIZE(ar955x_1p0_common_wo_xlna_rx_gain_table),
760 2);
761 INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
762 ar955x_1p0_common_wo_xlna_rx_gain_bounds,
763 ARRAY_SIZE(ar955x_1p0_common_wo_xlna_rx_gain_bounds),
764 5);
765 } else if (AR_SREV_9580(ah))
683 INIT_INI_ARRAY(&ah->iniModesRxGain, 766 INIT_INI_ARRAY(&ah->iniModesRxGain,
684 ar9580_1p0_wo_xlna_rx_gain_table, 767 ar9580_1p0_wo_xlna_rx_gain_table,
685 ARRAY_SIZE(ar9580_1p0_wo_xlna_rx_gain_table), 768 ARRAY_SIZE(ar9580_1p0_wo_xlna_rx_gain_table),
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index d9e0824af093..78816b8b2173 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -181,11 +181,14 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
181 u32 mask2 = 0; 181 u32 mask2 = 0;
182 struct ath9k_hw_capabilities *pCap = &ah->caps; 182 struct ath9k_hw_capabilities *pCap = &ah->caps;
183 struct ath_common *common = ath9k_hw_common(ah); 183 struct ath_common *common = ath9k_hw_common(ah);
184 u32 sync_cause = 0, async_cause; 184 u32 sync_cause = 0, async_cause, async_mask = AR_INTR_MAC_IRQ;
185
186 if (ath9k_hw_mci_is_enabled(ah))
187 async_mask |= AR_INTR_ASYNC_MASK_MCI;
185 188
186 async_cause = REG_READ(ah, AR_INTR_ASYNC_CAUSE); 189 async_cause = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
187 190
188 if (async_cause & (AR_INTR_MAC_IRQ | AR_INTR_ASYNC_MASK_MCI)) { 191 if (async_cause & async_mask) {
189 if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M) 192 if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
190 == AR_RTC_STATUS_ON) 193 == AR_RTC_STATUS_ON)
191 isr = REG_READ(ah, AR_ISR); 194 isr = REG_READ(ah, AR_ISR);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
index ffbb180f91e1..9a34fcaae3ff 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -35,31 +35,30 @@ static int ar9003_mci_wait_for_interrupt(struct ath_hw *ah, u32 address,
35 struct ath_common *common = ath9k_hw_common(ah); 35 struct ath_common *common = ath9k_hw_common(ah);
36 36
37 while (time_out) { 37 while (time_out) {
38 if (REG_READ(ah, address) & bit_position) { 38 if (!(REG_READ(ah, address) & bit_position)) {
39 REG_WRITE(ah, address, bit_position); 39 udelay(10);
40 40 time_out -= 10;
41 if (address == AR_MCI_INTERRUPT_RX_MSG_RAW) {
42 if (bit_position &
43 AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
44 ar9003_mci_reset_req_wakeup(ah);
45
46 if (bit_position &
47 (AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING |
48 AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING))
49 REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
50 AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE);
51
52 REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
53 AR_MCI_INTERRUPT_RX_MSG);
54 }
55 break;
56 }
57 41
58 udelay(10); 42 if (time_out < 0)
59 time_out -= 10; 43 break;
44 else
45 continue;
46 }
47 REG_WRITE(ah, address, bit_position);
60 48
61 if (time_out < 0) 49 if (address != AR_MCI_INTERRUPT_RX_MSG_RAW)
62 break; 50 break;
51
52 if (bit_position & AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
53 ar9003_mci_reset_req_wakeup(ah);
54
55 if (bit_position & (AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING |
56 AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING))
57 REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
58 AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE);
59
60 REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, AR_MCI_INTERRUPT_RX_MSG);
61 break;
63 } 62 }
64 63
65 if (time_out <= 0) { 64 if (time_out <= 0) {
@@ -127,14 +126,13 @@ static void ar9003_mci_send_coex_version_query(struct ath_hw *ah,
127 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; 126 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
128 u32 payload[4] = {0, 0, 0, 0}; 127 u32 payload[4] = {0, 0, 0, 0};
129 128
130 if (!mci->bt_version_known && 129 if (mci->bt_version_known ||
131 (mci->bt_state != MCI_BT_SLEEP)) { 130 (mci->bt_state == MCI_BT_SLEEP))
132 MCI_GPM_SET_TYPE_OPCODE(payload, 131 return;
133 MCI_GPM_COEX_AGENT, 132
134 MCI_GPM_COEX_VERSION_QUERY); 133 MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
135 ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, 134 MCI_GPM_COEX_VERSION_QUERY);
136 wait_done, true); 135 ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true);
137 }
138} 136}
139 137
140static void ar9003_mci_send_coex_version_response(struct ath_hw *ah, 138static void ar9003_mci_send_coex_version_response(struct ath_hw *ah,
@@ -158,15 +156,14 @@ static void ar9003_mci_send_coex_wlan_channels(struct ath_hw *ah,
158 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; 156 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
159 u32 *payload = &mci->wlan_channels[0]; 157 u32 *payload = &mci->wlan_channels[0];
160 158
161 if ((mci->wlan_channels_update == true) && 159 if (!mci->wlan_channels_update ||
162 (mci->bt_state != MCI_BT_SLEEP)) { 160 (mci->bt_state == MCI_BT_SLEEP))
163 MCI_GPM_SET_TYPE_OPCODE(payload, 161 return;
164 MCI_GPM_COEX_AGENT, 162
165 MCI_GPM_COEX_WLAN_CHANNELS); 163 MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
166 ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, 164 MCI_GPM_COEX_WLAN_CHANNELS);
167 wait_done, true); 165 ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true);
168 MCI_GPM_SET_TYPE_OPCODE(payload, 0xff, 0xff); 166 MCI_GPM_SET_TYPE_OPCODE(payload, 0xff, 0xff);
169 }
170} 167}
171 168
172static void ar9003_mci_send_coex_bt_status_query(struct ath_hw *ah, 169static void ar9003_mci_send_coex_bt_status_query(struct ath_hw *ah,
@@ -174,29 +171,30 @@ static void ar9003_mci_send_coex_bt_status_query(struct ath_hw *ah,
174{ 171{
175 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; 172 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
176 u32 payload[4] = {0, 0, 0, 0}; 173 u32 payload[4] = {0, 0, 0, 0};
177 bool query_btinfo = !!(query_type & (MCI_GPM_COEX_QUERY_BT_ALL_INFO | 174 bool query_btinfo;
178 MCI_GPM_COEX_QUERY_BT_TOPOLOGY));
179 175
180 if (mci->bt_state != MCI_BT_SLEEP) { 176 if (mci->bt_state == MCI_BT_SLEEP)
177 return;
181 178
182 MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT, 179 query_btinfo = !!(query_type & (MCI_GPM_COEX_QUERY_BT_ALL_INFO |
183 MCI_GPM_COEX_STATUS_QUERY); 180 MCI_GPM_COEX_QUERY_BT_TOPOLOGY));
181 MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
182 MCI_GPM_COEX_STATUS_QUERY);
184 183
185 *(((u8 *)payload) + MCI_GPM_COEX_B_BT_BITMAP) = query_type; 184 *(((u8 *)payload) + MCI_GPM_COEX_B_BT_BITMAP) = query_type;
186
187 /*
188 * If bt_status_query message is not sent successfully,
189 * then need_flush_btinfo should be set again.
190 */
191 if (!ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
192 wait_done, true)) {
193 if (query_btinfo)
194 mci->need_flush_btinfo = true;
195 }
196 185
186 /*
187 * If bt_status_query message is not sent successfully,
188 * then need_flush_btinfo should be set again.
189 */
190 if (!ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
191 wait_done, true)) {
197 if (query_btinfo) 192 if (query_btinfo)
198 mci->query_bt = false; 193 mci->need_flush_btinfo = true;
199 } 194 }
195
196 if (query_btinfo)
197 mci->query_bt = false;
200} 198}
201 199
202static void ar9003_mci_send_coex_halt_bt_gpm(struct ath_hw *ah, bool halt, 200static void ar9003_mci_send_coex_halt_bt_gpm(struct ath_hw *ah, bool halt,
@@ -241,73 +239,73 @@ static void ar9003_mci_prep_interface(struct ath_hw *ah)
241 ar9003_mci_remote_reset(ah, true); 239 ar9003_mci_remote_reset(ah, true);
242 ar9003_mci_send_req_wake(ah, true); 240 ar9003_mci_send_req_wake(ah, true);
243 241
244 if (ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, 242 if (!ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
245 AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING, 500)) { 243 AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING, 500))
244 goto clear_redunt;
246 245
247 mci->bt_state = MCI_BT_AWAKE; 246 mci->bt_state = MCI_BT_AWAKE;
248 247
249 /* 248 /*
250 * we don't need to send more remote_reset at this moment. 249 * we don't need to send more remote_reset at this moment.
251 * If BT receive first remote_reset, then BT HW will 250 * If BT receive first remote_reset, then BT HW will
252 * be cleaned up and will be able to receive req_wake 251 * be cleaned up and will be able to receive req_wake
253 * and BT HW will respond sys_waking. 252 * and BT HW will respond sys_waking.
254 * In this case, WLAN will receive BT's HW sys_waking. 253 * In this case, WLAN will receive BT's HW sys_waking.
255 * Otherwise, if BT SW missed initial remote_reset, 254 * Otherwise, if BT SW missed initial remote_reset,
256 * that remote_reset will still clean up BT MCI RX, 255 * that remote_reset will still clean up BT MCI RX,
257 * and the req_wake will wake BT up, 256 * and the req_wake will wake BT up,
258 * and BT SW will respond this req_wake with a remote_reset and 257 * and BT SW will respond this req_wake with a remote_reset and
259 * sys_waking. In this case, WLAN will receive BT's SW 258 * sys_waking. In this case, WLAN will receive BT's SW
260 * sys_waking. In either case, BT's RX is cleaned up. So we 259 * sys_waking. In either case, BT's RX is cleaned up. So we
261 * don't need to reply BT's remote_reset now, if any. 260 * don't need to reply BT's remote_reset now, if any.
262 * Similarly, if in any case, WLAN can receive BT's sys_waking, 261 * Similarly, if in any case, WLAN can receive BT's sys_waking,
263 * that means WLAN's RX is also fine. 262 * that means WLAN's RX is also fine.
264 */ 263 */
265 ar9003_mci_send_sys_waking(ah, true); 264 ar9003_mci_send_sys_waking(ah, true);
266 udelay(10); 265 udelay(10);
267 266
268 /* 267 /*
269 * Set BT priority interrupt value to be 0xff to 268 * Set BT priority interrupt value to be 0xff to
270 * avoid having too many BT PRIORITY interrupts. 269 * avoid having too many BT PRIORITY interrupts.
271 */ 270 */
272 REG_WRITE(ah, AR_MCI_BT_PRI0, 0xFFFFFFFF); 271 REG_WRITE(ah, AR_MCI_BT_PRI0, 0xFFFFFFFF);
273 REG_WRITE(ah, AR_MCI_BT_PRI1, 0xFFFFFFFF); 272 REG_WRITE(ah, AR_MCI_BT_PRI1, 0xFFFFFFFF);
274 REG_WRITE(ah, AR_MCI_BT_PRI2, 0xFFFFFFFF); 273 REG_WRITE(ah, AR_MCI_BT_PRI2, 0xFFFFFFFF);
275 REG_WRITE(ah, AR_MCI_BT_PRI3, 0xFFFFFFFF); 274 REG_WRITE(ah, AR_MCI_BT_PRI3, 0xFFFFFFFF);
276 REG_WRITE(ah, AR_MCI_BT_PRI, 0X000000FF); 275 REG_WRITE(ah, AR_MCI_BT_PRI, 0X000000FF);
277 276
278 /* 277 /*
279 * A contention reset will be received after send out 278 * A contention reset will be received after send out
280 * sys_waking. Also BT priority interrupt bits will be set. 279 * sys_waking. Also BT priority interrupt bits will be set.
281 * Clear those bits before the next step. 280 * Clear those bits before the next step.
282 */ 281 */
283 282
284 REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, 283 REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
285 AR_MCI_INTERRUPT_RX_MSG_CONT_RST); 284 AR_MCI_INTERRUPT_RX_MSG_CONT_RST);
286 REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, 285 REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, AR_MCI_INTERRUPT_BT_PRI);
287 AR_MCI_INTERRUPT_BT_PRI);
288 286
289 if (mci->is_2g) { 287 if (mci->is_2g) {
290 ar9003_mci_send_lna_transfer(ah, true); 288 ar9003_mci_send_lna_transfer(ah, true);
291 udelay(5); 289 udelay(5);
292 } 290 }
293 291
294 if ((mci->is_2g && !mci->update_2g5g)) { 292 if ((mci->is_2g && !mci->update_2g5g)) {
295 if (ar9003_mci_wait_for_interrupt(ah, 293 if (ar9003_mci_wait_for_interrupt(ah,
296 AR_MCI_INTERRUPT_RX_MSG_RAW, 294 AR_MCI_INTERRUPT_RX_MSG_RAW,
297 AR_MCI_INTERRUPT_RX_MSG_LNA_INFO, 295 AR_MCI_INTERRUPT_RX_MSG_LNA_INFO,
298 mci_timeout)) 296 mci_timeout))
299 ath_dbg(common, MCI, 297 ath_dbg(common, MCI,
300 "MCI WLAN has control over the LNA & BT obeys it\n"); 298 "MCI WLAN has control over the LNA & BT obeys it\n");
301 else 299 else
302 ath_dbg(common, MCI, 300 ath_dbg(common, MCI,
303 "MCI BT didn't respond to LNA_TRANS\n"); 301 "MCI BT didn't respond to LNA_TRANS\n");
304 }
305 } 302 }
306 303
304clear_redunt:
307 /* Clear the extra redundant SYS_WAKING from BT */ 305 /* Clear the extra redundant SYS_WAKING from BT */
308 if ((mci->bt_state == MCI_BT_AWAKE) && 306 if ((mci->bt_state == MCI_BT_AWAKE) &&
309 (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, 307 (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
310 AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING)) && 308 AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING)) &&
311 (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, 309 (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
312 AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) == 0)) { 310 AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) == 0)) {
313 REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, 311 REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
@@ -323,14 +321,13 @@ void ar9003_mci_set_full_sleep(struct ath_hw *ah)
323{ 321{
324 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; 322 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
325 323
326 if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) && 324 if (ar9003_mci_state(ah, MCI_STATE_ENABLE) &&
327 (mci->bt_state != MCI_BT_SLEEP) && 325 (mci->bt_state != MCI_BT_SLEEP) &&
328 !mci->halted_bt_gpm) { 326 !mci->halted_bt_gpm) {
329 ar9003_mci_send_coex_halt_bt_gpm(ah, true, true); 327 ar9003_mci_send_coex_halt_bt_gpm(ah, true, true);
330 } 328 }
331 329
332 mci->ready = false; 330 mci->ready = false;
333 REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
334} 331}
335 332
336static void ar9003_mci_disable_interrupt(struct ath_hw *ah) 333static void ar9003_mci_disable_interrupt(struct ath_hw *ah)
@@ -487,7 +484,7 @@ static void ar9003_mci_sync_bt_state(struct ath_hw *ah)
487 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; 484 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
488 u32 cur_bt_state; 485 u32 cur_bt_state;
489 486
490 cur_bt_state = ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL); 487 cur_bt_state = ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP);
491 488
492 if (mci->bt_state != cur_bt_state) 489 if (mci->bt_state != cur_bt_state)
493 mci->bt_state = cur_bt_state; 490 mci->bt_state = cur_bt_state;
@@ -596,8 +593,7 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
596 if (!time_out) 593 if (!time_out)
597 break; 594 break;
598 595
599 offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET, 596 offset = ar9003_mci_get_next_gpm_offset(ah, false, &more_data);
600 &more_data);
601 597
602 if (offset == MCI_GPM_INVALID) 598 if (offset == MCI_GPM_INVALID)
603 continue; 599 continue;
@@ -615,9 +611,9 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
615 } 611 }
616 break; 612 break;
617 } 613 }
618 } else if ((recv_type == gpm_type) && (recv_opcode == gpm_opcode)) { 614 } else if ((recv_type == gpm_type) &&
615 (recv_opcode == gpm_opcode))
619 break; 616 break;
620 }
621 617
622 /* 618 /*
623 * check if it's cal_grant 619 * check if it's cal_grant
@@ -661,8 +657,7 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
661 time_out = 0; 657 time_out = 0;
662 658
663 while (more_data == MCI_GPM_MORE) { 659 while (more_data == MCI_GPM_MORE) {
664 offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET, 660 offset = ar9003_mci_get_next_gpm_offset(ah, false, &more_data);
665 &more_data);
666 if (offset == MCI_GPM_INVALID) 661 if (offset == MCI_GPM_INVALID)
667 break; 662 break;
668 663
@@ -731,38 +726,38 @@ int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
731 if (!IS_CHAN_2GHZ(chan) || (mci_hw->bt_state != MCI_BT_SLEEP)) 726 if (!IS_CHAN_2GHZ(chan) || (mci_hw->bt_state != MCI_BT_SLEEP))
732 goto exit; 727 goto exit;
733 728
734 if (ar9003_mci_check_int(ah, AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET) || 729 if (!ar9003_mci_check_int(ah, AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET) &&
735 ar9003_mci_check_int(ah, AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)) { 730 !ar9003_mci_check_int(ah, AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE))
731 goto exit;
736 732
737 /* 733 /*
738 * BT is sleeping. Check if BT wakes up during 734 * BT is sleeping. Check if BT wakes up during
739 * WLAN calibration. If BT wakes up during 735 * WLAN calibration. If BT wakes up during
740 * WLAN calibration, need to go through all 736 * WLAN calibration, need to go through all
741 * message exchanges again and recal. 737 * message exchanges again and recal.
742 */ 738 */
743 REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, 739 REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
744 AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET | 740 (AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET |
745 AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE); 741 AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE));
746 742
747 ar9003_mci_remote_reset(ah, true); 743 ar9003_mci_remote_reset(ah, true);
748 ar9003_mci_send_sys_waking(ah, true); 744 ar9003_mci_send_sys_waking(ah, true);
749 udelay(1); 745 udelay(1);
750 746
751 if (IS_CHAN_2GHZ(chan)) 747 if (IS_CHAN_2GHZ(chan))
752 ar9003_mci_send_lna_transfer(ah, true); 748 ar9003_mci_send_lna_transfer(ah, true);
753 749
754 mci_hw->bt_state = MCI_BT_AWAKE; 750 mci_hw->bt_state = MCI_BT_AWAKE;
755 751
756 if (caldata) { 752 if (caldata) {
757 caldata->done_txiqcal_once = false; 753 caldata->done_txiqcal_once = false;
758 caldata->done_txclcal_once = false; 754 caldata->done_txclcal_once = false;
759 caldata->rtt_done = false; 755 caldata->rtt_done = false;
760 } 756 }
761 757
762 if (!ath9k_hw_init_cal(ah, chan)) 758 if (!ath9k_hw_init_cal(ah, chan))
763 return -EIO; 759 return -EIO;
764 760
765 }
766exit: 761exit:
767 ar9003_mci_enable_interrupt(ah); 762 ar9003_mci_enable_interrupt(ah);
768 return 0; 763 return 0;
@@ -772,10 +767,6 @@ static void ar9003_mci_mute_bt(struct ath_hw *ah)
772{ 767{
773 /* disable all MCI messages */ 768 /* disable all MCI messages */
774 REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, 0xffff0000); 769 REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, 0xffff0000);
775 REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS0, 0xffffffff);
776 REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS1, 0xffffffff);
777 REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS2, 0xffffffff);
778 REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS3, 0xffffffff);
779 REG_SET_BIT(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE); 770 REG_SET_BIT(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
780 771
781 /* wait pending HW messages to flush out */ 772 /* wait pending HW messages to flush out */
@@ -798,29 +789,27 @@ static void ar9003_mci_osla_setup(struct ath_hw *ah, bool enable)
798 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; 789 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
799 u32 thresh; 790 u32 thresh;
800 791
801 if (enable) { 792 if (!enable) {
802 REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
803 AR_MCI_SCHD_TABLE_2_HW_BASED, 1);
804 REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
805 AR_MCI_SCHD_TABLE_2_MEM_BASED, 1);
806
807 if (!(mci->config & ATH_MCI_CONFIG_DISABLE_AGGR_THRESH)) {
808 thresh = MS(mci->config, ATH_MCI_CONFIG_AGGR_THRESH);
809 REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
810 AR_BTCOEX_CTRL_AGGR_THRESH, thresh);
811 REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
812 AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN, 1);
813 } else {
814 REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
815 AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN, 0);
816 }
817
818 REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
819 AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN, 1);
820 } else {
821 REG_CLR_BIT(ah, AR_BTCOEX_CTRL, 793 REG_CLR_BIT(ah, AR_BTCOEX_CTRL,
822 AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN); 794 AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
795 return;
823 } 796 }
797 REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2, AR_MCI_SCHD_TABLE_2_HW_BASED, 1);
798 REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
799 AR_MCI_SCHD_TABLE_2_MEM_BASED, 1);
800
801 if (!(mci->config & ATH_MCI_CONFIG_DISABLE_AGGR_THRESH)) {
802 thresh = MS(mci->config, ATH_MCI_CONFIG_AGGR_THRESH);
803 REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
804 AR_BTCOEX_CTRL_AGGR_THRESH, thresh);
805 REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
806 AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN, 1);
807 } else
808 REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
809 AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN, 0);
810
811 REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
812 AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN, 1);
824} 813}
825 814
826void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g, 815void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
@@ -898,13 +887,16 @@ void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
898 udelay(100); 887 udelay(100);
899 } 888 }
900 889
890 /* Check pending GPM msg before MCI Reset Rx */
891 ar9003_mci_check_gpm_offset(ah);
892
901 regval |= SM(1, AR_MCI_COMMAND2_RESET_RX); 893 regval |= SM(1, AR_MCI_COMMAND2_RESET_RX);
902 REG_WRITE(ah, AR_MCI_COMMAND2, regval); 894 REG_WRITE(ah, AR_MCI_COMMAND2, regval);
903 udelay(1); 895 udelay(1);
904 regval &= ~SM(1, AR_MCI_COMMAND2_RESET_RX); 896 regval &= ~SM(1, AR_MCI_COMMAND2_RESET_RX);
905 REG_WRITE(ah, AR_MCI_COMMAND2, regval); 897 REG_WRITE(ah, AR_MCI_COMMAND2, regval);
906 898
907 ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET, NULL); 899 ar9003_mci_get_next_gpm_offset(ah, true, NULL);
908 900
909 REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, 901 REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE,
910 (SM(0xe801, AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR) | 902 (SM(0xe801, AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR) |
@@ -943,26 +935,27 @@ static void ar9003_mci_send_2g5g_status(struct ath_hw *ah, bool wait_done)
943 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; 935 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
944 u32 new_flags, to_set, to_clear; 936 u32 new_flags, to_set, to_clear;
945 937
946 if (mci->update_2g5g && (mci->bt_state != MCI_BT_SLEEP)) { 938 if (!mci->update_2g5g || (mci->bt_state == MCI_BT_SLEEP))
947 if (mci->is_2g) { 939 return;
948 new_flags = MCI_2G_FLAGS; 940
949 to_clear = MCI_2G_FLAGS_CLEAR_MASK; 941 if (mci->is_2g) {
950 to_set = MCI_2G_FLAGS_SET_MASK; 942 new_flags = MCI_2G_FLAGS;
951 } else { 943 to_clear = MCI_2G_FLAGS_CLEAR_MASK;
952 new_flags = MCI_5G_FLAGS; 944 to_set = MCI_2G_FLAGS_SET_MASK;
953 to_clear = MCI_5G_FLAGS_CLEAR_MASK; 945 } else {
954 to_set = MCI_5G_FLAGS_SET_MASK; 946 new_flags = MCI_5G_FLAGS;
955 } 947 to_clear = MCI_5G_FLAGS_CLEAR_MASK;
948 to_set = MCI_5G_FLAGS_SET_MASK;
949 }
956 950
957 if (to_clear) 951 if (to_clear)
958 ar9003_mci_send_coex_bt_flags(ah, wait_done, 952 ar9003_mci_send_coex_bt_flags(ah, wait_done,
959 MCI_GPM_COEX_BT_FLAGS_CLEAR, 953 MCI_GPM_COEX_BT_FLAGS_CLEAR,
960 to_clear); 954 to_clear);
961 if (to_set) 955 if (to_set)
962 ar9003_mci_send_coex_bt_flags(ah, wait_done, 956 ar9003_mci_send_coex_bt_flags(ah, wait_done,
963 MCI_GPM_COEX_BT_FLAGS_SET, 957 MCI_GPM_COEX_BT_FLAGS_SET,
964 to_set); 958 to_set);
965 }
966} 959}
967 960
968static void ar9003_mci_queue_unsent_gpm(struct ath_hw *ah, u8 header, 961static void ar9003_mci_queue_unsent_gpm(struct ath_hw *ah, u8 header,
@@ -1014,38 +1007,36 @@ static void ar9003_mci_queue_unsent_gpm(struct ath_hw *ah, u8 header,
1014 } 1007 }
1015} 1008}
1016 1009
1017void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done) 1010void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool force)
1018{ 1011{
1019 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; 1012 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
1020 1013
1021 if (mci->update_2g5g) { 1014 if (!mci->update_2g5g && !force)
1022 if (mci->is_2g) { 1015 return;
1023 ar9003_mci_send_2g5g_status(ah, true);
1024 ar9003_mci_send_lna_transfer(ah, true);
1025 udelay(5);
1026 1016
1027 REG_CLR_BIT(ah, AR_MCI_TX_CTRL, 1017 if (mci->is_2g) {
1028 AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE); 1018 ar9003_mci_send_2g5g_status(ah, true);
1029 REG_CLR_BIT(ah, AR_PHY_GLB_CONTROL, 1019 ar9003_mci_send_lna_transfer(ah, true);
1030 AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL); 1020 udelay(5);
1031 1021
1032 if (!(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA)) { 1022 REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
1033 REG_SET_BIT(ah, AR_BTCOEX_CTRL, 1023 AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
1034 AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN); 1024 REG_CLR_BIT(ah, AR_PHY_GLB_CONTROL,
1035 } 1025 AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
1036 } else { 1026
1037 ar9003_mci_send_lna_take(ah, true); 1027 if (!(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA))
1038 udelay(5); 1028 ar9003_mci_osla_setup(ah, true);
1029 } else {
1030 ar9003_mci_send_lna_take(ah, true);
1031 udelay(5);
1039 1032
1040 REG_SET_BIT(ah, AR_MCI_TX_CTRL, 1033 REG_SET_BIT(ah, AR_MCI_TX_CTRL,
1041 AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE); 1034 AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
1042 REG_SET_BIT(ah, AR_PHY_GLB_CONTROL, 1035 REG_SET_BIT(ah, AR_PHY_GLB_CONTROL,
1043 AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL); 1036 AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
1044 REG_CLR_BIT(ah, AR_BTCOEX_CTRL,
1045 AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
1046 1037
1047 ar9003_mci_send_2g5g_status(ah, true); 1038 ar9003_mci_osla_setup(ah, false);
1048 } 1039 ar9003_mci_send_2g5g_status(ah, true);
1049 } 1040 }
1050} 1041}
1051 1042
@@ -1132,7 +1123,7 @@ void ar9003_mci_init_cal_req(struct ath_hw *ah, bool *is_reusable)
1132 if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_GRANT, 0, 50000)) { 1123 if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_GRANT, 0, 50000)) {
1133 ath_dbg(common, MCI, "MCI BT_CAL_GRANT received\n"); 1124 ath_dbg(common, MCI, "MCI BT_CAL_GRANT received\n");
1134 } else { 1125 } else {
1135 is_reusable = false; 1126 *is_reusable = false;
1136 ath_dbg(common, MCI, "MCI BT_CAL_GRANT not received\n"); 1127 ath_dbg(common, MCI, "MCI BT_CAL_GRANT not received\n");
1137 } 1128 }
1138} 1129}
@@ -1173,11 +1164,10 @@ void ar9003_mci_cleanup(struct ath_hw *ah)
1173} 1164}
1174EXPORT_SYMBOL(ar9003_mci_cleanup); 1165EXPORT_SYMBOL(ar9003_mci_cleanup);
1175 1166
1176u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data) 1167u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type)
1177{ 1168{
1178 struct ath_common *common = ath9k_hw_common(ah);
1179 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; 1169 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
1180 u32 value = 0, more_gpm = 0, gpm_ptr; 1170 u32 value = 0;
1181 u8 query_type; 1171 u8 query_type;
1182 1172
1183 switch (state_type) { 1173 switch (state_type) {
@@ -1190,81 +1180,6 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
1190 } 1180 }
1191 value &= AR_BTCOEX_CTRL_MCI_MODE_EN; 1181 value &= AR_BTCOEX_CTRL_MCI_MODE_EN;
1192 break; 1182 break;
1193 case MCI_STATE_INIT_GPM_OFFSET:
1194 value = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
1195 mci->gpm_idx = value;
1196 break;
1197 case MCI_STATE_NEXT_GPM_OFFSET:
1198 case MCI_STATE_LAST_GPM_OFFSET:
1199 /*
1200 * This could be useful to avoid new GPM message interrupt which
1201 * may lead to spurious interrupt after power sleep, or multiple
1202 * entry of ath_mci_intr().
1203 * Adding empty GPM check by returning HAL_MCI_GPM_INVALID can
1204 * alleviate this effect, but clearing GPM RX interrupt bit is
1205 * safe, because whether this is called from hw or driver code
1206 * there must be an interrupt bit set/triggered initially
1207 */
1208 REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
1209 AR_MCI_INTERRUPT_RX_MSG_GPM);
1210
1211 gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
1212 value = gpm_ptr;
1213
1214 if (value == 0)
1215 value = mci->gpm_len - 1;
1216 else if (value >= mci->gpm_len) {
1217 if (value != 0xFFFF)
1218 value = 0;
1219 } else {
1220 value--;
1221 }
1222
1223 if (value == 0xFFFF) {
1224 value = MCI_GPM_INVALID;
1225 more_gpm = MCI_GPM_NOMORE;
1226 } else if (state_type == MCI_STATE_NEXT_GPM_OFFSET) {
1227 if (gpm_ptr == mci->gpm_idx) {
1228 value = MCI_GPM_INVALID;
1229 more_gpm = MCI_GPM_NOMORE;
1230 } else {
1231 for (;;) {
1232 u32 temp_index;
1233
1234 /* skip reserved GPM if any */
1235
1236 if (value != mci->gpm_idx)
1237 more_gpm = MCI_GPM_MORE;
1238 else
1239 more_gpm = MCI_GPM_NOMORE;
1240
1241 temp_index = mci->gpm_idx;
1242 mci->gpm_idx++;
1243
1244 if (mci->gpm_idx >=
1245 mci->gpm_len)
1246 mci->gpm_idx = 0;
1247
1248 if (ar9003_mci_is_gpm_valid(ah,
1249 temp_index)) {
1250 value = temp_index;
1251 break;
1252 }
1253
1254 if (more_gpm == MCI_GPM_NOMORE) {
1255 value = MCI_GPM_INVALID;
1256 break;
1257 }
1258 }
1259 }
1260 if (p_data)
1261 *p_data = more_gpm;
1262 }
1263
1264 if (value != MCI_GPM_INVALID)
1265 value <<= 4;
1266
1267 break;
1268 case MCI_STATE_LAST_SCHD_MSG_OFFSET: 1183 case MCI_STATE_LAST_SCHD_MSG_OFFSET:
1269 value = MS(REG_READ(ah, AR_MCI_RX_STATUS), 1184 value = MS(REG_READ(ah, AR_MCI_RX_STATUS),
1270 AR_MCI_RX_LAST_SCHD_MSG_INDEX); 1185 AR_MCI_RX_LAST_SCHD_MSG_INDEX);
@@ -1276,21 +1191,6 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
1276 AR_MCI_RX_REMOTE_SLEEP) ? 1191 AR_MCI_RX_REMOTE_SLEEP) ?
1277 MCI_BT_SLEEP : MCI_BT_AWAKE; 1192 MCI_BT_SLEEP : MCI_BT_AWAKE;
1278 break; 1193 break;
1279 case MCI_STATE_CONT_RSSI_POWER:
1280 value = MS(mci->cont_status, AR_MCI_CONT_RSSI_POWER);
1281 break;
1282 case MCI_STATE_CONT_PRIORITY:
1283 value = MS(mci->cont_status, AR_MCI_CONT_RRIORITY);
1284 break;
1285 case MCI_STATE_CONT_TXRX:
1286 value = MS(mci->cont_status, AR_MCI_CONT_TXRX);
1287 break;
1288 case MCI_STATE_BT:
1289 value = mci->bt_state;
1290 break;
1291 case MCI_STATE_SET_BT_SLEEP:
1292 mci->bt_state = MCI_BT_SLEEP;
1293 break;
1294 case MCI_STATE_SET_BT_AWAKE: 1194 case MCI_STATE_SET_BT_AWAKE:
1295 mci->bt_state = MCI_BT_AWAKE; 1195 mci->bt_state = MCI_BT_AWAKE;
1296 ar9003_mci_send_coex_version_query(ah, true); 1196 ar9003_mci_send_coex_version_query(ah, true);
@@ -1299,7 +1199,7 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
1299 if (mci->unhalt_bt_gpm) 1199 if (mci->unhalt_bt_gpm)
1300 ar9003_mci_send_coex_halt_bt_gpm(ah, false, true); 1200 ar9003_mci_send_coex_halt_bt_gpm(ah, false, true);
1301 1201
1302 ar9003_mci_2g5g_switch(ah, true); 1202 ar9003_mci_2g5g_switch(ah, false);
1303 break; 1203 break;
1304 case MCI_STATE_SET_BT_CAL_START: 1204 case MCI_STATE_SET_BT_CAL_START:
1305 mci->bt_state = MCI_BT_CAL_START; 1205 mci->bt_state = MCI_BT_CAL_START;
@@ -1323,34 +1223,6 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
1323 case MCI_STATE_SEND_WLAN_COEX_VERSION: 1223 case MCI_STATE_SEND_WLAN_COEX_VERSION:
1324 ar9003_mci_send_coex_version_response(ah, true); 1224 ar9003_mci_send_coex_version_response(ah, true);
1325 break; 1225 break;
1326 case MCI_STATE_SET_BT_COEX_VERSION:
1327 if (!p_data)
1328 ath_dbg(common, MCI,
1329 "MCI Set BT Coex version with NULL data!!\n");
1330 else {
1331 mci->bt_ver_major = (*p_data >> 8) & 0xff;
1332 mci->bt_ver_minor = (*p_data) & 0xff;
1333 mci->bt_version_known = true;
1334 ath_dbg(common, MCI, "MCI BT version set: %d.%d\n",
1335 mci->bt_ver_major, mci->bt_ver_minor);
1336 }
1337 break;
1338 case MCI_STATE_SEND_WLAN_CHANNELS:
1339 if (p_data) {
1340 if (((mci->wlan_channels[1] & 0xffff0000) ==
1341 (*(p_data + 1) & 0xffff0000)) &&
1342 (mci->wlan_channels[2] == *(p_data + 2)) &&
1343 (mci->wlan_channels[3] == *(p_data + 3)))
1344 break;
1345
1346 mci->wlan_channels[0] = *p_data++;
1347 mci->wlan_channels[1] = *p_data++;
1348 mci->wlan_channels[2] = *p_data++;
1349 mci->wlan_channels[3] = *p_data++;
1350 }
1351 mci->wlan_channels_update = true;
1352 ar9003_mci_send_coex_wlan_channels(ah, true);
1353 break;
1354 case MCI_STATE_SEND_VERSION_QUERY: 1226 case MCI_STATE_SEND_VERSION_QUERY:
1355 ar9003_mci_send_coex_version_query(ah, true); 1227 ar9003_mci_send_coex_version_query(ah, true);
1356 break; 1228 break;
@@ -1358,38 +1230,16 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
1358 query_type = MCI_GPM_COEX_QUERY_BT_TOPOLOGY; 1230 query_type = MCI_GPM_COEX_QUERY_BT_TOPOLOGY;
1359 ar9003_mci_send_coex_bt_status_query(ah, true, query_type); 1231 ar9003_mci_send_coex_bt_status_query(ah, true, query_type);
1360 break; 1232 break;
1361 case MCI_STATE_NEED_FLUSH_BT_INFO:
1362 /*
1363 * btcoex_hw.mci.unhalt_bt_gpm means whether it's
1364 * needed to send UNHALT message. It's set whenever
1365 * there's a request to send HALT message.
1366 * mci_halted_bt_gpm means whether HALT message is sent
1367 * out successfully.
1368 *
1369 * Checking (mci_unhalt_bt_gpm == false) instead of
1370 * checking (ah->mci_halted_bt_gpm == false) will make
1371 * sure currently is in UNHALT-ed mode and BT can
1372 * respond to status query.
1373 */
1374 value = (!mci->unhalt_bt_gpm &&
1375 mci->need_flush_btinfo) ? 1 : 0;
1376 if (p_data)
1377 mci->need_flush_btinfo =
1378 (*p_data != 0) ? true : false;
1379 break;
1380 case MCI_STATE_RECOVER_RX: 1233 case MCI_STATE_RECOVER_RX:
1381 ar9003_mci_prep_interface(ah); 1234 ar9003_mci_prep_interface(ah);
1382 mci->query_bt = true; 1235 mci->query_bt = true;
1383 mci->need_flush_btinfo = true; 1236 mci->need_flush_btinfo = true;
1384 ar9003_mci_send_coex_wlan_channels(ah, true); 1237 ar9003_mci_send_coex_wlan_channels(ah, true);
1385 ar9003_mci_2g5g_switch(ah, true); 1238 ar9003_mci_2g5g_switch(ah, false);
1386 break; 1239 break;
1387 case MCI_STATE_NEED_FTP_STOMP: 1240 case MCI_STATE_NEED_FTP_STOMP:
1388 value = !(mci->config & ATH_MCI_CONFIG_DISABLE_FTP_STOMP); 1241 value = !(mci->config & ATH_MCI_CONFIG_DISABLE_FTP_STOMP);
1389 break; 1242 break;
1390 case MCI_STATE_NEED_TUNING:
1391 value = !(mci->config & ATH_MCI_CONFIG_DISABLE_TUNING);
1392 break;
1393 default: 1243 default:
1394 break; 1244 break;
1395 } 1245 }
@@ -1397,3 +1247,173 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
1397 return value; 1247 return value;
1398} 1248}
1399EXPORT_SYMBOL(ar9003_mci_state); 1249EXPORT_SYMBOL(ar9003_mci_state);
1250
1251void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah)
1252{
1253 struct ath_common *common = ath9k_hw_common(ah);
1254 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
1255
1256 ath_dbg(common, MCI, "Give LNA and SPDT control to BT\n");
1257
1258 ar9003_mci_send_lna_take(ah, true);
1259 udelay(50);
1260
1261 REG_SET_BIT(ah, AR_PHY_GLB_CONTROL, AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
1262 mci->is_2g = false;
1263 mci->update_2g5g = true;
1264 ar9003_mci_send_2g5g_status(ah, true);
1265
1266 /* Force another 2g5g update at next scanning */
1267 mci->update_2g5g = true;
1268}
1269
1270void ar9003_mci_set_power_awake(struct ath_hw *ah)
1271{
1272 u32 btcoex_ctrl2, diag_sw;
1273 int i;
1274 u8 lna_ctrl, bt_sleep;
1275
1276 for (i = 0; i < AH_WAIT_TIMEOUT; i++) {
1277 btcoex_ctrl2 = REG_READ(ah, AR_BTCOEX_CTRL2);
1278 if (btcoex_ctrl2 != 0xdeadbeef)
1279 break;
1280 udelay(AH_TIME_QUANTUM);
1281 }
1282 REG_WRITE(ah, AR_BTCOEX_CTRL2, (btcoex_ctrl2 | BIT(23)));
1283
1284 for (i = 0; i < AH_WAIT_TIMEOUT; i++) {
1285 diag_sw = REG_READ(ah, AR_DIAG_SW);
1286 if (diag_sw != 0xdeadbeef)
1287 break;
1288 udelay(AH_TIME_QUANTUM);
1289 }
1290 REG_WRITE(ah, AR_DIAG_SW, (diag_sw | BIT(27) | BIT(19) | BIT(18)));
1291 lna_ctrl = REG_READ(ah, AR_OBS_BUS_CTRL) & 0x3;
1292 bt_sleep = REG_READ(ah, AR_MCI_RX_STATUS) & AR_MCI_RX_REMOTE_SLEEP;
1293
1294 REG_WRITE(ah, AR_BTCOEX_CTRL2, btcoex_ctrl2);
1295 REG_WRITE(ah, AR_DIAG_SW, diag_sw);
1296
1297 if (bt_sleep && (lna_ctrl == 2)) {
1298 REG_SET_BIT(ah, AR_BTCOEX_RC, 0x1);
1299 REG_CLR_BIT(ah, AR_BTCOEX_RC, 0x1);
1300 udelay(50);
1301 }
1302}
1303
1304void ar9003_mci_check_gpm_offset(struct ath_hw *ah)
1305{
1306 struct ath_common *common = ath9k_hw_common(ah);
1307 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
1308 u32 offset;
1309
1310 /*
1311 * This should only be called before "MAC Warm Reset" or "MCI Reset Rx".
1312 */
1313 offset = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
1314 if (mci->gpm_idx == offset)
1315 return;
1316 ath_dbg(common, MCI, "GPM cached write pointer mismatch %d %d\n",
1317 mci->gpm_idx, offset);
1318 mci->query_bt = true;
1319 mci->need_flush_btinfo = true;
1320 mci->gpm_idx = 0;
1321}
1322
1323u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more)
1324{
1325 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
1326 u32 offset, more_gpm = 0, gpm_ptr;
1327
1328 if (first) {
1329 gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
1330 mci->gpm_idx = gpm_ptr;
1331 return gpm_ptr;
1332 }
1333
1334 /*
1335 * This could be useful to avoid new GPM message interrupt which
1336 * may lead to spurious interrupt after power sleep, or multiple
1337 * entry of ath_mci_intr().
1338 * Adding empty GPM check by returning HAL_MCI_GPM_INVALID can
1339 * alleviate this effect, but clearing GPM RX interrupt bit is
1340 * safe, because whether this is called from hw or driver code
1341 * there must be an interrupt bit set/triggered initially
1342 */
1343 REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
1344 AR_MCI_INTERRUPT_RX_MSG_GPM);
1345
1346 gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
1347 offset = gpm_ptr;
1348
1349 if (!offset)
1350 offset = mci->gpm_len - 1;
1351 else if (offset >= mci->gpm_len) {
1352 if (offset != 0xFFFF)
1353 offset = 0;
1354 } else {
1355 offset--;
1356 }
1357
1358 if ((offset == 0xFFFF) || (gpm_ptr == mci->gpm_idx)) {
1359 offset = MCI_GPM_INVALID;
1360 more_gpm = MCI_GPM_NOMORE;
1361 goto out;
1362 }
1363 for (;;) {
1364 u32 temp_index;
1365
1366 /* skip reserved GPM if any */
1367
1368 if (offset != mci->gpm_idx)
1369 more_gpm = MCI_GPM_MORE;
1370 else
1371 more_gpm = MCI_GPM_NOMORE;
1372
1373 temp_index = mci->gpm_idx;
1374 mci->gpm_idx++;
1375
1376 if (mci->gpm_idx >= mci->gpm_len)
1377 mci->gpm_idx = 0;
1378
1379 if (ar9003_mci_is_gpm_valid(ah, temp_index)) {
1380 offset = temp_index;
1381 break;
1382 }
1383
1384 if (more_gpm == MCI_GPM_NOMORE) {
1385 offset = MCI_GPM_INVALID;
1386 break;
1387 }
1388 }
1389
1390 if (offset != MCI_GPM_INVALID)
1391 offset <<= 4;
1392out:
1393 if (more)
1394 *more = more_gpm;
1395
1396 return offset;
1397}
1398EXPORT_SYMBOL(ar9003_mci_get_next_gpm_offset);
1399
1400void ar9003_mci_set_bt_version(struct ath_hw *ah, u8 major, u8 minor)
1401{
1402 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
1403
1404 mci->bt_ver_major = major;
1405 mci->bt_ver_minor = minor;
1406 mci->bt_version_known = true;
1407 ath_dbg(ath9k_hw_common(ah), MCI, "MCI BT version set: %d.%d\n",
1408 mci->bt_ver_major, mci->bt_ver_minor);
1409}
1410EXPORT_SYMBOL(ar9003_mci_set_bt_version);
1411
1412void ar9003_mci_send_wlan_channels(struct ath_hw *ah)
1413{
1414 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
1415
1416 mci->wlan_channels_update = true;
1417 ar9003_mci_send_coex_wlan_channels(ah, true);
1418}
1419EXPORT_SYMBOL(ar9003_mci_send_wlan_channels);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.h b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
index 4842f6c06b8c..d33b8e128855 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
@@ -189,30 +189,18 @@ enum mci_bt_state {
189/* Type of state query */ 189/* Type of state query */
190enum mci_state_type { 190enum mci_state_type {
191 MCI_STATE_ENABLE, 191 MCI_STATE_ENABLE,
192 MCI_STATE_INIT_GPM_OFFSET,
193 MCI_STATE_NEXT_GPM_OFFSET,
194 MCI_STATE_LAST_GPM_OFFSET,
195 MCI_STATE_BT,
196 MCI_STATE_SET_BT_SLEEP,
197 MCI_STATE_SET_BT_AWAKE, 192 MCI_STATE_SET_BT_AWAKE,
198 MCI_STATE_SET_BT_CAL_START, 193 MCI_STATE_SET_BT_CAL_START,
199 MCI_STATE_SET_BT_CAL, 194 MCI_STATE_SET_BT_CAL,
200 MCI_STATE_LAST_SCHD_MSG_OFFSET, 195 MCI_STATE_LAST_SCHD_MSG_OFFSET,
201 MCI_STATE_REMOTE_SLEEP, 196 MCI_STATE_REMOTE_SLEEP,
202 MCI_STATE_CONT_RSSI_POWER,
203 MCI_STATE_CONT_PRIORITY,
204 MCI_STATE_CONT_TXRX,
205 MCI_STATE_RESET_REQ_WAKE, 197 MCI_STATE_RESET_REQ_WAKE,
206 MCI_STATE_SEND_WLAN_COEX_VERSION, 198 MCI_STATE_SEND_WLAN_COEX_VERSION,
207 MCI_STATE_SET_BT_COEX_VERSION,
208 MCI_STATE_SEND_WLAN_CHANNELS,
209 MCI_STATE_SEND_VERSION_QUERY, 199 MCI_STATE_SEND_VERSION_QUERY,
210 MCI_STATE_SEND_STATUS_QUERY, 200 MCI_STATE_SEND_STATUS_QUERY,
211 MCI_STATE_NEED_FLUSH_BT_INFO,
212 MCI_STATE_SET_CONCUR_TX_PRI, 201 MCI_STATE_SET_CONCUR_TX_PRI,
213 MCI_STATE_RECOVER_RX, 202 MCI_STATE_RECOVER_RX,
214 MCI_STATE_NEED_FTP_STOMP, 203 MCI_STATE_NEED_FTP_STOMP,
215 MCI_STATE_NEED_TUNING,
216 MCI_STATE_DEBUG, 204 MCI_STATE_DEBUG,
217 MCI_STATE_MAX 205 MCI_STATE_MAX
218}; 206};
@@ -260,28 +248,26 @@ enum mci_gpm_coex_opcode {
260bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag, 248bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
261 u32 *payload, u8 len, bool wait_done, 249 u32 *payload, u8 len, bool wait_done,
262 bool check_bt); 250 bool check_bt);
263u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data); 251u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type);
264void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf, 252void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
265 u16 len, u32 sched_addr); 253 u16 len, u32 sched_addr);
266void ar9003_mci_cleanup(struct ath_hw *ah); 254void ar9003_mci_cleanup(struct ath_hw *ah);
267void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr, 255void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
268 u32 *rx_msg_intr); 256 u32 *rx_msg_intr);
269 257u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more);
258void ar9003_mci_set_bt_version(struct ath_hw *ah, u8 major, u8 minor);
259void ar9003_mci_send_wlan_channels(struct ath_hw *ah);
270/* 260/*
271 * These functions are used by ath9k_hw. 261 * These functions are used by ath9k_hw.
272 */ 262 */
273 263
274#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT 264#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
275 265
276static inline bool ar9003_mci_is_ready(struct ath_hw *ah)
277{
278 return ah->btcoex_hw.mci.ready;
279}
280void ar9003_mci_stop_bt(struct ath_hw *ah, bool save_fullsleep); 266void ar9003_mci_stop_bt(struct ath_hw *ah, bool save_fullsleep);
281void ar9003_mci_init_cal_req(struct ath_hw *ah, bool *is_reusable); 267void ar9003_mci_init_cal_req(struct ath_hw *ah, bool *is_reusable);
282void ar9003_mci_init_cal_done(struct ath_hw *ah); 268void ar9003_mci_init_cal_done(struct ath_hw *ah);
283void ar9003_mci_set_full_sleep(struct ath_hw *ah); 269void ar9003_mci_set_full_sleep(struct ath_hw *ah);
284void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done); 270void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool force);
285void ar9003_mci_check_bt(struct ath_hw *ah); 271void ar9003_mci_check_bt(struct ath_hw *ah);
286bool ar9003_mci_start_reset(struct ath_hw *ah, struct ath9k_channel *chan); 272bool ar9003_mci_start_reset(struct ath_hw *ah, struct ath9k_channel *chan);
287int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan, 273int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
@@ -289,13 +275,12 @@ int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
289void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g, 275void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
290 bool is_full_sleep); 276 bool is_full_sleep);
291void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked); 277void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked);
278void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah);
279void ar9003_mci_set_power_awake(struct ath_hw *ah);
280void ar9003_mci_check_gpm_offset(struct ath_hw *ah);
292 281
293#else 282#else
294 283
295static inline bool ar9003_mci_is_ready(struct ath_hw *ah)
296{
297 return false;
298}
299static inline void ar9003_mci_stop_bt(struct ath_hw *ah, bool save_fullsleep) 284static inline void ar9003_mci_stop_bt(struct ath_hw *ah, bool save_fullsleep)
300{ 285{
301} 286}
@@ -330,6 +315,15 @@ static inline void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
330static inline void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked) 315static inline void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
331{ 316{
332} 317}
318static inline void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah)
319{
320}
321static inline void ar9003_mci_set_power_awake(struct ath_hw *ah)
322{
323}
324static inline void ar9003_mci_check_gpm_offset(struct ath_hw *ah)
325{
326}
333#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */ 327#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
334 328
335#endif 329#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
index 3d400e8d6535..2c9f7d7ed4cc 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -211,7 +211,7 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah)
211 AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES, 7); 211 AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES, 7);
212 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3, 212 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
213 AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL, 1); 213 AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL, 1);
214 if (AR_SREV_9485(ah) || AR_SREV_9462(ah)) 214 if (AR_SREV_9485(ah) || AR_SREV_9462(ah) || AR_SREV_9550(ah))
215 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3, 215 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
216 AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP, 216 AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP,
217 -3); 217 -3);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 11abb972be1f..d2346dbad6cd 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -99,7 +99,7 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
99 channelSel = (freq * 4) / 120; 99 channelSel = (freq * 4) / 120;
100 chan_frac = (((freq * 4) % 120) * 0x20000) / 120; 100 chan_frac = (((freq * 4) % 120) * 0x20000) / 120;
101 channelSel = (channelSel << 17) | chan_frac; 101 channelSel = (channelSel << 17) | chan_frac;
102 } else if (AR_SREV_9340(ah)) { 102 } else if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) {
103 if (ah->is_clk_25mhz) { 103 if (ah->is_clk_25mhz) {
104 u32 chan_frac; 104 u32 chan_frac;
105 105
@@ -113,7 +113,8 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
113 /* Set to 2G mode */ 113 /* Set to 2G mode */
114 bMode = 1; 114 bMode = 1;
115 } else { 115 } else {
116 if (AR_SREV_9340(ah) && ah->is_clk_25mhz) { 116 if ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) &&
117 ah->is_clk_25mhz) {
117 u32 chan_frac; 118 u32 chan_frac;
118 119
119 channelSel = (freq * 2) / 75; 120 channelSel = (freq * 2) / 75;
@@ -173,16 +174,15 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
173 int cur_bb_spur, negative = 0, cck_spur_freq; 174 int cur_bb_spur, negative = 0, cck_spur_freq;
174 int i; 175 int i;
175 int range, max_spur_cnts, synth_freq; 176 int range, max_spur_cnts, synth_freq;
176 u8 *spur_fbin_ptr = NULL; 177 u8 *spur_fbin_ptr = ar9003_get_spur_chan_ptr(ah, IS_CHAN_2GHZ(chan));
177 178
178 /* 179 /*
179 * Need to verify range +/- 10 MHz in control channel, otherwise spur 180 * Need to verify range +/- 10 MHz in control channel, otherwise spur
180 * is out-of-band and can be ignored. 181 * is out-of-band and can be ignored.
181 */ 182 */
182 183
183 if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah)) { 184 if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) ||
184 spur_fbin_ptr = ar9003_get_spur_chan_ptr(ah, 185 AR_SREV_9550(ah)) {
185 IS_CHAN_2GHZ(chan));
186 if (spur_fbin_ptr[0] == 0) /* No spur */ 186 if (spur_fbin_ptr[0] == 0) /* No spur */
187 return; 187 return;
188 max_spur_cnts = 5; 188 max_spur_cnts = 5;
@@ -207,7 +207,8 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
207 if (AR_SREV_9462(ah) && (i == 0 || i == 3)) 207 if (AR_SREV_9462(ah) && (i == 0 || i == 3))
208 continue; 208 continue;
209 negative = 0; 209 negative = 0;
210 if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah)) 210 if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) ||
211 AR_SREV_9550(ah))
211 cur_bb_spur = ath9k_hw_fbin2freq(spur_fbin_ptr[i], 212 cur_bb_spur = ath9k_hw_fbin2freq(spur_fbin_ptr[i],
212 IS_CHAN_2GHZ(chan)); 213 IS_CHAN_2GHZ(chan));
213 else 214 else
@@ -620,6 +621,50 @@ static void ar9003_hw_prog_ini(struct ath_hw *ah,
620 } 621 }
621} 622}
622 623
624static int ar9550_hw_get_modes_txgain_index(struct ath_hw *ah,
625 struct ath9k_channel *chan)
626{
627 int ret;
628
629 switch (chan->chanmode) {
630 case CHANNEL_A:
631 case CHANNEL_A_HT20:
632 if (chan->channel <= 5350)
633 ret = 1;
634 else if ((chan->channel > 5350) && (chan->channel <= 5600))
635 ret = 3;
636 else
637 ret = 5;
638 break;
639
640 case CHANNEL_A_HT40PLUS:
641 case CHANNEL_A_HT40MINUS:
642 if (chan->channel <= 5350)
643 ret = 2;
644 else if ((chan->channel > 5350) && (chan->channel <= 5600))
645 ret = 4;
646 else
647 ret = 6;
648 break;
649
650 case CHANNEL_G:
651 case CHANNEL_G_HT20:
652 case CHANNEL_B:
653 ret = 8;
654 break;
655
656 case CHANNEL_G_HT40PLUS:
657 case CHANNEL_G_HT40MINUS:
658 ret = 7;
659 break;
660
661 default:
662 ret = -EINVAL;
663 }
664
665 return ret;
666}
667
623static int ar9003_hw_process_ini(struct ath_hw *ah, 668static int ar9003_hw_process_ini(struct ath_hw *ah,
624 struct ath9k_channel *chan) 669 struct ath9k_channel *chan)
625{ 670{
@@ -661,7 +706,22 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
661 } 706 }
662 707
663 REG_WRITE_ARRAY(&ah->iniModesRxGain, 1, regWrites); 708 REG_WRITE_ARRAY(&ah->iniModesRxGain, 1, regWrites);
664 REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites); 709 if (AR_SREV_9550(ah))
710 REG_WRITE_ARRAY(&ah->ini_modes_rx_gain_bounds, modesIndex,
711 regWrites);
712
713 if (AR_SREV_9550(ah)) {
714 int modes_txgain_index;
715
716 modes_txgain_index = ar9550_hw_get_modes_txgain_index(ah, chan);
717 if (modes_txgain_index < 0)
718 return -EINVAL;
719
720 REG_WRITE_ARRAY(&ah->iniModesTxGain, modes_txgain_index,
721 regWrites);
722 } else {
723 REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
724 }
665 725
666 /* 726 /*
667 * For 5GHz channels requiring Fast Clock, apply 727 * For 5GHz channels requiring Fast Clock, apply
@@ -676,6 +736,10 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
676 if (chan->channel == 2484) 736 if (chan->channel == 2484)
677 ar9003_hw_prog_ini(ah, &ah->ini_japan2484, 1); 737 ar9003_hw_prog_ini(ah, &ah->ini_japan2484, 1);
678 738
739 if (AR_SREV_9462(ah))
740 REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE,
741 AR_GLB_SWREG_DISCONT_EN_BT_WLAN);
742
679 ah->modes_index = modesIndex; 743 ah->modes_index = modesIndex;
680 ar9003_hw_override_ini(ah); 744 ar9003_hw_override_ini(ah);
681 ar9003_hw_set_channel_regs(ah, chan); 745 ar9003_hw_set_channel_regs(ah, chan);
@@ -821,18 +885,18 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
821 REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW, 885 REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
822 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW); 886 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
823 887
824 if (!on != aniState->ofdmWeakSigDetectOff) { 888 if (on != aniState->ofdmWeakSigDetect) {
825 ath_dbg(common, ANI, 889 ath_dbg(common, ANI,
826 "** ch %d: ofdm weak signal: %s=>%s\n", 890 "** ch %d: ofdm weak signal: %s=>%s\n",
827 chan->channel, 891 chan->channel,
828 !aniState->ofdmWeakSigDetectOff ? 892 aniState->ofdmWeakSigDetect ?
829 "on" : "off", 893 "on" : "off",
830 on ? "on" : "off"); 894 on ? "on" : "off");
831 if (on) 895 if (on)
832 ah->stats.ast_ani_ofdmon++; 896 ah->stats.ast_ani_ofdmon++;
833 else 897 else
834 ah->stats.ast_ani_ofdmoff++; 898 ah->stats.ast_ani_ofdmoff++;
835 aniState->ofdmWeakSigDetectOff = !on; 899 aniState->ofdmWeakSigDetect = on;
836 } 900 }
837 break; 901 break;
838 } 902 }
@@ -851,7 +915,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
851 * from INI file & cap value 915 * from INI file & cap value
852 */ 916 */
853 value = firstep_table[level] - 917 value = firstep_table[level] -
854 firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] + 918 firstep_table[ATH9K_ANI_FIRSTEP_LVL] +
855 aniState->iniDef.firstep; 919 aniState->iniDef.firstep;
856 if (value < ATH9K_SIG_FIRSTEP_SETTING_MIN) 920 if (value < ATH9K_SIG_FIRSTEP_SETTING_MIN)
857 value = ATH9K_SIG_FIRSTEP_SETTING_MIN; 921 value = ATH9K_SIG_FIRSTEP_SETTING_MIN;
@@ -866,7 +930,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
866 * from INI file & cap value 930 * from INI file & cap value
867 */ 931 */
868 value2 = firstep_table[level] - 932 value2 = firstep_table[level] -
869 firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] + 933 firstep_table[ATH9K_ANI_FIRSTEP_LVL] +
870 aniState->iniDef.firstepLow; 934 aniState->iniDef.firstepLow;
871 if (value2 < ATH9K_SIG_FIRSTEP_SETTING_MIN) 935 if (value2 < ATH9K_SIG_FIRSTEP_SETTING_MIN)
872 value2 = ATH9K_SIG_FIRSTEP_SETTING_MIN; 936 value2 = ATH9K_SIG_FIRSTEP_SETTING_MIN;
@@ -882,7 +946,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
882 chan->channel, 946 chan->channel,
883 aniState->firstepLevel, 947 aniState->firstepLevel,
884 level, 948 level,
885 ATH9K_ANI_FIRSTEP_LVL_NEW, 949 ATH9K_ANI_FIRSTEP_LVL,
886 value, 950 value,
887 aniState->iniDef.firstep); 951 aniState->iniDef.firstep);
888 ath_dbg(common, ANI, 952 ath_dbg(common, ANI,
@@ -890,7 +954,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
890 chan->channel, 954 chan->channel,
891 aniState->firstepLevel, 955 aniState->firstepLevel,
892 level, 956 level,
893 ATH9K_ANI_FIRSTEP_LVL_NEW, 957 ATH9K_ANI_FIRSTEP_LVL,
894 value2, 958 value2,
895 aniState->iniDef.firstepLow); 959 aniState->iniDef.firstepLow);
896 if (level > aniState->firstepLevel) 960 if (level > aniState->firstepLevel)
@@ -915,7 +979,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
915 * from INI file & cap value 979 * from INI file & cap value
916 */ 980 */
917 value = cycpwrThr1_table[level] - 981 value = cycpwrThr1_table[level] -
918 cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] + 982 cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL] +
919 aniState->iniDef.cycpwrThr1; 983 aniState->iniDef.cycpwrThr1;
920 if (value < ATH9K_SIG_SPUR_IMM_SETTING_MIN) 984 if (value < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
921 value = ATH9K_SIG_SPUR_IMM_SETTING_MIN; 985 value = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
@@ -931,7 +995,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
931 * from INI file & cap value 995 * from INI file & cap value
932 */ 996 */
933 value2 = cycpwrThr1_table[level] - 997 value2 = cycpwrThr1_table[level] -
934 cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] + 998 cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL] +
935 aniState->iniDef.cycpwrThr1Ext; 999 aniState->iniDef.cycpwrThr1Ext;
936 if (value2 < ATH9K_SIG_SPUR_IMM_SETTING_MIN) 1000 if (value2 < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
937 value2 = ATH9K_SIG_SPUR_IMM_SETTING_MIN; 1001 value2 = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
@@ -946,7 +1010,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
946 chan->channel, 1010 chan->channel,
947 aniState->spurImmunityLevel, 1011 aniState->spurImmunityLevel,
948 level, 1012 level,
949 ATH9K_ANI_SPUR_IMMUNE_LVL_NEW, 1013 ATH9K_ANI_SPUR_IMMUNE_LVL,
950 value, 1014 value,
951 aniState->iniDef.cycpwrThr1); 1015 aniState->iniDef.cycpwrThr1);
952 ath_dbg(common, ANI, 1016 ath_dbg(common, ANI,
@@ -954,7 +1018,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
954 chan->channel, 1018 chan->channel,
955 aniState->spurImmunityLevel, 1019 aniState->spurImmunityLevel,
956 level, 1020 level,
957 ATH9K_ANI_SPUR_IMMUNE_LVL_NEW, 1021 ATH9K_ANI_SPUR_IMMUNE_LVL,
958 value2, 1022 value2,
959 aniState->iniDef.cycpwrThr1Ext); 1023 aniState->iniDef.cycpwrThr1Ext);
960 if (level > aniState->spurImmunityLevel) 1024 if (level > aniState->spurImmunityLevel)
@@ -975,16 +1039,16 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
975 AR_PHY_MRC_CCK_ENABLE, is_on); 1039 AR_PHY_MRC_CCK_ENABLE, is_on);
976 REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL, 1040 REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
977 AR_PHY_MRC_CCK_MUX_REG, is_on); 1041 AR_PHY_MRC_CCK_MUX_REG, is_on);
978 if (!is_on != aniState->mrcCCKOff) { 1042 if (is_on != aniState->mrcCCK) {
979 ath_dbg(common, ANI, "** ch %d: MRC CCK: %s=>%s\n", 1043 ath_dbg(common, ANI, "** ch %d: MRC CCK: %s=>%s\n",
980 chan->channel, 1044 chan->channel,
981 !aniState->mrcCCKOff ? "on" : "off", 1045 aniState->mrcCCK ? "on" : "off",
982 is_on ? "on" : "off"); 1046 is_on ? "on" : "off");
983 if (is_on) 1047 if (is_on)
984 ah->stats.ast_ani_ccklow++; 1048 ah->stats.ast_ani_ccklow++;
985 else 1049 else
986 ah->stats.ast_ani_cckhigh++; 1050 ah->stats.ast_ani_cckhigh++;
987 aniState->mrcCCKOff = !is_on; 1051 aniState->mrcCCK = is_on;
988 } 1052 }
989 break; 1053 break;
990 } 1054 }
@@ -998,9 +1062,9 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
998 ath_dbg(common, ANI, 1062 ath_dbg(common, ANI,
999 "ANI parameters: SI=%d, ofdmWS=%s FS=%d MRCcck=%s listenTime=%d ofdmErrs=%d cckErrs=%d\n", 1063 "ANI parameters: SI=%d, ofdmWS=%s FS=%d MRCcck=%s listenTime=%d ofdmErrs=%d cckErrs=%d\n",
1000 aniState->spurImmunityLevel, 1064 aniState->spurImmunityLevel,
1001 !aniState->ofdmWeakSigDetectOff ? "on" : "off", 1065 aniState->ofdmWeakSigDetect ? "on" : "off",
1002 aniState->firstepLevel, 1066 aniState->firstepLevel,
1003 !aniState->mrcCCKOff ? "on" : "off", 1067 aniState->mrcCCK ? "on" : "off",
1004 aniState->listenTime, 1068 aniState->listenTime,
1005 aniState->ofdmPhyErrCount, 1069 aniState->ofdmPhyErrCount,
1006 aniState->cckPhyErrCount); 1070 aniState->cckPhyErrCount);
@@ -1107,10 +1171,10 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
1107 AR_PHY_EXT_CYCPWR_THR1); 1171 AR_PHY_EXT_CYCPWR_THR1);
1108 1172
1109 /* these levels just got reset to defaults by the INI */ 1173 /* these levels just got reset to defaults by the INI */
1110 aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL_NEW; 1174 aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
1111 aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW; 1175 aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
1112 aniState->ofdmWeakSigDetectOff = !ATH9K_ANI_USE_OFDM_WEAK_SIG; 1176 aniState->ofdmWeakSigDetect = ATH9K_ANI_USE_OFDM_WEAK_SIG;
1113 aniState->mrcCCKOff = !ATH9K_ANI_ENABLE_MRC_CCK; 1177 aniState->mrcCCK = true;
1114} 1178}
1115 1179
1116static void ar9003_hw_set_radar_params(struct ath_hw *ah, 1180static void ar9003_hw_set_radar_params(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 7268a48a92a1..751c83b21493 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -636,8 +636,8 @@
636 636
637#define AR_CH0_TOP (AR_SREV_9300(ah) ? 0x16288 : \ 637#define AR_CH0_TOP (AR_SREV_9300(ah) ? 0x16288 : \
638 ((AR_SREV_9462(ah) ? 0x1628c : 0x16280))) 638 ((AR_SREV_9462(ah) ? 0x1628c : 0x16280)))
639#define AR_CH0_TOP_XPABIASLVL (0x300) 639#define AR_CH0_TOP_XPABIASLVL (AR_SREV_9550(ah) ? 0x3c0 : 0x300)
640#define AR_CH0_TOP_XPABIASLVL_S (8) 640#define AR_CH0_TOP_XPABIASLVL_S (AR_SREV_9550(ah) ? 6 : 8)
641 641
642#define AR_CH0_THERM (AR_SREV_9300(ah) ? 0x16290 : \ 642#define AR_CH0_THERM (AR_SREV_9300(ah) ? 0x16290 : \
643 ((AR_SREV_9485(ah) ? 0x1628c : 0x16294))) 643 ((AR_SREV_9485(ah) ? 0x1628c : 0x16294)))
@@ -650,6 +650,8 @@
650#define AR_SWITCH_TABLE_COM_ALL_S (0) 650#define AR_SWITCH_TABLE_COM_ALL_S (0)
651#define AR_SWITCH_TABLE_COM_AR9462_ALL (0xffffff) 651#define AR_SWITCH_TABLE_COM_AR9462_ALL (0xffffff)
652#define AR_SWITCH_TABLE_COM_AR9462_ALL_S (0) 652#define AR_SWITCH_TABLE_COM_AR9462_ALL_S (0)
653#define AR_SWITCH_TABLE_COM_AR9550_ALL (0xffffff)
654#define AR_SWITCH_TABLE_COM_AR9550_ALL_S (0)
653#define AR_SWITCH_TABLE_COM_SPDT (0x00f00000) 655#define AR_SWITCH_TABLE_COM_SPDT (0x00f00000)
654#define AR_SWITCH_TABLE_COM_SPDT_ALL (0x0000fff0) 656#define AR_SWITCH_TABLE_COM_SPDT_ALL (0x0000fff0)
655#define AR_SWITCH_TABLE_COM_SPDT_ALL_S (4) 657#define AR_SWITCH_TABLE_COM_SPDT_ALL_S (4)
@@ -820,18 +822,26 @@
820#define AR_PHY_CHAN_INFO_MEMORY_CAPTURE_MASK 0x0001 822#define AR_PHY_CHAN_INFO_MEMORY_CAPTURE_MASK 0x0001
821#define AR_PHY_RX_DELAY_DELAY 0x00003FFF 823#define AR_PHY_RX_DELAY_DELAY 0x00003FFF
822#define AR_PHY_CCK_TX_CTRL_JAPAN 0x00000010 824#define AR_PHY_CCK_TX_CTRL_JAPAN 0x00000010
823#define AR_PHY_SPECTRAL_SCAN_ENABLE 0x00000001 825
824#define AR_PHY_SPECTRAL_SCAN_ENABLE_S 0 826#define AR_PHY_SPECTRAL_SCAN_ENABLE 0x00000001
825#define AR_PHY_SPECTRAL_SCAN_ACTIVE 0x00000002 827#define AR_PHY_SPECTRAL_SCAN_ENABLE_S 0
826#define AR_PHY_SPECTRAL_SCAN_ACTIVE_S 1 828#define AR_PHY_SPECTRAL_SCAN_ACTIVE 0x00000002
827#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD 0x000000F0 829#define AR_PHY_SPECTRAL_SCAN_ACTIVE_S 1
828#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD_S 4 830#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD 0x000000F0
829#define AR_PHY_SPECTRAL_SCAN_PERIOD 0x0000FF00 831#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD_S 4
830#define AR_PHY_SPECTRAL_SCAN_PERIOD_S 8 832#define AR_PHY_SPECTRAL_SCAN_PERIOD 0x0000FF00
831#define AR_PHY_SPECTRAL_SCAN_COUNT 0x00FF0000 833#define AR_PHY_SPECTRAL_SCAN_PERIOD_S 8
832#define AR_PHY_SPECTRAL_SCAN_COUNT_S 16 834#define AR_PHY_SPECTRAL_SCAN_COUNT 0x0FFF0000
833#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x01000000 835#define AR_PHY_SPECTRAL_SCAN_COUNT_S 16
834#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 24 836#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x10000000
837#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 28
838#define AR_PHY_SPECTRAL_SCAN_PRIORITY 0x20000000
839#define AR_PHY_SPECTRAL_SCAN_PRIORITY_S 29
840#define AR_PHY_SPECTRAL_SCAN_USE_ERR5 0x40000000
841#define AR_PHY_SPECTRAL_SCAN_USE_ERR5_S 30
842#define AR_PHY_SPECTRAL_SCAN_COMPRESSED_RPT 0x80000000
843#define AR_PHY_SPECTRAL_SCAN_COMPRESSED_RPT_S 31
844
835#define AR_PHY_CHANNEL_STATUS_RX_CLEAR 0x00000004 845#define AR_PHY_CHANNEL_STATUS_RX_CLEAR 0x00000004
836#define AR_PHY_RTT_CTRL_ENA_RADIO_RETENTION 0x00000001 846#define AR_PHY_RTT_CTRL_ENA_RADIO_RETENTION 0x00000001
837#define AR_PHY_RTT_CTRL_ENA_RADIO_RETENTION_S 0 847#define AR_PHY_RTT_CTRL_ENA_RADIO_RETENTION_S 0
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
index 1bd3a3d22101..6e1756bc3833 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
@@ -337,12 +337,7 @@ static const u32 ar9331_modes_low_ob_db_tx_gain_1p1[][5] = {
337 {0x00016284, 0x14d3f000, 0x14d3f000, 0x14d3f000, 0x14d3f000}, 337 {0x00016284, 0x14d3f000, 0x14d3f000, 0x14d3f000, 0x14d3f000},
338}; 338};
339 339
340static const u32 ar9331_1p1_baseband_core_txfir_coeff_japan_2484[][2] = { 340#define ar9331_1p1_baseband_core_txfir_coeff_japan_2484 ar9462_2p0_baseband_core_txfir_coeff_japan_2484
341 /* Addr allmodes */
342 {0x0000a398, 0x00000000},
343 {0x0000a39c, 0x6f7f0301},
344 {0x0000a3a0, 0xca9228ee},
345};
346 341
347static const u32 ar9331_1p1_xtal_25M[][2] = { 342static const u32 ar9331_1p1_xtal_25M[][2] = {
348 /* Addr allmodes */ 343 /* Addr allmodes */
@@ -783,17 +778,7 @@ static const u32 ar9331_modes_high_power_tx_gain_1p1[][5] = {
783 {0x00016284, 0x14d3f000, 0x14d3f000, 0x14d3f000, 0x14d3f000}, 778 {0x00016284, 0x14d3f000, 0x14d3f000, 0x14d3f000, 0x14d3f000},
784}; 779};
785 780
786static const u32 ar9331_1p1_mac_postamble[][5] = { 781#define ar9331_1p1_mac_postamble ar9300_2p2_mac_postamble
787 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
788 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
789 {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
790 {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
791 {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
792 {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
793 {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
794 {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
795 {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
796};
797 782
798static const u32 ar9331_1p1_soc_preamble[][2] = { 783static const u32 ar9331_1p1_soc_preamble[][2] = {
799 /* Addr allmodes */ 784 /* Addr allmodes */
@@ -1112,38 +1097,4 @@ static const u32 ar9331_common_tx_gain_offset1_1[][1] = {
1112 {0x00000000}, 1097 {0x00000000},
1113}; 1098};
1114 1099
1115static const u32 ar9331_1p1_chansel_xtal_25M[] = {
1116 0x0101479e,
1117 0x0101d027,
1118 0x010258af,
1119 0x0102e138,
1120 0x010369c0,
1121 0x0103f249,
1122 0x01047ad1,
1123 0x0105035a,
1124 0x01058be2,
1125 0x0106146b,
1126 0x01069cf3,
1127 0x0107257c,
1128 0x0107ae04,
1129 0x0108f5b2,
1130};
1131
1132static const u32 ar9331_1p1_chansel_xtal_40M[] = {
1133 0x00a0ccbe,
1134 0x00a12213,
1135 0x00a17769,
1136 0x00a1ccbe,
1137 0x00a22213,
1138 0x00a27769,
1139 0x00a2ccbe,
1140 0x00a32213,
1141 0x00a37769,
1142 0x00a3ccbe,
1143 0x00a42213,
1144 0x00a47769,
1145 0x00a4ccbe,
1146 0x00a5998b,
1147};
1148
1149#endif /* INITVALS_9330_1P1_H */ 1100#endif /* INITVALS_9330_1P1_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
index 0e6ca0834b34..57ed8a112173 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2011 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
3 * 4 *
4 * Permission to use, copy, modify, and/or distribute this software for any 5 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 6 * purpose with or without fee is hereby granted, provided that the above
@@ -17,8 +18,8 @@
17#ifndef INITVALS_9330_1P2_H 18#ifndef INITVALS_9330_1P2_H
18#define INITVALS_9330_1P2_H 19#define INITVALS_9330_1P2_H
19 20
20static const u32 ar9331_modes_lowest_ob_db_tx_gain_1p2[][5] = { 21static const u32 ar9331_modes_high_ob_db_tx_gain_1p2[][5] = {
21 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 22 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
22 {0x0000a410, 0x000050d7, 0x000050d7, 0x000050d7, 0x000050d7}, 23 {0x0000a410, 0x000050d7, 0x000050d7, 0x000050d7, 0x000050d7},
23 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000}, 24 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
24 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002}, 25 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
@@ -102,8 +103,14 @@ static const u32 ar9331_modes_lowest_ob_db_tx_gain_1p2[][5] = {
102 {0x0000a63c, 0x04011004, 0x04011004, 0x04011004, 0x04011004}, 103 {0x0000a63c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
103}; 104};
104 105
106#define ar9331_modes_high_power_tx_gain_1p2 ar9331_modes_high_ob_db_tx_gain_1p2
107
108#define ar9331_modes_low_ob_db_tx_gain_1p2 ar9331_modes_high_power_tx_gain_1p2
109
110#define ar9331_modes_lowest_ob_db_tx_gain_1p2 ar9331_modes_low_ob_db_tx_gain_1p2
111
105static const u32 ar9331_1p2_baseband_postamble[][5] = { 112static const u32 ar9331_1p2_baseband_postamble[][5] = {
106 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 113 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
107 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005}, 114 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
108 {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e}, 115 {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
109 {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0}, 116 {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
@@ -147,191 +154,6 @@ static const u32 ar9331_1p2_baseband_postamble[][5] = {
147 {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 154 {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
148}; 155};
149 156
150static const u32 ar9331_modes_high_ob_db_tx_gain_1p2[][5] = {
151 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
152 {0x0000a410, 0x000050d7, 0x000050d7, 0x000050d7, 0x000050d7},
153 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
154 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
155 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
156 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
157 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
158 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
159 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
160 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
161 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x23000a00, 0x23000a00},
162 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x27000a02, 0x27000a02},
163 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2b000a04, 0x2b000a04},
164 {0x0000a52c, 0x41023e85, 0x41023e85, 0x3f001620, 0x3f001620},
165 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x41001621, 0x41001621},
166 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x44001640, 0x44001640},
167 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x46001641, 0x46001641},
168 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x48001642, 0x48001642},
169 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x4b001644, 0x4b001644},
170 {0x0000a544, 0x6502feca, 0x6502feca, 0x4e001a81, 0x4e001a81},
171 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x51001a83, 0x51001a83},
172 {0x0000a54c, 0x7203feca, 0x7203feca, 0x54001c84, 0x54001c84},
173 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x57001ce3, 0x57001ce3},
174 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x5b001ce5, 0x5b001ce5},
175 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5f001ce9, 0x5f001ce9},
176 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x66001eec, 0x66001eec},
177 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x66001eec, 0x66001eec},
178 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001eec, 0x66001eec},
179 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
180 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
181 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
182 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
183 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
184 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
185 {0x0000a580, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
186 {0x0000a584, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
187 {0x0000a588, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
188 {0x0000a58c, 0x11062202, 0x11062202, 0x0b000200, 0x0b000200},
189 {0x0000a590, 0x17022e00, 0x17022e00, 0x0f000202, 0x0f000202},
190 {0x0000a594, 0x1d000ec2, 0x1d000ec2, 0x11000400, 0x11000400},
191 {0x0000a598, 0x25020ec0, 0x25020ec0, 0x15000402, 0x15000402},
192 {0x0000a59c, 0x2b020ec3, 0x2b020ec3, 0x19000404, 0x19000404},
193 {0x0000a5a0, 0x2f001f04, 0x2f001f04, 0x1b000603, 0x1b000603},
194 {0x0000a5a4, 0x35001fc4, 0x35001fc4, 0x1f000a02, 0x1f000a02},
195 {0x0000a5a8, 0x3c022f04, 0x3c022f04, 0x23000a04, 0x23000a04},
196 {0x0000a5ac, 0x41023e85, 0x41023e85, 0x26000a20, 0x26000a20},
197 {0x0000a5b0, 0x48023ec6, 0x48023ec6, 0x2a000e20, 0x2a000e20},
198 {0x0000a5b4, 0x4d023f01, 0x4d023f01, 0x2e000e22, 0x2e000e22},
199 {0x0000a5b8, 0x53023f4b, 0x53023f4b, 0x31000e24, 0x31000e24},
200 {0x0000a5bc, 0x5a027f09, 0x5a027f09, 0x34001640, 0x34001640},
201 {0x0000a5c0, 0x5f027fc9, 0x5f027fc9, 0x38001660, 0x38001660},
202 {0x0000a5c4, 0x6502feca, 0x6502feca, 0x3b001861, 0x3b001861},
203 {0x0000a5c8, 0x6b02ff4a, 0x6b02ff4a, 0x3e001a81, 0x3e001a81},
204 {0x0000a5cc, 0x7203feca, 0x7203feca, 0x42001a83, 0x42001a83},
205 {0x0000a5d0, 0x7703ff0b, 0x7703ff0b, 0x44001c84, 0x44001c84},
206 {0x0000a5d4, 0x7d06ffcb, 0x7d06ffcb, 0x48001ce3, 0x48001ce3},
207 {0x0000a5d8, 0x8407ff0b, 0x8407ff0b, 0x4c001ce5, 0x4c001ce5},
208 {0x0000a5dc, 0x8907ffcb, 0x8907ffcb, 0x50001ce9, 0x50001ce9},
209 {0x0000a5e0, 0x900fff0b, 0x900fff0b, 0x54001ceb, 0x54001ceb},
210 {0x0000a5e4, 0x960fffcb, 0x960fffcb, 0x56001eec, 0x56001eec},
211 {0x0000a5e8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
212 {0x0000a5ec, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
213 {0x0000a5f0, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
214 {0x0000a5f4, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
215 {0x0000a5f8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
216 {0x0000a5fc, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
217 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
218 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
219 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
220 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
221 {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
222 {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
223 {0x0000a618, 0x02008501, 0x02008501, 0x02008501, 0x02008501},
224 {0x0000a61c, 0x02008802, 0x02008802, 0x02008802, 0x02008802},
225 {0x0000a620, 0x0300c802, 0x0300c802, 0x0300c802, 0x0300c802},
226 {0x0000a624, 0x0300cc03, 0x0300cc03, 0x0300cc03, 0x0300cc03},
227 {0x0000a628, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
228 {0x0000a62c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
229 {0x0000a630, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
230 {0x0000a634, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
231 {0x0000a638, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
232 {0x0000a63c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
233};
234
235static const u32 ar9331_modes_low_ob_db_tx_gain_1p2[][5] = {
236 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
237 {0x0000a410, 0x000050d7, 0x000050d7, 0x000050d7, 0x000050d7},
238 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
239 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
240 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
241 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
242 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
243 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
244 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
245 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
246 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x23000a00, 0x23000a00},
247 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x27000a02, 0x27000a02},
248 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2b000a04, 0x2b000a04},
249 {0x0000a52c, 0x41023e85, 0x41023e85, 0x3f001620, 0x3f001620},
250 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x41001621, 0x41001621},
251 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x44001640, 0x44001640},
252 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x46001641, 0x46001641},
253 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x48001642, 0x48001642},
254 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x4b001644, 0x4b001644},
255 {0x0000a544, 0x6502feca, 0x6502feca, 0x4e001a81, 0x4e001a81},
256 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x51001a83, 0x51001a83},
257 {0x0000a54c, 0x7203feca, 0x7203feca, 0x54001c84, 0x54001c84},
258 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x57001ce3, 0x57001ce3},
259 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x5b001ce5, 0x5b001ce5},
260 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5f001ce9, 0x5f001ce9},
261 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x66001eec, 0x66001eec},
262 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x66001eec, 0x66001eec},
263 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001eec, 0x66001eec},
264 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
265 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
266 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
267 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
268 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
269 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
270 {0x0000a580, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
271 {0x0000a584, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
272 {0x0000a588, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
273 {0x0000a58c, 0x11062202, 0x11062202, 0x0b000200, 0x0b000200},
274 {0x0000a590, 0x17022e00, 0x17022e00, 0x0f000202, 0x0f000202},
275 {0x0000a594, 0x1d000ec2, 0x1d000ec2, 0x11000400, 0x11000400},
276 {0x0000a598, 0x25020ec0, 0x25020ec0, 0x15000402, 0x15000402},
277 {0x0000a59c, 0x2b020ec3, 0x2b020ec3, 0x19000404, 0x19000404},
278 {0x0000a5a0, 0x2f001f04, 0x2f001f04, 0x1b000603, 0x1b000603},
279 {0x0000a5a4, 0x35001fc4, 0x35001fc4, 0x1f000a02, 0x1f000a02},
280 {0x0000a5a8, 0x3c022f04, 0x3c022f04, 0x23000a04, 0x23000a04},
281 {0x0000a5ac, 0x41023e85, 0x41023e85, 0x26000a20, 0x26000a20},
282 {0x0000a5b0, 0x48023ec6, 0x48023ec6, 0x2a000e20, 0x2a000e20},
283 {0x0000a5b4, 0x4d023f01, 0x4d023f01, 0x2e000e22, 0x2e000e22},
284 {0x0000a5b8, 0x53023f4b, 0x53023f4b, 0x31000e24, 0x31000e24},
285 {0x0000a5bc, 0x5a027f09, 0x5a027f09, 0x34001640, 0x34001640},
286 {0x0000a5c0, 0x5f027fc9, 0x5f027fc9, 0x38001660, 0x38001660},
287 {0x0000a5c4, 0x6502feca, 0x6502feca, 0x3b001861, 0x3b001861},
288 {0x0000a5c8, 0x6b02ff4a, 0x6b02ff4a, 0x3e001a81, 0x3e001a81},
289 {0x0000a5cc, 0x7203feca, 0x7203feca, 0x42001a83, 0x42001a83},
290 {0x0000a5d0, 0x7703ff0b, 0x7703ff0b, 0x44001c84, 0x44001c84},
291 {0x0000a5d4, 0x7d06ffcb, 0x7d06ffcb, 0x48001ce3, 0x48001ce3},
292 {0x0000a5d8, 0x8407ff0b, 0x8407ff0b, 0x4c001ce5, 0x4c001ce5},
293 {0x0000a5dc, 0x8907ffcb, 0x8907ffcb, 0x50001ce9, 0x50001ce9},
294 {0x0000a5e0, 0x900fff0b, 0x900fff0b, 0x54001ceb, 0x54001ceb},
295 {0x0000a5e4, 0x960fffcb, 0x960fffcb, 0x56001eec, 0x56001eec},
296 {0x0000a5e8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
297 {0x0000a5ec, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
298 {0x0000a5f0, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
299 {0x0000a5f4, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
300 {0x0000a5f8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
301 {0x0000a5fc, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
302 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
303 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
304 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
305 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
306 {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
307 {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
308 {0x0000a618, 0x02008501, 0x02008501, 0x02008501, 0x02008501},
309 {0x0000a61c, 0x02008802, 0x02008802, 0x02008802, 0x02008802},
310 {0x0000a620, 0x0300c802, 0x0300c802, 0x0300c802, 0x0300c802},
311 {0x0000a624, 0x0300cc03, 0x0300cc03, 0x0300cc03, 0x0300cc03},
312 {0x0000a628, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
313 {0x0000a62c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
314 {0x0000a630, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
315 {0x0000a634, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
316 {0x0000a638, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
317 {0x0000a63c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
318};
319
320static const u32 ar9331_1p2_baseband_core_txfir_coeff_japan_2484[][2] = {
321 /* Addr allmodes */
322 {0x0000a398, 0x00000000},
323 {0x0000a39c, 0x6f7f0301},
324 {0x0000a3a0, 0xca9228ee},
325};
326
327static const u32 ar9331_1p2_xtal_25M[][2] = {
328 /* Addr allmodes */
329 {0x00007038, 0x000002f8},
330 {0x00008244, 0x0010f3d7},
331 {0x0000824c, 0x0001e7ae},
332 {0x0001609c, 0x0f508f29},
333};
334
335static const u32 ar9331_1p2_radio_core[][2] = { 157static const u32 ar9331_1p2_radio_core[][2] = {
336 /* Addr allmodes */ 158 /* Addr allmodes */
337 {0x00016000, 0x36db6db6}, 159 {0x00016000, 0x36db6db6},
@@ -397,684 +219,24 @@ static const u32 ar9331_1p2_radio_core[][2] = {
397 {0x000163d4, 0x00000000}, 219 {0x000163d4, 0x00000000},
398}; 220};
399 221
400static const u32 ar9331_1p2_soc_postamble[][5] = { 222#define ar9331_1p2_baseband_core_txfir_coeff_japan_2484 ar9331_1p1_baseband_core_txfir_coeff_japan_2484
401 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
402 {0x00007010, 0x00000022, 0x00000022, 0x00000022, 0x00000022},
403};
404 223
405static const u32 ar9331_common_wo_xlna_rx_gain_1p2[][2] = { 224#define ar9331_1p2_xtal_25M ar9331_1p1_xtal_25M
406 /* Addr allmodes */
407 {0x0000a000, 0x00060005},
408 {0x0000a004, 0x00810080},
409 {0x0000a008, 0x00830082},
410 {0x0000a00c, 0x00850084},
411 {0x0000a010, 0x01820181},
412 {0x0000a014, 0x01840183},
413 {0x0000a018, 0x01880185},
414 {0x0000a01c, 0x018a0189},
415 {0x0000a020, 0x02850284},
416 {0x0000a024, 0x02890288},
417 {0x0000a028, 0x028b028a},
418 {0x0000a02c, 0x03850384},
419 {0x0000a030, 0x03890388},
420 {0x0000a034, 0x038b038a},
421 {0x0000a038, 0x038d038c},
422 {0x0000a03c, 0x03910390},
423 {0x0000a040, 0x03930392},
424 {0x0000a044, 0x03950394},
425 {0x0000a048, 0x00000396},
426 {0x0000a04c, 0x00000000},
427 {0x0000a050, 0x00000000},
428 {0x0000a054, 0x00000000},
429 {0x0000a058, 0x00000000},
430 {0x0000a05c, 0x00000000},
431 {0x0000a060, 0x00000000},
432 {0x0000a064, 0x00000000},
433 {0x0000a068, 0x00000000},
434 {0x0000a06c, 0x00000000},
435 {0x0000a070, 0x00000000},
436 {0x0000a074, 0x00000000},
437 {0x0000a078, 0x00000000},
438 {0x0000a07c, 0x00000000},
439 {0x0000a080, 0x28282828},
440 {0x0000a084, 0x28282828},
441 {0x0000a088, 0x28282828},
442 {0x0000a08c, 0x28282828},
443 {0x0000a090, 0x28282828},
444 {0x0000a094, 0x24242428},
445 {0x0000a098, 0x171e1e1e},
446 {0x0000a09c, 0x02020b0b},
447 {0x0000a0a0, 0x02020202},
448 {0x0000a0a4, 0x00000000},
449 {0x0000a0a8, 0x00000000},
450 {0x0000a0ac, 0x00000000},
451 {0x0000a0b0, 0x00000000},
452 {0x0000a0b4, 0x00000000},
453 {0x0000a0b8, 0x00000000},
454 {0x0000a0bc, 0x00000000},
455 {0x0000a0c0, 0x22072208},
456 {0x0000a0c4, 0x22052206},
457 {0x0000a0c8, 0x22032204},
458 {0x0000a0cc, 0x22012202},
459 {0x0000a0d0, 0x221f2200},
460 {0x0000a0d4, 0x221d221e},
461 {0x0000a0d8, 0x33023303},
462 {0x0000a0dc, 0x33003301},
463 {0x0000a0e0, 0x331e331f},
464 {0x0000a0e4, 0x4402331d},
465 {0x0000a0e8, 0x44004401},
466 {0x0000a0ec, 0x441e441f},
467 {0x0000a0f0, 0x55025503},
468 {0x0000a0f4, 0x55005501},
469 {0x0000a0f8, 0x551e551f},
470 {0x0000a0fc, 0x6602551d},
471 {0x0000a100, 0x66006601},
472 {0x0000a104, 0x661e661f},
473 {0x0000a108, 0x7703661d},
474 {0x0000a10c, 0x77017702},
475 {0x0000a110, 0x00007700},
476 {0x0000a114, 0x00000000},
477 {0x0000a118, 0x00000000},
478 {0x0000a11c, 0x00000000},
479 {0x0000a120, 0x00000000},
480 {0x0000a124, 0x00000000},
481 {0x0000a128, 0x00000000},
482 {0x0000a12c, 0x00000000},
483 {0x0000a130, 0x00000000},
484 {0x0000a134, 0x00000000},
485 {0x0000a138, 0x00000000},
486 {0x0000a13c, 0x00000000},
487 {0x0000a140, 0x001f0000},
488 {0x0000a144, 0x111f1100},
489 {0x0000a148, 0x111d111e},
490 {0x0000a14c, 0x111b111c},
491 {0x0000a150, 0x22032204},
492 {0x0000a154, 0x22012202},
493 {0x0000a158, 0x221f2200},
494 {0x0000a15c, 0x221d221e},
495 {0x0000a160, 0x33013302},
496 {0x0000a164, 0x331f3300},
497 {0x0000a168, 0x4402331e},
498 {0x0000a16c, 0x44004401},
499 {0x0000a170, 0x441e441f},
500 {0x0000a174, 0x55015502},
501 {0x0000a178, 0x551f5500},
502 {0x0000a17c, 0x6602551e},
503 {0x0000a180, 0x66006601},
504 {0x0000a184, 0x661e661f},
505 {0x0000a188, 0x7703661d},
506 {0x0000a18c, 0x77017702},
507 {0x0000a190, 0x00007700},
508 {0x0000a194, 0x00000000},
509 {0x0000a198, 0x00000000},
510 {0x0000a19c, 0x00000000},
511 {0x0000a1a0, 0x00000000},
512 {0x0000a1a4, 0x00000000},
513 {0x0000a1a8, 0x00000000},
514 {0x0000a1ac, 0x00000000},
515 {0x0000a1b0, 0x00000000},
516 {0x0000a1b4, 0x00000000},
517 {0x0000a1b8, 0x00000000},
518 {0x0000a1bc, 0x00000000},
519 {0x0000a1c0, 0x00000000},
520 {0x0000a1c4, 0x00000000},
521 {0x0000a1c8, 0x00000000},
522 {0x0000a1cc, 0x00000000},
523 {0x0000a1d0, 0x00000000},
524 {0x0000a1d4, 0x00000000},
525 {0x0000a1d8, 0x00000000},
526 {0x0000a1dc, 0x00000000},
527 {0x0000a1e0, 0x00000000},
528 {0x0000a1e4, 0x00000000},
529 {0x0000a1e8, 0x00000000},
530 {0x0000a1ec, 0x00000000},
531 {0x0000a1f0, 0x00000396},
532 {0x0000a1f4, 0x00000396},
533 {0x0000a1f8, 0x00000396},
534 {0x0000a1fc, 0x00000296},
535};
536 225
537static const u32 ar9331_1p2_baseband_core[][2] = { 226#define ar9331_1p2_xtal_40M ar9331_1p1_xtal_40M
538 /* Addr allmodes */
539 {0x00009800, 0xafe68e30},
540 {0x00009804, 0xfd14e000},
541 {0x00009808, 0x9c0a8f6b},
542 {0x0000980c, 0x04800000},
543 {0x00009814, 0x9280c00a},
544 {0x00009818, 0x00000000},
545 {0x0000981c, 0x00020028},
546 {0x00009834, 0x5f3ca3de},
547 {0x00009838, 0x0108ecff},
548 {0x0000983c, 0x14750600},
549 {0x00009880, 0x201fff00},
550 {0x00009884, 0x00001042},
551 {0x000098a4, 0x00200400},
552 {0x000098b0, 0x32840bbe},
553 {0x000098d0, 0x004b6a8e},
554 {0x000098d4, 0x00000820},
555 {0x000098dc, 0x00000000},
556 {0x000098f0, 0x00000000},
557 {0x000098f4, 0x00000000},
558 {0x00009c04, 0x00000000},
559 {0x00009c08, 0x03200000},
560 {0x00009c0c, 0x00000000},
561 {0x00009c10, 0x00000000},
562 {0x00009c14, 0x00046384},
563 {0x00009c18, 0x05b6b440},
564 {0x00009c1c, 0x00b6b440},
565 {0x00009d00, 0xc080a333},
566 {0x00009d04, 0x40206c10},
567 {0x00009d08, 0x009c4060},
568 {0x00009d0c, 0x1883800a},
569 {0x00009d10, 0x01834061},
570 {0x00009d14, 0x00c00400},
571 {0x00009d18, 0x00000000},
572 {0x00009e08, 0x0038233c},
573 {0x00009e24, 0x9927b515},
574 {0x00009e28, 0x12ef0200},
575 {0x00009e30, 0x06336f77},
576 {0x00009e34, 0x6af6532f},
577 {0x00009e38, 0x0cc80c00},
578 {0x00009e40, 0x0d261820},
579 {0x00009e4c, 0x00001004},
580 {0x00009e50, 0x00ff03f1},
581 {0x00009fc0, 0x803e4788},
582 {0x00009fc4, 0x0001efb5},
583 {0x00009fcc, 0x40000014},
584 {0x0000a20c, 0x00000000},
585 {0x0000a220, 0x00000000},
586 {0x0000a224, 0x00000000},
587 {0x0000a228, 0x10002310},
588 {0x0000a23c, 0x00000000},
589 {0x0000a244, 0x0c000000},
590 {0x0000a2a0, 0x00000001},
591 {0x0000a2c0, 0x00000001},
592 {0x0000a2c8, 0x00000000},
593 {0x0000a2cc, 0x18c43433},
594 {0x0000a2d4, 0x00000000},
595 {0x0000a2dc, 0x00000000},
596 {0x0000a2e0, 0x00000000},
597 {0x0000a2e4, 0x00000000},
598 {0x0000a2e8, 0x00000000},
599 {0x0000a2ec, 0x00000000},
600 {0x0000a2f0, 0x00000000},
601 {0x0000a2f4, 0x00000000},
602 {0x0000a2f8, 0x00000000},
603 {0x0000a344, 0x00000000},
604 {0x0000a34c, 0x00000000},
605 {0x0000a350, 0x0000a000},
606 {0x0000a364, 0x00000000},
607 {0x0000a370, 0x00000000},
608 {0x0000a390, 0x00000001},
609 {0x0000a394, 0x00000444},
610 {0x0000a398, 0x001f0e0f},
611 {0x0000a39c, 0x0075393f},
612 {0x0000a3a0, 0xb79f6427},
613 {0x0000a3a4, 0x00000000},
614 {0x0000a3a8, 0xaaaaaaaa},
615 {0x0000a3ac, 0x3c466478},
616 {0x0000a3c0, 0x20202020},
617 {0x0000a3c4, 0x22222220},
618 {0x0000a3c8, 0x20200020},
619 {0x0000a3cc, 0x20202020},
620 {0x0000a3d0, 0x20202020},
621 {0x0000a3d4, 0x20202020},
622 {0x0000a3d8, 0x20202020},
623 {0x0000a3dc, 0x20202020},
624 {0x0000a3e0, 0x20202020},
625 {0x0000a3e4, 0x20202020},
626 {0x0000a3e8, 0x20202020},
627 {0x0000a3ec, 0x20202020},
628 {0x0000a3f0, 0x00000000},
629 {0x0000a3f4, 0x00000006},
630 {0x0000a3f8, 0x0cdbd380},
631 {0x0000a3fc, 0x000f0f01},
632 {0x0000a400, 0x8fa91f01},
633 {0x0000a404, 0x00000000},
634 {0x0000a408, 0x0e79e5c6},
635 {0x0000a40c, 0x00820820},
636 {0x0000a414, 0x1ce739ce},
637 {0x0000a418, 0x2d001dce},
638 {0x0000a41c, 0x1ce739ce},
639 {0x0000a420, 0x000001ce},
640 {0x0000a424, 0x1ce739ce},
641 {0x0000a428, 0x000001ce},
642 {0x0000a42c, 0x1ce739ce},
643 {0x0000a430, 0x1ce739ce},
644 {0x0000a434, 0x00000000},
645 {0x0000a438, 0x00001801},
646 {0x0000a43c, 0x00000000},
647 {0x0000a440, 0x00000000},
648 {0x0000a444, 0x00000000},
649 {0x0000a448, 0x04000000},
650 {0x0000a44c, 0x00000001},
651 {0x0000a450, 0x00010000},
652 {0x0000a458, 0x00000000},
653 {0x0000a640, 0x00000000},
654 {0x0000a644, 0x3fad9d74},
655 {0x0000a648, 0x0048060a},
656 {0x0000a64c, 0x00003c37},
657 {0x0000a670, 0x03020100},
658 {0x0000a674, 0x09080504},
659 {0x0000a678, 0x0d0c0b0a},
660 {0x0000a67c, 0x13121110},
661 {0x0000a680, 0x31301514},
662 {0x0000a684, 0x35343332},
663 {0x0000a688, 0x00000036},
664 {0x0000a690, 0x00000838},
665 {0x0000a7c0, 0x00000000},
666 {0x0000a7c4, 0xfffffffc},
667 {0x0000a7c8, 0x00000000},
668 {0x0000a7cc, 0x00000000},
669 {0x0000a7d0, 0x00000000},
670 {0x0000a7d4, 0x00000004},
671 {0x0000a7dc, 0x00000001},
672};
673 227
674static const u32 ar9331_modes_high_power_tx_gain_1p2[][5] = { 228#define ar9331_1p2_baseband_core ar9331_1p1_baseband_core
675 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
676 {0x0000a410, 0x000050d7, 0x000050d7, 0x000050d7, 0x000050d7},
677 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
678 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
679 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
680 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
681 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
682 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
683 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
684 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
685 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x23000a00, 0x23000a00},
686 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x27000a02, 0x27000a02},
687 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2b000a04, 0x2b000a04},
688 {0x0000a52c, 0x41023e85, 0x41023e85, 0x3f001620, 0x3f001620},
689 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x41001621, 0x41001621},
690 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x44001640, 0x44001640},
691 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x46001641, 0x46001641},
692 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x48001642, 0x48001642},
693 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x4b001644, 0x4b001644},
694 {0x0000a544, 0x6502feca, 0x6502feca, 0x4e001a81, 0x4e001a81},
695 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x51001a83, 0x51001a83},
696 {0x0000a54c, 0x7203feca, 0x7203feca, 0x54001c84, 0x54001c84},
697 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x57001ce3, 0x57001ce3},
698 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x5b001ce5, 0x5b001ce5},
699 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5f001ce9, 0x5f001ce9},
700 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x66001eec, 0x66001eec},
701 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x66001eec, 0x66001eec},
702 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001eec, 0x66001eec},
703 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
704 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
705 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
706 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
707 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
708 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
709 {0x0000a580, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
710 {0x0000a584, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
711 {0x0000a588, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
712 {0x0000a58c, 0x11062202, 0x11062202, 0x0b000200, 0x0b000200},
713 {0x0000a590, 0x17022e00, 0x17022e00, 0x0f000202, 0x0f000202},
714 {0x0000a594, 0x1d000ec2, 0x1d000ec2, 0x11000400, 0x11000400},
715 {0x0000a598, 0x25020ec0, 0x25020ec0, 0x15000402, 0x15000402},
716 {0x0000a59c, 0x2b020ec3, 0x2b020ec3, 0x19000404, 0x19000404},
717 {0x0000a5a0, 0x2f001f04, 0x2f001f04, 0x1b000603, 0x1b000603},
718 {0x0000a5a4, 0x35001fc4, 0x35001fc4, 0x1f000a02, 0x1f000a02},
719 {0x0000a5a8, 0x3c022f04, 0x3c022f04, 0x23000a04, 0x23000a04},
720 {0x0000a5ac, 0x41023e85, 0x41023e85, 0x26000a20, 0x26000a20},
721 {0x0000a5b0, 0x48023ec6, 0x48023ec6, 0x2a000e20, 0x2a000e20},
722 {0x0000a5b4, 0x4d023f01, 0x4d023f01, 0x2e000e22, 0x2e000e22},
723 {0x0000a5b8, 0x53023f4b, 0x53023f4b, 0x31000e24, 0x31000e24},
724 {0x0000a5bc, 0x5a027f09, 0x5a027f09, 0x34001640, 0x34001640},
725 {0x0000a5c0, 0x5f027fc9, 0x5f027fc9, 0x38001660, 0x38001660},
726 {0x0000a5c4, 0x6502feca, 0x6502feca, 0x3b001861, 0x3b001861},
727 {0x0000a5c8, 0x6b02ff4a, 0x6b02ff4a, 0x3e001a81, 0x3e001a81},
728 {0x0000a5cc, 0x7203feca, 0x7203feca, 0x42001a83, 0x42001a83},
729 {0x0000a5d0, 0x7703ff0b, 0x7703ff0b, 0x44001c84, 0x44001c84},
730 {0x0000a5d4, 0x7d06ffcb, 0x7d06ffcb, 0x48001ce3, 0x48001ce3},
731 {0x0000a5d8, 0x8407ff0b, 0x8407ff0b, 0x4c001ce5, 0x4c001ce5},
732 {0x0000a5dc, 0x8907ffcb, 0x8907ffcb, 0x50001ce9, 0x50001ce9},
733 {0x0000a5e0, 0x900fff0b, 0x900fff0b, 0x54001ceb, 0x54001ceb},
734 {0x0000a5e4, 0x960fffcb, 0x960fffcb, 0x56001eec, 0x56001eec},
735 {0x0000a5e8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
736 {0x0000a5ec, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
737 {0x0000a5f0, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
738 {0x0000a5f4, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
739 {0x0000a5f8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
740 {0x0000a5fc, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
741 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
742 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
743 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
744 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
745 {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
746 {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
747 {0x0000a618, 0x02008501, 0x02008501, 0x02008501, 0x02008501},
748 {0x0000a61c, 0x02008802, 0x02008802, 0x02008802, 0x02008802},
749 {0x0000a620, 0x0300c802, 0x0300c802, 0x0300c802, 0x0300c802},
750 {0x0000a624, 0x0300cc03, 0x0300cc03, 0x0300cc03, 0x0300cc03},
751 {0x0000a628, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
752 {0x0000a62c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
753 {0x0000a630, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
754 {0x0000a634, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
755 {0x0000a638, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
756 {0x0000a63c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
757};
758 229
759static const u32 ar9331_1p2_mac_postamble[][5] = { 230#define ar9331_1p2_soc_postamble ar9331_1p1_soc_postamble
760 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
761 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
762 {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
763 {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
764 {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
765 {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
766 {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
767 {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
768 {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
769};
770 231
771static const u32 ar9331_1p2_soc_preamble[][2] = { 232#define ar9331_1p2_mac_postamble ar9331_1p1_mac_postamble
772 /* Addr allmodes */
773 {0x00007020, 0x00000000},
774 {0x00007034, 0x00000002},
775 {0x00007038, 0x000002f8},
776};
777 233
778static const u32 ar9331_1p2_xtal_40M[][2] = { 234#define ar9331_1p2_soc_preamble ar9331_1p1_soc_preamble
779 /* Addr allmodes */
780 {0x00007038, 0x000004c2},
781 {0x00008244, 0x0010f400},
782 {0x0000824c, 0x0001e800},
783 {0x0001609c, 0x0b283f31},
784};
785 235
786static const u32 ar9331_1p2_mac_core[][2] = { 236#define ar9331_1p2_mac_core ar9331_1p1_mac_core
787 /* Addr allmodes */
788 {0x00000008, 0x00000000},
789 {0x00000030, 0x00020085},
790 {0x00000034, 0x00000005},
791 {0x00000040, 0x00000000},
792 {0x00000044, 0x00000000},
793 {0x00000048, 0x00000008},
794 {0x0000004c, 0x00000010},
795 {0x00000050, 0x00000000},
796 {0x00001040, 0x002ffc0f},
797 {0x00001044, 0x002ffc0f},
798 {0x00001048, 0x002ffc0f},
799 {0x0000104c, 0x002ffc0f},
800 {0x00001050, 0x002ffc0f},
801 {0x00001054, 0x002ffc0f},
802 {0x00001058, 0x002ffc0f},
803 {0x0000105c, 0x002ffc0f},
804 {0x00001060, 0x002ffc0f},
805 {0x00001064, 0x002ffc0f},
806 {0x000010f0, 0x00000100},
807 {0x00001270, 0x00000000},
808 {0x000012b0, 0x00000000},
809 {0x000012f0, 0x00000000},
810 {0x0000143c, 0x00000000},
811 {0x0000147c, 0x00000000},
812 {0x00008000, 0x00000000},
813 {0x00008004, 0x00000000},
814 {0x00008008, 0x00000000},
815 {0x0000800c, 0x00000000},
816 {0x00008018, 0x00000000},
817 {0x00008020, 0x00000000},
818 {0x00008038, 0x00000000},
819 {0x0000803c, 0x00000000},
820 {0x00008040, 0x00000000},
821 {0x00008044, 0x00000000},
822 {0x00008048, 0x00000000},
823 {0x0000804c, 0xffffffff},
824 {0x00008054, 0x00000000},
825 {0x00008058, 0x00000000},
826 {0x0000805c, 0x000fc78f},
827 {0x00008060, 0x0000000f},
828 {0x00008064, 0x00000000},
829 {0x00008070, 0x00000310},
830 {0x00008074, 0x00000020},
831 {0x00008078, 0x00000000},
832 {0x0000809c, 0x0000000f},
833 {0x000080a0, 0x00000000},
834 {0x000080a4, 0x02ff0000},
835 {0x000080a8, 0x0e070605},
836 {0x000080ac, 0x0000000d},
837 {0x000080b0, 0x00000000},
838 {0x000080b4, 0x00000000},
839 {0x000080b8, 0x00000000},
840 {0x000080bc, 0x00000000},
841 {0x000080c0, 0x2a800000},
842 {0x000080c4, 0x06900168},
843 {0x000080c8, 0x13881c20},
844 {0x000080cc, 0x01f40000},
845 {0x000080d0, 0x00252500},
846 {0x000080d4, 0x00a00000},
847 {0x000080d8, 0x00400000},
848 {0x000080dc, 0x00000000},
849 {0x000080e0, 0xffffffff},
850 {0x000080e4, 0x0000ffff},
851 {0x000080e8, 0x3f3f3f3f},
852 {0x000080ec, 0x00000000},
853 {0x000080f0, 0x00000000},
854 {0x000080f4, 0x00000000},
855 {0x000080fc, 0x00020000},
856 {0x00008100, 0x00000000},
857 {0x00008108, 0x00000052},
858 {0x0000810c, 0x00000000},
859 {0x00008110, 0x00000000},
860 {0x00008114, 0x000007ff},
861 {0x00008118, 0x000000aa},
862 {0x0000811c, 0x00003210},
863 {0x00008124, 0x00000000},
864 {0x00008128, 0x00000000},
865 {0x0000812c, 0x00000000},
866 {0x00008130, 0x00000000},
867 {0x00008134, 0x00000000},
868 {0x00008138, 0x00000000},
869 {0x0000813c, 0x0000ffff},
870 {0x00008144, 0xffffffff},
871 {0x00008168, 0x00000000},
872 {0x0000816c, 0x00000000},
873 {0x00008170, 0x18486200},
874 {0x00008174, 0x33332210},
875 {0x00008178, 0x00000000},
876 {0x0000817c, 0x00020000},
877 {0x000081c0, 0x00000000},
878 {0x000081c4, 0x33332210},
879 {0x000081c8, 0x00000000},
880 {0x000081cc, 0x00000000},
881 {0x000081d4, 0x00000000},
882 {0x000081ec, 0x00000000},
883 {0x000081f0, 0x00000000},
884 {0x000081f4, 0x00000000},
885 {0x000081f8, 0x00000000},
886 {0x000081fc, 0x00000000},
887 {0x00008240, 0x00100000},
888 {0x00008248, 0x00000800},
889 {0x00008250, 0x00000000},
890 {0x00008254, 0x00000000},
891 {0x00008258, 0x00000000},
892 {0x0000825c, 0x40000000},
893 {0x00008260, 0x00080922},
894 {0x00008264, 0x9d400010},
895 {0x00008268, 0xffffffff},
896 {0x0000826c, 0x0000ffff},
897 {0x00008270, 0x00000000},
898 {0x00008274, 0x40000000},
899 {0x00008278, 0x003e4180},
900 {0x0000827c, 0x00000004},
901 {0x00008284, 0x0000002c},
902 {0x00008288, 0x0000002c},
903 {0x0000828c, 0x000000ff},
904 {0x00008294, 0x00000000},
905 {0x00008298, 0x00000000},
906 {0x0000829c, 0x00000000},
907 {0x00008300, 0x00000140},
908 {0x00008314, 0x00000000},
909 {0x0000831c, 0x0000010d},
910 {0x00008328, 0x00000000},
911 {0x0000832c, 0x00000007},
912 {0x00008330, 0x00000302},
913 {0x00008334, 0x00000700},
914 {0x00008338, 0x00ff0000},
915 {0x0000833c, 0x02400000},
916 {0x00008340, 0x000107ff},
917 {0x00008344, 0xaa48105b},
918 {0x00008348, 0x008f0000},
919 {0x0000835c, 0x00000000},
920 {0x00008360, 0xffffffff},
921 {0x00008364, 0xffffffff},
922 {0x00008368, 0x00000000},
923 {0x00008370, 0x00000000},
924 {0x00008374, 0x000000ff},
925 {0x00008378, 0x00000000},
926 {0x0000837c, 0x00000000},
927 {0x00008380, 0xffffffff},
928 {0x00008384, 0xffffffff},
929 {0x00008390, 0xffffffff},
930 {0x00008394, 0xffffffff},
931 {0x00008398, 0x00000000},
932 {0x0000839c, 0x00000000},
933 {0x000083a0, 0x00000000},
934 {0x000083a4, 0x0000fa14},
935 {0x000083a8, 0x000f0c00},
936 {0x000083ac, 0x33332210},
937 {0x000083b0, 0x33332210},
938 {0x000083b4, 0x33332210},
939 {0x000083b8, 0x33332210},
940 {0x000083bc, 0x00000000},
941 {0x000083c0, 0x00000000},
942 {0x000083c4, 0x00000000},
943 {0x000083c8, 0x00000000},
944 {0x000083cc, 0x00000200},
945 {0x000083d0, 0x000301ff},
946};
947 237
948static const u32 ar9331_common_rx_gain_1p2[][2] = { 238#define ar9331_common_wo_xlna_rx_gain_1p2 ar9331_common_wo_xlna_rx_gain_1p1
949 /* Addr allmodes */ 239
950 {0x0000a000, 0x00010000}, 240#define ar9331_common_rx_gain_1p2 ar9485_common_rx_gain_1_1
951 {0x0000a004, 0x00030002},
952 {0x0000a008, 0x00050004},
953 {0x0000a00c, 0x00810080},
954 {0x0000a010, 0x01800082},
955 {0x0000a014, 0x01820181},
956 {0x0000a018, 0x01840183},
957 {0x0000a01c, 0x01880185},
958 {0x0000a020, 0x018a0189},
959 {0x0000a024, 0x02850284},
960 {0x0000a028, 0x02890288},
961 {0x0000a02c, 0x03850384},
962 {0x0000a030, 0x03890388},
963 {0x0000a034, 0x038b038a},
964 {0x0000a038, 0x038d038c},
965 {0x0000a03c, 0x03910390},
966 {0x0000a040, 0x03930392},
967 {0x0000a044, 0x03950394},
968 {0x0000a048, 0x00000396},
969 {0x0000a04c, 0x00000000},
970 {0x0000a050, 0x00000000},
971 {0x0000a054, 0x00000000},
972 {0x0000a058, 0x00000000},
973 {0x0000a05c, 0x00000000},
974 {0x0000a060, 0x00000000},
975 {0x0000a064, 0x00000000},
976 {0x0000a068, 0x00000000},
977 {0x0000a06c, 0x00000000},
978 {0x0000a070, 0x00000000},
979 {0x0000a074, 0x00000000},
980 {0x0000a078, 0x00000000},
981 {0x0000a07c, 0x00000000},
982 {0x0000a080, 0x28282828},
983 {0x0000a084, 0x28282828},
984 {0x0000a088, 0x28282828},
985 {0x0000a08c, 0x28282828},
986 {0x0000a090, 0x28282828},
987 {0x0000a094, 0x21212128},
988 {0x0000a098, 0x171c1c1c},
989 {0x0000a09c, 0x02020212},
990 {0x0000a0a0, 0x00000202},
991 {0x0000a0a4, 0x00000000},
992 {0x0000a0a8, 0x00000000},
993 {0x0000a0ac, 0x00000000},
994 {0x0000a0b0, 0x00000000},
995 {0x0000a0b4, 0x00000000},
996 {0x0000a0b8, 0x00000000},
997 {0x0000a0bc, 0x00000000},
998 {0x0000a0c0, 0x001f0000},
999 {0x0000a0c4, 0x111f1100},
1000 {0x0000a0c8, 0x111d111e},
1001 {0x0000a0cc, 0x111b111c},
1002 {0x0000a0d0, 0x22032204},
1003 {0x0000a0d4, 0x22012202},
1004 {0x0000a0d8, 0x221f2200},
1005 {0x0000a0dc, 0x221d221e},
1006 {0x0000a0e0, 0x33013302},
1007 {0x0000a0e4, 0x331f3300},
1008 {0x0000a0e8, 0x4402331e},
1009 {0x0000a0ec, 0x44004401},
1010 {0x0000a0f0, 0x441e441f},
1011 {0x0000a0f4, 0x55015502},
1012 {0x0000a0f8, 0x551f5500},
1013 {0x0000a0fc, 0x6602551e},
1014 {0x0000a100, 0x66006601},
1015 {0x0000a104, 0x661e661f},
1016 {0x0000a108, 0x7703661d},
1017 {0x0000a10c, 0x77017702},
1018 {0x0000a110, 0x00007700},
1019 {0x0000a114, 0x00000000},
1020 {0x0000a118, 0x00000000},
1021 {0x0000a11c, 0x00000000},
1022 {0x0000a120, 0x00000000},
1023 {0x0000a124, 0x00000000},
1024 {0x0000a128, 0x00000000},
1025 {0x0000a12c, 0x00000000},
1026 {0x0000a130, 0x00000000},
1027 {0x0000a134, 0x00000000},
1028 {0x0000a138, 0x00000000},
1029 {0x0000a13c, 0x00000000},
1030 {0x0000a140, 0x001f0000},
1031 {0x0000a144, 0x111f1100},
1032 {0x0000a148, 0x111d111e},
1033 {0x0000a14c, 0x111b111c},
1034 {0x0000a150, 0x22032204},
1035 {0x0000a154, 0x22012202},
1036 {0x0000a158, 0x221f2200},
1037 {0x0000a15c, 0x221d221e},
1038 {0x0000a160, 0x33013302},
1039 {0x0000a164, 0x331f3300},
1040 {0x0000a168, 0x4402331e},
1041 {0x0000a16c, 0x44004401},
1042 {0x0000a170, 0x441e441f},
1043 {0x0000a174, 0x55015502},
1044 {0x0000a178, 0x551f5500},
1045 {0x0000a17c, 0x6602551e},
1046 {0x0000a180, 0x66006601},
1047 {0x0000a184, 0x661e661f},
1048 {0x0000a188, 0x7703661d},
1049 {0x0000a18c, 0x77017702},
1050 {0x0000a190, 0x00007700},
1051 {0x0000a194, 0x00000000},
1052 {0x0000a198, 0x00000000},
1053 {0x0000a19c, 0x00000000},
1054 {0x0000a1a0, 0x00000000},
1055 {0x0000a1a4, 0x00000000},
1056 {0x0000a1a8, 0x00000000},
1057 {0x0000a1ac, 0x00000000},
1058 {0x0000a1b0, 0x00000000},
1059 {0x0000a1b4, 0x00000000},
1060 {0x0000a1b8, 0x00000000},
1061 {0x0000a1bc, 0x00000000},
1062 {0x0000a1c0, 0x00000000},
1063 {0x0000a1c4, 0x00000000},
1064 {0x0000a1c8, 0x00000000},
1065 {0x0000a1cc, 0x00000000},
1066 {0x0000a1d0, 0x00000000},
1067 {0x0000a1d4, 0x00000000},
1068 {0x0000a1d8, 0x00000000},
1069 {0x0000a1dc, 0x00000000},
1070 {0x0000a1e0, 0x00000000},
1071 {0x0000a1e4, 0x00000000},
1072 {0x0000a1e8, 0x00000000},
1073 {0x0000a1ec, 0x00000000},
1074 {0x0000a1f0, 0x00000396},
1075 {0x0000a1f4, 0x00000396},
1076 {0x0000a1f8, 0x00000396},
1077 {0x0000a1fc, 0x00000296},
1078};
1079 241
1080#endif /* INITVALS_9330_1P2_H */ 242#endif /* INITVALS_9330_1P2_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
index 815a8af1beef..1d8235e19f0f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2011 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
3 * 4 *
4 * Permission to use, copy, modify, and/or distribute this software for any 5 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 6 * purpose with or without fee is hereby granted, provided that the above
@@ -18,16 +19,16 @@
18#define INITVALS_9340_H 19#define INITVALS_9340_H
19 20
20static const u32 ar9340_1p0_radio_postamble[][5] = { 21static const u32 ar9340_1p0_radio_postamble[][5] = {
21 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 22 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
22 {0x000160ac, 0xa4646800, 0xa4646800, 0xa4646800, 0xa4646800}, 23 {0x000160ac, 0xa4646800, 0xa4646800, 0xa4646800, 0xa4646800},
23 {0x0001610c, 0x08000000, 0x08000000, 0x00000000, 0x00000000}, 24 {0x0001610c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
24 {0x00016140, 0x10804000, 0x10804000, 0x50804000, 0x50804000}, 25 {0x00016140, 0x10804000, 0x10804000, 0x50804000, 0x50804000},
25 {0x0001650c, 0x08000000, 0x08000000, 0x00000000, 0x00000000}, 26 {0x0001650c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
26 {0x00016540, 0x10804000, 0x10804000, 0x50804000, 0x50804000}, 27 {0x00016540, 0x10804000, 0x10804000, 0x50804000, 0x50804000},
27}; 28};
28 29
29static const u32 ar9340Modes_lowest_ob_db_tx_gain_table_1p0[][5] = { 30static const u32 ar9340Modes_lowest_ob_db_tx_gain_table_1p0[][5] = {
30 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 31 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
31 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, 32 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
32 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 33 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
33 {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002}, 34 {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
@@ -99,21 +100,10 @@ static const u32 ar9340Modes_lowest_ob_db_tx_gain_table_1p0[][5] = {
99 {0x00016448, 0x24925266, 0x24925266, 0x24925266, 0x24925266}, 100 {0x00016448, 0x24925266, 0x24925266, 0x24925266, 0x24925266},
100}; 101};
101 102
102static const u32 ar9340Modes_fast_clock_1p0[][3] = { 103#define ar9340Modes_fast_clock_1p0 ar9300Modes_fast_clock_2p2
103 /* Addr 5G_HT20 5G_HT40 */
104 {0x00001030, 0x00000268, 0x000004d0},
105 {0x00001070, 0x0000018c, 0x00000318},
106 {0x000010b0, 0x00000fd0, 0x00001fa0},
107 {0x00008014, 0x044c044c, 0x08980898},
108 {0x0000801c, 0x148ec02b, 0x148ec057},
109 {0x00008318, 0x000044c0, 0x00008980},
110 {0x00009e00, 0x03721821, 0x03721821},
111 {0x0000a230, 0x0000000b, 0x00000016},
112 {0x0000a254, 0x00000898, 0x00001130},
113};
114 104
115static const u32 ar9340_1p0_radio_core[][2] = { 105static const u32 ar9340_1p0_radio_core[][2] = {
116 /* Addr allmodes */ 106 /* Addr allmodes */
117 {0x00016000, 0x36db6db6}, 107 {0x00016000, 0x36db6db6},
118 {0x00016004, 0x6db6db40}, 108 {0x00016004, 0x6db6db40},
119 {0x00016008, 0x73f00000}, 109 {0x00016008, 0x73f00000},
@@ -146,15 +136,13 @@ static const u32 ar9340_1p0_radio_core[][2] = {
146 {0x00016100, 0x04cb0001}, 136 {0x00016100, 0x04cb0001},
147 {0x00016104, 0xfff80000}, 137 {0x00016104, 0xfff80000},
148 {0x00016108, 0x00080010}, 138 {0x00016108, 0x00080010},
149 {0x0001610c, 0x00000000},
150 {0x00016140, 0x50804008}, 139 {0x00016140, 0x50804008},
151 {0x00016144, 0x01884080}, 140 {0x00016144, 0x01884080},
152 {0x00016148, 0x000080c0}, 141 {0x00016148, 0x000080c0},
153 {0x00016280, 0x01000015}, 142 {0x00016280, 0x01000015},
154 {0x00016284, 0x05530000}, 143 {0x00016284, 0x15530000},
155 {0x00016288, 0x00318000}, 144 {0x00016288, 0x00318000},
156 {0x0001628c, 0x50000000}, 145 {0x0001628c, 0x50000000},
157 {0x00016290, 0x4080294f},
158 {0x00016380, 0x00000000}, 146 {0x00016380, 0x00000000},
159 {0x00016384, 0x00000000}, 147 {0x00016384, 0x00000000},
160 {0x00016388, 0x00800700}, 148 {0x00016388, 0x00800700},
@@ -219,52 +207,43 @@ static const u32 ar9340_1p0_radio_core[][2] = {
219}; 207};
220 208
221static const u32 ar9340_1p0_radio_core_40M[][2] = { 209static const u32 ar9340_1p0_radio_core_40M[][2] = {
210 /* Addr allmodes */
222 {0x0001609c, 0x02566f3a}, 211 {0x0001609c, 0x02566f3a},
223 {0x000160ac, 0xa4647c00}, 212 {0x000160ac, 0xa4647c00},
224 {0x000160b0, 0x01885f5a}, 213 {0x000160b0, 0x01885f5a},
225}; 214};
226 215
227static const u32 ar9340_1p0_mac_postamble[][5] = { 216#define ar9340_1p0_mac_postamble ar9300_2p2_mac_postamble
228 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
229 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
230 {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
231 {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
232 {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
233 {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
234 {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
235 {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
236 {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
237};
238 217
239static const u32 ar9340_1p0_soc_postamble[][5] = { 218#define ar9340_1p0_soc_postamble ar9300_2p2_soc_postamble
240 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
241 {0x00007010, 0x00000023, 0x00000023, 0x00000023, 0x00000023},
242};
243 219
244static const u32 ar9340_1p0_baseband_postamble[][5] = { 220static const u32 ar9340_1p0_baseband_postamble[][5] = {
245 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 221 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
246 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011}, 222 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
247 {0x00009820, 0x206a022e, 0x206a022e, 0x206a022e, 0x206a022e}, 223 {0x00009820, 0x206a022e, 0x206a022e, 0x206a022e, 0x206a022e},
248 {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0}, 224 {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
249 {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881}, 225 {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
250 {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4}, 226 {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
251 {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c}, 227 {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
252 {0x00009c00, 0x00000044, 0x000000c4, 0x000000c4, 0x00000044}, 228 {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
253 {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0}, 229 {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
254 {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020}, 230 {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
255 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2}, 231 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
256 {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec88d2e, 0x7ec88d2e}, 232 {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec88d2e, 0x7ec88d2e},
257 {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e}, 233 {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
258 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 234 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
259 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, 235 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
260 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, 236 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
261 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, 237 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
238 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
262 {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27}, 239 {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
263 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, 240 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
264 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, 241 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
265 {0x0000a204, 0x00003fc0, 0x00003fc4, 0x00003fc4, 0x00003fc0}, 242 {0x0000a204, 0x00003ec0, 0x00003ec4, 0x00003ec4, 0x00003ec0},
266 {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004}, 243 {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
244 {0x0000a22c, 0x07e26a2f, 0x07e26a2f, 0x01026a2f, 0x01026a2f},
267 {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b}, 245 {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
246 {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
268 {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018}, 247 {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
269 {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108}, 248 {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
270 {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898}, 249 {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
@@ -277,11 +256,11 @@ static const u32 ar9340_1p0_baseband_postamble[][5] = {
277 {0x0000a288, 0x00000220, 0x00000220, 0x00000110, 0x00000110}, 256 {0x0000a288, 0x00000220, 0x00000220, 0x00000110, 0x00000110},
278 {0x0000a28c, 0x00011111, 0x00011111, 0x00022222, 0x00022222}, 257 {0x0000a28c, 0x00011111, 0x00011111, 0x00022222, 0x00022222},
279 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, 258 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
280 {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071982}, 259 {0x0000a2d0, 0x00041983, 0x00041983, 0x00041982, 0x00041982},
281 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a}, 260 {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
282 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 261 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
283 {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c}, 262 {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
284 {0x0000ae04, 0x00180000, 0x00180000, 0x00180000, 0x00180000}, 263 {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
285 {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 264 {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
286 {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c}, 265 {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
287 {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce}, 266 {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
@@ -289,21 +268,21 @@ static const u32 ar9340_1p0_baseband_postamble[][5] = {
289}; 268};
290 269
291static const u32 ar9340_1p0_baseband_core[][2] = { 270static const u32 ar9340_1p0_baseband_core[][2] = {
292 /* Addr allmodes */ 271 /* Addr allmodes */
293 {0x00009800, 0xafe68e30}, 272 {0x00009800, 0xafe68e30},
294 {0x00009804, 0xfd14e000}, 273 {0x00009804, 0xfd14e000},
295 {0x00009808, 0x9c0a9f6b}, 274 {0x00009808, 0x9c0a9f6b},
296 {0x0000980c, 0x04900000}, 275 {0x0000980c, 0x04900000},
297 {0x00009814, 0xb280c00a}, 276 {0x00009814, 0x3280c00a},
298 {0x00009818, 0x00000000}, 277 {0x00009818, 0x00000000},
299 {0x0000981c, 0x00020028}, 278 {0x0000981c, 0x00020028},
300 {0x00009834, 0x5f3ca3de}, 279 {0x00009834, 0x6400a190},
301 {0x00009838, 0x0108ecff}, 280 {0x00009838, 0x0108ecff},
302 {0x0000983c, 0x14750600}, 281 {0x0000983c, 0x14000600},
303 {0x00009880, 0x201fff00}, 282 {0x00009880, 0x201fff00},
304 {0x00009884, 0x00001042}, 283 {0x00009884, 0x00001042},
305 {0x000098a4, 0x00200400}, 284 {0x000098a4, 0x00200400},
306 {0x000098b0, 0x52440bbe}, 285 {0x000098b0, 0x32840bbe},
307 {0x000098d0, 0x004b6a8e}, 286 {0x000098d0, 0x004b6a8e},
308 {0x000098d4, 0x00000820}, 287 {0x000098d4, 0x00000820},
309 {0x000098dc, 0x00000000}, 288 {0x000098dc, 0x00000000},
@@ -329,7 +308,6 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
329 {0x00009e30, 0x06336f77}, 308 {0x00009e30, 0x06336f77},
330 {0x00009e34, 0x6af6532f}, 309 {0x00009e34, 0x6af6532f},
331 {0x00009e38, 0x0cc80c00}, 310 {0x00009e38, 0x0cc80c00},
332 {0x00009e3c, 0xcf946222},
333 {0x00009e40, 0x0d261820}, 311 {0x00009e40, 0x0d261820},
334 {0x00009e4c, 0x00001004}, 312 {0x00009e4c, 0x00001004},
335 {0x00009e50, 0x00ff03f1}, 313 {0x00009e50, 0x00ff03f1},
@@ -342,8 +320,6 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
342 {0x0000a220, 0x00000000}, 320 {0x0000a220, 0x00000000},
343 {0x0000a224, 0x00000000}, 321 {0x0000a224, 0x00000000},
344 {0x0000a228, 0x10002310}, 322 {0x0000a228, 0x10002310},
345 {0x0000a22c, 0x01036a1e},
346 {0x0000a234, 0x10000fff},
347 {0x0000a23c, 0x00000000}, 323 {0x0000a23c, 0x00000000},
348 {0x0000a244, 0x0c000000}, 324 {0x0000a244, 0x0c000000},
349 {0x0000a2a0, 0x00000001}, 325 {0x0000a2a0, 0x00000001},
@@ -351,10 +327,6 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
351 {0x0000a2c8, 0x00000000}, 327 {0x0000a2c8, 0x00000000},
352 {0x0000a2cc, 0x18c43433}, 328 {0x0000a2cc, 0x18c43433},
353 {0x0000a2d4, 0x00000000}, 329 {0x0000a2d4, 0x00000000},
354 {0x0000a2dc, 0x00000000},
355 {0x0000a2e0, 0x00000000},
356 {0x0000a2e4, 0x00000000},
357 {0x0000a2e8, 0x00000000},
358 {0x0000a2ec, 0x00000000}, 330 {0x0000a2ec, 0x00000000},
359 {0x0000a2f0, 0x00000000}, 331 {0x0000a2f0, 0x00000000},
360 {0x0000a2f4, 0x00000000}, 332 {0x0000a2f4, 0x00000000},
@@ -385,7 +357,7 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
385 {0x0000a3e8, 0x20202020}, 357 {0x0000a3e8, 0x20202020},
386 {0x0000a3ec, 0x20202020}, 358 {0x0000a3ec, 0x20202020},
387 {0x0000a3f0, 0x00000000}, 359 {0x0000a3f0, 0x00000000},
388 {0x0000a3f4, 0x00000246}, 360 {0x0000a3f4, 0x00000000},
389 {0x0000a3f8, 0x0cdbd380}, 361 {0x0000a3f8, 0x0cdbd380},
390 {0x0000a3fc, 0x000f0f01}, 362 {0x0000a3fc, 0x000f0f01},
391 {0x0000a400, 0x8fa91f01}, 363 {0x0000a400, 0x8fa91f01},
@@ -402,33 +374,17 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
402 {0x0000a430, 0x1ce739ce}, 374 {0x0000a430, 0x1ce739ce},
403 {0x0000a434, 0x00000000}, 375 {0x0000a434, 0x00000000},
404 {0x0000a438, 0x00001801}, 376 {0x0000a438, 0x00001801},
405 {0x0000a43c, 0x00000000}, 377 {0x0000a43c, 0x00100000},
406 {0x0000a440, 0x00000000}, 378 {0x0000a440, 0x00000000},
407 {0x0000a444, 0x00000000}, 379 {0x0000a444, 0x00000000},
408 {0x0000a448, 0x04000080}, 380 {0x0000a448, 0x05000080},
409 {0x0000a44c, 0x00000001}, 381 {0x0000a44c, 0x00000001},
410 {0x0000a450, 0x00010000}, 382 {0x0000a450, 0x00010000},
411 {0x0000a458, 0x00000000}, 383 {0x0000a458, 0x00000000},
412 {0x0000a600, 0x00000000},
413 {0x0000a604, 0x00000000},
414 {0x0000a608, 0x00000000},
415 {0x0000a60c, 0x00000000},
416 {0x0000a610, 0x00000000},
417 {0x0000a614, 0x00000000},
418 {0x0000a618, 0x00000000},
419 {0x0000a61c, 0x00000000},
420 {0x0000a620, 0x00000000},
421 {0x0000a624, 0x00000000},
422 {0x0000a628, 0x00000000},
423 {0x0000a62c, 0x00000000},
424 {0x0000a630, 0x00000000},
425 {0x0000a634, 0x00000000},
426 {0x0000a638, 0x00000000},
427 {0x0000a63c, 0x00000000},
428 {0x0000a640, 0x00000000}, 384 {0x0000a640, 0x00000000},
429 {0x0000a644, 0x3fad9d74}, 385 {0x0000a644, 0x3fad9d74},
430 {0x0000a648, 0x0048060a}, 386 {0x0000a648, 0x0048060a},
431 {0x0000a64c, 0x00000637}, 387 {0x0000a64c, 0x00003c37},
432 {0x0000a670, 0x03020100}, 388 {0x0000a670, 0x03020100},
433 {0x0000a674, 0x09080504}, 389 {0x0000a674, 0x09080504},
434 {0x0000a678, 0x0d0c0b0a}, 390 {0x0000a678, 0x0d0c0b0a},
@@ -451,10 +407,6 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
451 {0x0000a8f4, 0x00000000}, 407 {0x0000a8f4, 0x00000000},
452 {0x0000b2d0, 0x00000080}, 408 {0x0000b2d0, 0x00000080},
453 {0x0000b2d4, 0x00000000}, 409 {0x0000b2d4, 0x00000000},
454 {0x0000b2dc, 0x00000000},
455 {0x0000b2e0, 0x00000000},
456 {0x0000b2e4, 0x00000000},
457 {0x0000b2e8, 0x00000000},
458 {0x0000b2ec, 0x00000000}, 410 {0x0000b2ec, 0x00000000},
459 {0x0000b2f0, 0x00000000}, 411 {0x0000b2f0, 0x00000000},
460 {0x0000b2f4, 0x00000000}, 412 {0x0000b2f4, 0x00000000},
@@ -465,80 +417,108 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
465}; 417};
466 418
467static const u32 ar9340Modes_high_power_tx_gain_table_1p0[][5] = { 419static const u32 ar9340Modes_high_power_tx_gain_table_1p0[][5] = {
468 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 420 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
421 {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
422 {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
423 {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
424 {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
425 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
426 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
427 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
428 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
429 {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
430 {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
431 {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
432 {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
433 {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
434 {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
435 {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
436 {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
437 {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
438 {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
439 {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
440 {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
441 {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
442 {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
443 {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
444 {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
469 {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9}, 445 {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
470 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, 446 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
471 {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002}, 447 {0x0000a504, 0x04002222, 0x04002222, 0x02000001, 0x02000001},
472 {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004}, 448 {0x0000a508, 0x09002421, 0x09002421, 0x05000003, 0x05000003},
473 {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200}, 449 {0x0000a50c, 0x0d002621, 0x0d002621, 0x0a000005, 0x0a000005},
474 {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202}, 450 {0x0000a510, 0x13004620, 0x13004620, 0x0e000201, 0x0e000201},
475 {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400}, 451 {0x0000a514, 0x19004a20, 0x19004a20, 0x11000203, 0x11000203},
476 {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402}, 452 {0x0000a518, 0x1d004e20, 0x1d004e20, 0x14000401, 0x14000401},
477 {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404}, 453 {0x0000a51c, 0x21005420, 0x21005420, 0x18000403, 0x18000403},
478 {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603}, 454 {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000602, 0x1b000602},
479 {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02}, 455 {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000802, 0x1f000802},
480 {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04}, 456 {0x0000a528, 0x2f005e42, 0x2f005e42, 0x21000620, 0x21000620},
481 {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20}, 457 {0x0000a52c, 0x33005e44, 0x33005e44, 0x25000820, 0x25000820},
482 {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20}, 458 {0x0000a530, 0x38005e65, 0x38005e65, 0x29000822, 0x29000822},
483 {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22}, 459 {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2d000824, 0x2d000824},
484 {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24}, 460 {0x0000a538, 0x40005e6b, 0x40005e6b, 0x30000828, 0x30000828},
485 {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640}, 461 {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x3400082a, 0x3400082a},
486 {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660}, 462 {0x0000a540, 0x49005e72, 0x49005e72, 0x38000849, 0x38000849},
487 {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861}, 463 {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b000a2c, 0x3b000a2c},
488 {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81}, 464 {0x0000a548, 0x53005f12, 0x53005f12, 0x3e000e2b, 0x3e000e2b},
489 {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83}, 465 {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42000e2d, 0x42000e2d},
490 {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84}, 466 {0x0000a550, 0x5e025f12, 0x5e025f12, 0x4500124a, 0x4500124a},
491 {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3}, 467 {0x0000a554, 0x61027f12, 0x61027f12, 0x4900124c, 0x4900124c},
492 {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5}, 468 {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c00126c, 0x4c00126c},
493 {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9}, 469 {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x4f00128c, 0x4f00128c},
494 {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb}, 470 {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x52001290, 0x52001290},
495 {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, 471 {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
496 {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, 472 {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
497 {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, 473 {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
498 {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, 474 {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
499 {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, 475 {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
500 {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, 476 {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
501 {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, 477 {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
502 {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000}, 478 {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
503 {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002}, 479 {0x0000a584, 0x04802222, 0x04802222, 0x02800001, 0x02800001},
504 {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004}, 480 {0x0000a588, 0x09802421, 0x09802421, 0x05800003, 0x05800003},
505 {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200}, 481 {0x0000a58c, 0x0d802621, 0x0d802621, 0x0a800005, 0x0a800005},
506 {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202}, 482 {0x0000a590, 0x13804620, 0x13804620, 0x0e800201, 0x0e800201},
507 {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400}, 483 {0x0000a594, 0x19804a20, 0x19804a20, 0x11800203, 0x11800203},
508 {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402}, 484 {0x0000a598, 0x1d804e20, 0x1d804e20, 0x14800401, 0x14800401},
509 {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404}, 485 {0x0000a59c, 0x21805420, 0x21805420, 0x18800403, 0x18800403},
510 {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603}, 486 {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800602, 0x1b800602},
511 {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02}, 487 {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800802, 0x1f800802},
512 {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04}, 488 {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x21800620, 0x21800620},
513 {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20}, 489 {0x0000a5ac, 0x33805e44, 0x33805e44, 0x25800820, 0x25800820},
514 {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20}, 490 {0x0000a5b0, 0x38805e65, 0x38805e65, 0x29800822, 0x29800822},
515 {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22}, 491 {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2d800824, 0x2d800824},
516 {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24}, 492 {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x30800828, 0x30800828},
517 {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640}, 493 {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x3480082a, 0x3480082a},
518 {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660}, 494 {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38800849, 0x38800849},
519 {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861}, 495 {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b800a2c, 0x3b800a2c},
520 {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81}, 496 {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e800e2b, 0x3e800e2b},
521 {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83}, 497 {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42800e2d, 0x42800e2d},
522 {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84}, 498 {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x4580124a, 0x4580124a},
523 {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3}, 499 {0x0000a5d4, 0x61827f12, 0x61827f12, 0x4980124c, 0x4980124c},
524 {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5}, 500 {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c80126c, 0x4c80126c},
525 {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9}, 501 {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x4f80128c, 0x4f80128c},
526 {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb}, 502 {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x52801290, 0x52801290},
527 {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, 503 {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
528 {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, 504 {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
529 {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, 505 {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
530 {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, 506 {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
531 {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, 507 {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
532 {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, 508 {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
533 {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, 509 {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
534 {0x00016044, 0x056db2db, 0x056db2db, 0x056db2db, 0x056db2db}, 510 {0x00016044, 0x056db2db, 0x056db2db, 0x022492db, 0x022492db},
535 {0x00016048, 0x24925266, 0x24925266, 0x24925266, 0x24925266}, 511 {0x00016048, 0x24925266, 0x24925266, 0x24925266, 0x24925266},
536 {0x00016444, 0x056db2db, 0x056db2db, 0x056db2db, 0x056db2db}, 512 {0x00016444, 0x056db2db, 0x056db2db, 0x022492db, 0x022492db},
537 {0x00016448, 0x24925266, 0x24925266, 0x24925266, 0x24925266}, 513 {0x00016448, 0x24925266, 0x24925266, 0x24925266, 0x24925266},
538}; 514};
539 515
540static const u32 ar9340Modes_high_ob_db_tx_gain_table_1p0[][5] = { 516static const u32 ar9340Modes_high_ob_db_tx_gain_table_1p0[][5] = {
541 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 517 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
518 {0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
519 {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
520 {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
521 {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
542 {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9}, 522 {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
543 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, 523 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
544 {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002}, 524 {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
@@ -559,7 +539,7 @@ static const u32 ar9340Modes_high_ob_db_tx_gain_table_1p0[][5] = {
559 {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660}, 539 {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
560 {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861}, 540 {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
561 {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81}, 541 {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
562 {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83}, 542 {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
563 {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84}, 543 {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
564 {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3}, 544 {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
565 {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5}, 545 {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
@@ -604,13 +584,43 @@ static const u32 ar9340Modes_high_ob_db_tx_gain_table_1p0[][5] = {
604 {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, 584 {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
605 {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, 585 {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
606 {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, 586 {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
587 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
588 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
589 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
590 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
591 {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
592 {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
593 {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
594 {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
595 {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
596 {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
597 {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
598 {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
599 {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
600 {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
601 {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
602 {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
603 {0x0000b2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
604 {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
605 {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
606 {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
607 {0x00016044, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4}, 607 {0x00016044, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4},
608 {0x00016048, 0x8e481266, 0x8e481266, 0x8e481266, 0x8e481266}, 608 {0x00016048, 0x8e481666, 0x8e481666, 0x8e481266, 0x8e481266},
609 {0x00016280, 0x01000015, 0x01000015, 0x01001015, 0x01001015},
609 {0x00016444, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4}, 610 {0x00016444, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4},
610 {0x00016448, 0x8e481266, 0x8e481266, 0x8e481266, 0x8e481266}, 611 {0x00016448, 0x8e481666, 0x8e481666, 0x8e481266, 0x8e481266},
611}; 612};
613
612static const u32 ar9340Modes_ub124_tx_gain_table_1p0[][5] = { 614static const u32 ar9340Modes_ub124_tx_gain_table_1p0[][5] = {
613 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 615 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
616 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
617 {0x00009820, 0x206a022e, 0x206a022e, 0x206a00ae, 0x206a00ae},
618 {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
619 {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec82d2e, 0x7ec82d2e},
620 {0x0000a2dc, 0xfef5d402, 0xfef5d402, 0xfdab5b52, 0xfdab5b52},
621 {0x0000a2e0, 0xfe896600, 0xfe896600, 0xfd339c84, 0xfd339c84},
622 {0x0000a2e4, 0xff01f800, 0xff01f800, 0xfec3e000, 0xfec3e000},
623 {0x0000a2e8, 0xfffe0000, 0xfffe0000, 0xfffc0000, 0xfffc0000},
614 {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9}, 624 {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
615 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, 625 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
616 {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002}, 626 {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
@@ -676,15 +686,34 @@ static const u32 ar9340Modes_ub124_tx_gain_table_1p0[][5] = {
676 {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, 686 {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
677 {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, 687 {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
678 {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, 688 {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
679 {0x00016044, 0x036db2db, 0x036db2db, 0x036db2db, 0x036db2db}, 689 {0x00016044, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4},
680 {0x00016048, 0x69b65266, 0x69b65266, 0x69b65266, 0x69b65266}, 690 {0x00016048, 0x8e480086, 0x8e480086, 0x8e480086, 0x8e480086},
681 {0x00016444, 0x036db2db, 0x036db2db, 0x036db2db, 0x036db2db}, 691 {0x00016444, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4},
682 {0x00016448, 0x69b65266, 0x69b65266, 0x69b65266, 0x69b65266}, 692 {0x00016448, 0x8e480086, 0x8e480086, 0x8e480086, 0x8e480086},
693 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
694 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
695 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
696 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
697 {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
698 {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
699 {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
700 {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
701 {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
702 {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
703 {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
704 {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
705 {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
706 {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
707 {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
708 {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
709 {0x0000b2dc, 0xfef5d402, 0xfef5d402, 0xfdab5b52, 0xfdab5b52},
710 {0x0000b2e0, 0xfe896600, 0xfe896600, 0xfd339c84, 0xfd339c84},
711 {0x0000b2e4, 0xff01f800, 0xff01f800, 0xfec3e000, 0xfec3e000},
712 {0x0000b2e8, 0xfffe0000, 0xfffe0000, 0xfffc0000, 0xfffc0000},
683}; 713};
684 714
685
686static const u32 ar9340Common_rx_gain_table_1p0[][2] = { 715static const u32 ar9340Common_rx_gain_table_1p0[][2] = {
687 /* Addr allmodes */ 716 /* Addr allmodes */
688 {0x0000a000, 0x00010000}, 717 {0x0000a000, 0x00010000},
689 {0x0000a004, 0x00030002}, 718 {0x0000a004, 0x00030002},
690 {0x0000a008, 0x00050004}, 719 {0x0000a008, 0x00050004},
@@ -845,14 +874,14 @@ static const u32 ar9340Common_rx_gain_table_1p0[][2] = {
845 {0x0000b074, 0x00000000}, 874 {0x0000b074, 0x00000000},
846 {0x0000b078, 0x00000000}, 875 {0x0000b078, 0x00000000},
847 {0x0000b07c, 0x00000000}, 876 {0x0000b07c, 0x00000000},
848 {0x0000b080, 0x32323232}, 877 {0x0000b080, 0x23232323},
849 {0x0000b084, 0x2f2f3232}, 878 {0x0000b084, 0x21232323},
850 {0x0000b088, 0x23282a2d}, 879 {0x0000b088, 0x19191c1e},
851 {0x0000b08c, 0x1c1e2123}, 880 {0x0000b08c, 0x12141417},
852 {0x0000b090, 0x14171919}, 881 {0x0000b090, 0x07070e0e},
853 {0x0000b094, 0x0e0e1214}, 882 {0x0000b094, 0x03030305},
854 {0x0000b098, 0x03050707}, 883 {0x0000b098, 0x00000003},
855 {0x0000b09c, 0x00030303}, 884 {0x0000b09c, 0x00000000},
856 {0x0000b0a0, 0x00000000}, 885 {0x0000b0a0, 0x00000000},
857 {0x0000b0a4, 0x00000000}, 886 {0x0000b0a4, 0x00000000},
858 {0x0000b0a8, 0x00000000}, 887 {0x0000b0a8, 0x00000000},
@@ -944,7 +973,11 @@ static const u32 ar9340Common_rx_gain_table_1p0[][2] = {
944}; 973};
945 974
946static const u32 ar9340Modes_low_ob_db_tx_gain_table_1p0[][5] = { 975static const u32 ar9340Modes_low_ob_db_tx_gain_table_1p0[][5] = {
947 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 976 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
977 {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
978 {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
979 {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
980 {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
948 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, 981 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
949 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 982 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
950 {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002}, 983 {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
@@ -952,8 +985,8 @@ static const u32 ar9340Modes_low_ob_db_tx_gain_table_1p0[][5] = {
952 {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200}, 985 {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
953 {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202}, 986 {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
954 {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400}, 987 {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
955 {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402}, 988 {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402},
956 {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404}, 989 {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
957 {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603}, 990 {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
958 {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02}, 991 {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
959 {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04}, 992 {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
@@ -965,19 +998,19 @@ static const u32 ar9340Modes_low_ob_db_tx_gain_table_1p0[][5] = {
965 {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660}, 998 {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
966 {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861}, 999 {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
967 {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81}, 1000 {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
968 {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83}, 1001 {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x47001a83, 0x47001a83},
969 {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84}, 1002 {0x0000a550, 0x61024a6c, 0x61024a6c, 0x4a001c84, 0x4a001c84},
970 {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3}, 1003 {0x0000a554, 0x66026a6c, 0x66026a6c, 0x4e001ce3, 0x4e001ce3},
971 {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5}, 1004 {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x52001ce5, 0x52001ce5},
972 {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9}, 1005 {0x0000a55c, 0x7002708c, 0x7002708c, 0x56001ce9, 0x56001ce9},
973 {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb}, 1006 {0x0000a560, 0x7302b08a, 0x7302b08a, 0x5a001ceb, 0x5a001ceb},
974 {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec}, 1007 {0x0000a564, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
975 {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec}, 1008 {0x0000a568, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
976 {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec}, 1009 {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
977 {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec}, 1010 {0x0000a570, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
978 {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec}, 1011 {0x0000a574, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
979 {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec}, 1012 {0x0000a578, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
980 {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec}, 1013 {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
981 {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000}, 1014 {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
982 {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002}, 1015 {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
983 {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004}, 1016 {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
@@ -1010,14 +1043,40 @@ static const u32 ar9340Modes_low_ob_db_tx_gain_table_1p0[][5] = {
1010 {0x0000a5f4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec}, 1043 {0x0000a5f4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
1011 {0x0000a5f8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec}, 1044 {0x0000a5f8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
1012 {0x0000a5fc, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec}, 1045 {0x0000a5fc, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
1046 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1047 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1048 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1049 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1050 {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1051 {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
1052 {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
1053 {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
1054 {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
1055 {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
1056 {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
1057 {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
1058 {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
1059 {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
1060 {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
1061 {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
1062 {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
1063 {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
1064 {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
1065 {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
1013 {0x00016044, 0x056db2db, 0x056db2db, 0x056db2db, 0x056db2db}, 1066 {0x00016044, 0x056db2db, 0x056db2db, 0x056db2db, 0x056db2db},
1014 {0x00016048, 0x24925266, 0x24925266, 0x24925266, 0x24925266}, 1067 {0x00016048, 0x24925666, 0x24925666, 0x24925266, 0x24925266},
1068 {0x00016280, 0x01000015, 0x01000015, 0x01001015, 0x01001015},
1069 {0x00016288, 0xf0318000, 0xf0318000, 0xf0318000, 0xf0318000},
1015 {0x00016444, 0x056db2db, 0x056db2db, 0x056db2db, 0x056db2db}, 1070 {0x00016444, 0x056db2db, 0x056db2db, 0x056db2db, 0x056db2db},
1016 {0x00016448, 0x24925266, 0x24925266, 0x24925266, 0x24925266}, 1071 {0x00016448, 0x24925666, 0x24925666, 0x24925266, 0x24925266},
1017}; 1072};
1018 1073
1019static const u32 ar9340Modes_mixed_ob_db_tx_gain_table_1p0[][5] = { 1074static const u32 ar9340Modes_mixed_ob_db_tx_gain_table_1p0[][5] = {
1020 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 1075 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1076 {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
1077 {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
1078 {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
1079 {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
1021 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, 1080 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
1022 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1081 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1023 {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002}, 1082 {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
@@ -1025,8 +1084,8 @@ static const u32 ar9340Modes_mixed_ob_db_tx_gain_table_1p0[][5] = {
1025 {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200}, 1084 {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
1026 {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202}, 1085 {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
1027 {0x0000a514, 0x1c000223, 0x1c000223, 0x11000400, 0x11000400}, 1086 {0x0000a514, 0x1c000223, 0x1c000223, 0x11000400, 0x11000400},
1028 {0x0000a518, 0x21020220, 0x21020220, 0x15000402, 0x15000402}, 1087 {0x0000a518, 0x21002220, 0x21002220, 0x15000402, 0x15000402},
1029 {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404}, 1088 {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
1030 {0x0000a520, 0x2b022220, 0x2b022220, 0x1b000603, 0x1b000603}, 1089 {0x0000a520, 0x2b022220, 0x2b022220, 0x1b000603, 0x1b000603},
1031 {0x0000a524, 0x2f022222, 0x2f022222, 0x1f000a02, 0x1f000a02}, 1090 {0x0000a524, 0x2f022222, 0x2f022222, 0x1f000a02, 0x1f000a02},
1032 {0x0000a528, 0x34022225, 0x34022225, 0x23000a04, 0x23000a04}, 1091 {0x0000a528, 0x34022225, 0x34022225, 0x23000a04, 0x23000a04},
@@ -1038,19 +1097,19 @@ static const u32 ar9340Modes_mixed_ob_db_tx_gain_table_1p0[][5] = {
1038 {0x0000a540, 0x4e02246c, 0x4e02246c, 0x38001660, 0x38001660}, 1097 {0x0000a540, 0x4e02246c, 0x4e02246c, 0x38001660, 0x38001660},
1039 {0x0000a544, 0x5302266c, 0x5302266c, 0x3b001861, 0x3b001861}, 1098 {0x0000a544, 0x5302266c, 0x5302266c, 0x3b001861, 0x3b001861},
1040 {0x0000a548, 0x5702286c, 0x5702286c, 0x3e001a81, 0x3e001a81}, 1099 {0x0000a548, 0x5702286c, 0x5702286c, 0x3e001a81, 0x3e001a81},
1041 {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x42001a83, 0x42001a83}, 1100 {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x42001a83, 0x42001a83},
1042 {0x0000a550, 0x61042a6c, 0x61042a6c, 0x44001c84, 0x44001c84}, 1101 {0x0000a550, 0x61024a6c, 0x61024a6c, 0x44001c84, 0x44001c84},
1043 {0x0000a554, 0x66062a6c, 0x66062a6c, 0x48001ce3, 0x48001ce3}, 1102 {0x0000a554, 0x66026a6c, 0x66026a6c, 0x48001ce3, 0x48001ce3},
1044 {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x4c001ce5, 0x4c001ce5}, 1103 {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x4c001ce5, 0x4c001ce5},
1045 {0x0000a55c, 0x7006308c, 0x7006308c, 0x50001ce9, 0x50001ce9}, 1104 {0x0000a55c, 0x7002708c, 0x7002708c, 0x50001ce9, 0x50001ce9},
1046 {0x0000a560, 0x730a308a, 0x730a308a, 0x54001ceb, 0x54001ceb}, 1105 {0x0000a560, 0x7302b08a, 0x7302b08a, 0x54001ceb, 0x54001ceb},
1047 {0x0000a564, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec}, 1106 {0x0000a564, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
1048 {0x0000a568, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec}, 1107 {0x0000a568, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
1049 {0x0000a56c, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec}, 1108 {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
1050 {0x0000a570, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec}, 1109 {0x0000a570, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
1051 {0x0000a574, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec}, 1110 {0x0000a574, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
1052 {0x0000a578, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec}, 1111 {0x0000a578, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
1053 {0x0000a57c, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec}, 1112 {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
1054 {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000}, 1113 {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
1055 {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002}, 1114 {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
1056 {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004}, 1115 {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
@@ -1083,14 +1142,36 @@ static const u32 ar9340Modes_mixed_ob_db_tx_gain_table_1p0[][5] = {
1083 {0x0000a5f4, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec}, 1142 {0x0000a5f4, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
1084 {0x0000a5f8, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec}, 1143 {0x0000a5f8, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
1085 {0x0000a5fc, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec}, 1144 {0x0000a5fc, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
1145 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1146 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1147 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1148 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1149 {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1150 {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
1151 {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
1152 {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
1153 {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
1154 {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
1155 {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
1156 {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
1157 {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
1158 {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
1159 {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
1160 {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
1161 {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
1162 {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
1163 {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
1164 {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
1086 {0x00016044, 0x056db2db, 0x056db2db, 0x03b6d2e4, 0x03b6d2e4}, 1165 {0x00016044, 0x056db2db, 0x056db2db, 0x03b6d2e4, 0x03b6d2e4},
1087 {0x00016048, 0x24927266, 0x24927266, 0x8e483266, 0x8e483266}, 1166 {0x00016048, 0x24925666, 0x24925666, 0x8e481266, 0x8e481266},
1167 {0x00016280, 0x01000015, 0x01000015, 0x01001015, 0x01001015},
1168 {0x00016288, 0x30318000, 0x30318000, 0x00318000, 0x00318000},
1088 {0x00016444, 0x056db2db, 0x056db2db, 0x03b6d2e4, 0x03b6d2e4}, 1169 {0x00016444, 0x056db2db, 0x056db2db, 0x03b6d2e4, 0x03b6d2e4},
1089 {0x00016448, 0x24927266, 0x24927266, 0x8e482266, 0x8e482266}, 1170 {0x00016448, 0x24925666, 0x24925666, 0x8e481266, 0x8e481266},
1090}; 1171};
1091 1172
1092static const u32 ar9340_1p0_mac_core[][2] = { 1173static const u32 ar9340_1p0_mac_core[][2] = {
1093 /* Addr allmodes */ 1174 /* Addr allmodes */
1094 {0x00000008, 0x00000000}, 1175 {0x00000008, 0x00000000},
1095 {0x00000030, 0x00020085}, 1176 {0x00000030, 0x00020085},
1096 {0x00000034, 0x00000005}, 1177 {0x00000034, 0x00000005},
@@ -1119,6 +1200,7 @@ static const u32 ar9340_1p0_mac_core[][2] = {
1119 {0x00008004, 0x00000000}, 1200 {0x00008004, 0x00000000},
1120 {0x00008008, 0x00000000}, 1201 {0x00008008, 0x00000000},
1121 {0x0000800c, 0x00000000}, 1202 {0x0000800c, 0x00000000},
1203 {0x00008010, 0x00080800},
1122 {0x00008018, 0x00000000}, 1204 {0x00008018, 0x00000000},
1123 {0x00008020, 0x00000000}, 1205 {0x00008020, 0x00000000},
1124 {0x00008038, 0x00000000}, 1206 {0x00008038, 0x00000000},
@@ -1146,7 +1228,7 @@ static const u32 ar9340_1p0_mac_core[][2] = {
1146 {0x000080bc, 0x00000000}, 1228 {0x000080bc, 0x00000000},
1147 {0x000080c0, 0x2a800000}, 1229 {0x000080c0, 0x2a800000},
1148 {0x000080c4, 0x06900168}, 1230 {0x000080c4, 0x06900168},
1149 {0x000080c8, 0x13881c20}, 1231 {0x000080c8, 0x13881c22},
1150 {0x000080cc, 0x01f40000}, 1232 {0x000080cc, 0x01f40000},
1151 {0x000080d0, 0x00252500}, 1233 {0x000080d0, 0x00252500},
1152 {0x000080d4, 0x00a00000}, 1234 {0x000080d4, 0x00a00000},
@@ -1250,276 +1332,17 @@ static const u32 ar9340_1p0_mac_core[][2] = {
1250 {0x000083c4, 0x00000000}, 1332 {0x000083c4, 0x00000000},
1251 {0x000083c8, 0x00000000}, 1333 {0x000083c8, 0x00000000},
1252 {0x000083cc, 0x00000200}, 1334 {0x000083cc, 0x00000200},
1253 {0x000083d0, 0x000301ff}, 1335 {0x000083d0, 0x000101ff},
1254}; 1336};
1255 1337
1256static const u32 ar9340Common_wo_xlna_rx_gain_table_1p0[][2] = { 1338#define ar9340Common_wo_xlna_rx_gain_table_1p0 ar9300Common_wo_xlna_rx_gain_table_2p2
1257 /* Addr allmodes */
1258 {0x0000a000, 0x00010000},
1259 {0x0000a004, 0x00030002},
1260 {0x0000a008, 0x00050004},
1261 {0x0000a00c, 0x00810080},
1262 {0x0000a010, 0x00830082},
1263 {0x0000a014, 0x01810180},
1264 {0x0000a018, 0x01830182},
1265 {0x0000a01c, 0x01850184},
1266 {0x0000a020, 0x01890188},
1267 {0x0000a024, 0x018b018a},
1268 {0x0000a028, 0x018d018c},
1269 {0x0000a02c, 0x03820190},
1270 {0x0000a030, 0x03840383},
1271 {0x0000a034, 0x03880385},
1272 {0x0000a038, 0x038a0389},
1273 {0x0000a03c, 0x038c038b},
1274 {0x0000a040, 0x0390038d},
1275 {0x0000a044, 0x03920391},
1276 {0x0000a048, 0x03940393},
1277 {0x0000a04c, 0x03960395},
1278 {0x0000a050, 0x00000000},
1279 {0x0000a054, 0x00000000},
1280 {0x0000a058, 0x00000000},
1281 {0x0000a05c, 0x00000000},
1282 {0x0000a060, 0x00000000},
1283 {0x0000a064, 0x00000000},
1284 {0x0000a068, 0x00000000},
1285 {0x0000a06c, 0x00000000},
1286 {0x0000a070, 0x00000000},
1287 {0x0000a074, 0x00000000},
1288 {0x0000a078, 0x00000000},
1289 {0x0000a07c, 0x00000000},
1290 {0x0000a080, 0x29292929},
1291 {0x0000a084, 0x29292929},
1292 {0x0000a088, 0x29292929},
1293 {0x0000a08c, 0x29292929},
1294 {0x0000a090, 0x22292929},
1295 {0x0000a094, 0x1d1d2222},
1296 {0x0000a098, 0x0c111117},
1297 {0x0000a09c, 0x00030303},
1298 {0x0000a0a0, 0x00000000},
1299 {0x0000a0a4, 0x00000000},
1300 {0x0000a0a8, 0x00000000},
1301 {0x0000a0ac, 0x00000000},
1302 {0x0000a0b0, 0x00000000},
1303 {0x0000a0b4, 0x00000000},
1304 {0x0000a0b8, 0x00000000},
1305 {0x0000a0bc, 0x00000000},
1306 {0x0000a0c0, 0x001f0000},
1307 {0x0000a0c4, 0x01000101},
1308 {0x0000a0c8, 0x011e011f},
1309 {0x0000a0cc, 0x011c011d},
1310 {0x0000a0d0, 0x02030204},
1311 {0x0000a0d4, 0x02010202},
1312 {0x0000a0d8, 0x021f0200},
1313 {0x0000a0dc, 0x0302021e},
1314 {0x0000a0e0, 0x03000301},
1315 {0x0000a0e4, 0x031e031f},
1316 {0x0000a0e8, 0x0402031d},
1317 {0x0000a0ec, 0x04000401},
1318 {0x0000a0f0, 0x041e041f},
1319 {0x0000a0f4, 0x0502041d},
1320 {0x0000a0f8, 0x05000501},
1321 {0x0000a0fc, 0x051e051f},
1322 {0x0000a100, 0x06010602},
1323 {0x0000a104, 0x061f0600},
1324 {0x0000a108, 0x061d061e},
1325 {0x0000a10c, 0x07020703},
1326 {0x0000a110, 0x07000701},
1327 {0x0000a114, 0x00000000},
1328 {0x0000a118, 0x00000000},
1329 {0x0000a11c, 0x00000000},
1330 {0x0000a120, 0x00000000},
1331 {0x0000a124, 0x00000000},
1332 {0x0000a128, 0x00000000},
1333 {0x0000a12c, 0x00000000},
1334 {0x0000a130, 0x00000000},
1335 {0x0000a134, 0x00000000},
1336 {0x0000a138, 0x00000000},
1337 {0x0000a13c, 0x00000000},
1338 {0x0000a140, 0x001f0000},
1339 {0x0000a144, 0x01000101},
1340 {0x0000a148, 0x011e011f},
1341 {0x0000a14c, 0x011c011d},
1342 {0x0000a150, 0x02030204},
1343 {0x0000a154, 0x02010202},
1344 {0x0000a158, 0x021f0200},
1345 {0x0000a15c, 0x0302021e},
1346 {0x0000a160, 0x03000301},
1347 {0x0000a164, 0x031e031f},
1348 {0x0000a168, 0x0402031d},
1349 {0x0000a16c, 0x04000401},
1350 {0x0000a170, 0x041e041f},
1351 {0x0000a174, 0x0502041d},
1352 {0x0000a178, 0x05000501},
1353 {0x0000a17c, 0x051e051f},
1354 {0x0000a180, 0x06010602},
1355 {0x0000a184, 0x061f0600},
1356 {0x0000a188, 0x061d061e},
1357 {0x0000a18c, 0x07020703},
1358 {0x0000a190, 0x07000701},
1359 {0x0000a194, 0x00000000},
1360 {0x0000a198, 0x00000000},
1361 {0x0000a19c, 0x00000000},
1362 {0x0000a1a0, 0x00000000},
1363 {0x0000a1a4, 0x00000000},
1364 {0x0000a1a8, 0x00000000},
1365 {0x0000a1ac, 0x00000000},
1366 {0x0000a1b0, 0x00000000},
1367 {0x0000a1b4, 0x00000000},
1368 {0x0000a1b8, 0x00000000},
1369 {0x0000a1bc, 0x00000000},
1370 {0x0000a1c0, 0x00000000},
1371 {0x0000a1c4, 0x00000000},
1372 {0x0000a1c8, 0x00000000},
1373 {0x0000a1cc, 0x00000000},
1374 {0x0000a1d0, 0x00000000},
1375 {0x0000a1d4, 0x00000000},
1376 {0x0000a1d8, 0x00000000},
1377 {0x0000a1dc, 0x00000000},
1378 {0x0000a1e0, 0x00000000},
1379 {0x0000a1e4, 0x00000000},
1380 {0x0000a1e8, 0x00000000},
1381 {0x0000a1ec, 0x00000000},
1382 {0x0000a1f0, 0x00000396},
1383 {0x0000a1f4, 0x00000396},
1384 {0x0000a1f8, 0x00000396},
1385 {0x0000a1fc, 0x00000196},
1386 {0x0000b000, 0x00010000},
1387 {0x0000b004, 0x00030002},
1388 {0x0000b008, 0x00050004},
1389 {0x0000b00c, 0x00810080},
1390 {0x0000b010, 0x00830082},
1391 {0x0000b014, 0x01810180},
1392 {0x0000b018, 0x01830182},
1393 {0x0000b01c, 0x01850184},
1394 {0x0000b020, 0x02810280},
1395 {0x0000b024, 0x02830282},
1396 {0x0000b028, 0x02850284},
1397 {0x0000b02c, 0x02890288},
1398 {0x0000b030, 0x028b028a},
1399 {0x0000b034, 0x0388028c},
1400 {0x0000b038, 0x038a0389},
1401 {0x0000b03c, 0x038c038b},
1402 {0x0000b040, 0x0390038d},
1403 {0x0000b044, 0x03920391},
1404 {0x0000b048, 0x03940393},
1405 {0x0000b04c, 0x03960395},
1406 {0x0000b050, 0x00000000},
1407 {0x0000b054, 0x00000000},
1408 {0x0000b058, 0x00000000},
1409 {0x0000b05c, 0x00000000},
1410 {0x0000b060, 0x00000000},
1411 {0x0000b064, 0x00000000},
1412 {0x0000b068, 0x00000000},
1413 {0x0000b06c, 0x00000000},
1414 {0x0000b070, 0x00000000},
1415 {0x0000b074, 0x00000000},
1416 {0x0000b078, 0x00000000},
1417 {0x0000b07c, 0x00000000},
1418 {0x0000b080, 0x32323232},
1419 {0x0000b084, 0x2f2f3232},
1420 {0x0000b088, 0x23282a2d},
1421 {0x0000b08c, 0x1c1e2123},
1422 {0x0000b090, 0x14171919},
1423 {0x0000b094, 0x0e0e1214},
1424 {0x0000b098, 0x03050707},
1425 {0x0000b09c, 0x00030303},
1426 {0x0000b0a0, 0x00000000},
1427 {0x0000b0a4, 0x00000000},
1428 {0x0000b0a8, 0x00000000},
1429 {0x0000b0ac, 0x00000000},
1430 {0x0000b0b0, 0x00000000},
1431 {0x0000b0b4, 0x00000000},
1432 {0x0000b0b8, 0x00000000},
1433 {0x0000b0bc, 0x00000000},
1434 {0x0000b0c0, 0x003f0020},
1435 {0x0000b0c4, 0x00400041},
1436 {0x0000b0c8, 0x0140005f},
1437 {0x0000b0cc, 0x0160015f},
1438 {0x0000b0d0, 0x017e017f},
1439 {0x0000b0d4, 0x02410242},
1440 {0x0000b0d8, 0x025f0240},
1441 {0x0000b0dc, 0x027f0260},
1442 {0x0000b0e0, 0x0341027e},
1443 {0x0000b0e4, 0x035f0340},
1444 {0x0000b0e8, 0x037f0360},
1445 {0x0000b0ec, 0x04400441},
1446 {0x0000b0f0, 0x0460045f},
1447 {0x0000b0f4, 0x0541047f},
1448 {0x0000b0f8, 0x055f0540},
1449 {0x0000b0fc, 0x057f0560},
1450 {0x0000b100, 0x06400641},
1451 {0x0000b104, 0x0660065f},
1452 {0x0000b108, 0x067e067f},
1453 {0x0000b10c, 0x07410742},
1454 {0x0000b110, 0x075f0740},
1455 {0x0000b114, 0x077f0760},
1456 {0x0000b118, 0x07800781},
1457 {0x0000b11c, 0x07a0079f},
1458 {0x0000b120, 0x07c107bf},
1459 {0x0000b124, 0x000007c0},
1460 {0x0000b128, 0x00000000},
1461 {0x0000b12c, 0x00000000},
1462 {0x0000b130, 0x00000000},
1463 {0x0000b134, 0x00000000},
1464 {0x0000b138, 0x00000000},
1465 {0x0000b13c, 0x00000000},
1466 {0x0000b140, 0x003f0020},
1467 {0x0000b144, 0x00400041},
1468 {0x0000b148, 0x0140005f},
1469 {0x0000b14c, 0x0160015f},
1470 {0x0000b150, 0x017e017f},
1471 {0x0000b154, 0x02410242},
1472 {0x0000b158, 0x025f0240},
1473 {0x0000b15c, 0x027f0260},
1474 {0x0000b160, 0x0341027e},
1475 {0x0000b164, 0x035f0340},
1476 {0x0000b168, 0x037f0360},
1477 {0x0000b16c, 0x04400441},
1478 {0x0000b170, 0x0460045f},
1479 {0x0000b174, 0x0541047f},
1480 {0x0000b178, 0x055f0540},
1481 {0x0000b17c, 0x057f0560},
1482 {0x0000b180, 0x06400641},
1483 {0x0000b184, 0x0660065f},
1484 {0x0000b188, 0x067e067f},
1485 {0x0000b18c, 0x07410742},
1486 {0x0000b190, 0x075f0740},
1487 {0x0000b194, 0x077f0760},
1488 {0x0000b198, 0x07800781},
1489 {0x0000b19c, 0x07a0079f},
1490 {0x0000b1a0, 0x07c107bf},
1491 {0x0000b1a4, 0x000007c0},
1492 {0x0000b1a8, 0x00000000},
1493 {0x0000b1ac, 0x00000000},
1494 {0x0000b1b0, 0x00000000},
1495 {0x0000b1b4, 0x00000000},
1496 {0x0000b1b8, 0x00000000},
1497 {0x0000b1bc, 0x00000000},
1498 {0x0000b1c0, 0x00000000},
1499 {0x0000b1c4, 0x00000000},
1500 {0x0000b1c8, 0x00000000},
1501 {0x0000b1cc, 0x00000000},
1502 {0x0000b1d0, 0x00000000},
1503 {0x0000b1d4, 0x00000000},
1504 {0x0000b1d8, 0x00000000},
1505 {0x0000b1dc, 0x00000000},
1506 {0x0000b1e0, 0x00000000},
1507 {0x0000b1e4, 0x00000000},
1508 {0x0000b1e8, 0x00000000},
1509 {0x0000b1ec, 0x00000000},
1510 {0x0000b1f0, 0x00000396},
1511 {0x0000b1f4, 0x00000396},
1512 {0x0000b1f8, 0x00000396},
1513 {0x0000b1fc, 0x00000196},
1514};
1515 1339
1516static const u32 ar9340_1p0_soc_preamble[][2] = { 1340static const u32 ar9340_1p0_soc_preamble[][2] = {
1517 /* Addr allmodes */ 1341 /* Addr allmodes */
1518 {0x000040a4, 0x00a0c1c9},
1519 {0x00007008, 0x00000000}, 1342 {0x00007008, 0x00000000},
1520 {0x00007020, 0x00000000}, 1343 {0x00007020, 0x00000000},
1521 {0x00007034, 0x00000002}, 1344 {0x00007034, 0x00000002},
1522 {0x00007038, 0x000004c2}, 1345 {0x00007038, 0x000004c2},
1523}; 1346};
1524 1347
1525#endif 1348#endif /* INITVALS_9340_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index 1d6658e139b5..4ef7dcccaa2f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2010 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
3 * 4 *
4 * Permission to use, copy, modify, and/or distribute this software for any 5 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 6 * purpose with or without fee is hereby granted, provided that the above
@@ -52,7 +53,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
52 {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020}, 53 {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
53 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8}, 54 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8},
54 {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e}, 55 {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e},
55 {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3376605e, 0x33795d5e}, 56 {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3376605e, 0x32395d5e},
56 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 57 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
57 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, 58 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
58 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, 59 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
@@ -61,7 +62,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
61 {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27}, 62 {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
62 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, 63 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
63 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, 64 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
64 {0x0000a204, 0x013187c0, 0x013187c4, 0x013187c4, 0x013187c0}, 65 {0x0000a204, 0x01318fc0, 0x01318fc4, 0x01318fc4, 0x01318fc0},
65 {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004}, 66 {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
66 {0x0000a22c, 0x01026a2f, 0x01026a27, 0x01026a2f, 0x01026a2f}, 67 {0x0000a22c, 0x01026a2f, 0x01026a27, 0x01026a2f, 0x01026a2f},
67 {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b}, 68 {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
@@ -958,7 +959,7 @@ static const u32 ar9462_2p0_radio_core[][2] = {
958 {0x0001604c, 0x2699e04f}, 959 {0x0001604c, 0x2699e04f},
959 {0x00016050, 0x6db6db6c}, 960 {0x00016050, 0x6db6db6c},
960 {0x00016058, 0x6c200000}, 961 {0x00016058, 0x6c200000},
961 {0x00016080, 0x00040000}, 962 {0x00016080, 0x000c0000},
962 {0x00016084, 0x9a68048c}, 963 {0x00016084, 0x9a68048c},
963 {0x00016088, 0x54214514}, 964 {0x00016088, 0x54214514},
964 {0x0001608c, 0x1203040b}, 965 {0x0001608c, 0x1203040b},
@@ -981,7 +982,7 @@ static const u32 ar9462_2p0_radio_core[][2] = {
981 {0x00016144, 0x02084080}, 982 {0x00016144, 0x02084080},
982 {0x00016148, 0x000080c0}, 983 {0x00016148, 0x000080c0},
983 {0x00016280, 0x050a0001}, 984 {0x00016280, 0x050a0001},
984 {0x00016284, 0x3d841400}, 985 {0x00016284, 0x3d841418},
985 {0x00016288, 0x00000000}, 986 {0x00016288, 0x00000000},
986 {0x0001628c, 0xe3000000}, 987 {0x0001628c, 0xe3000000},
987 {0x00016290, 0xa1005080}, 988 {0x00016290, 0xa1005080},
@@ -1007,6 +1008,7 @@ static const u32 ar9462_2p0_radio_core[][2] = {
1007 1008
1008static const u32 ar9462_2p0_soc_preamble[][2] = { 1009static const u32 ar9462_2p0_soc_preamble[][2] = {
1009 /* Addr allmodes */ 1010 /* Addr allmodes */
1011 {0x000040a4, 0x00a0c1c9},
1010 {0x00007020, 0x00000000}, 1012 {0x00007020, 0x00000000},
1011 {0x00007034, 0x00000002}, 1013 {0x00007034, 0x00000002},
1012 {0x00007038, 0x000004c2}, 1014 {0x00007038, 0x000004c2},
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
index d16d029f81a9..fb4497fc7a3d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2010-2011 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
3 * 4 *
4 * Permission to use, copy, modify, and/or distribute this software for any 5 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 6 * purpose with or without fee is hereby granted, provided that the above
@@ -17,360 +18,151 @@
17#ifndef INITVALS_9485_H 18#ifndef INITVALS_9485_H
18#define INITVALS_9485_H 19#define INITVALS_9485_H
19 20
20static const u32 ar9485_1_1_mac_core[][2] = { 21/* AR9485 1.0 */
21 /* Addr allmodes */
22 {0x00000008, 0x00000000},
23 {0x00000030, 0x00020085},
24 {0x00000034, 0x00000005},
25 {0x00000040, 0x00000000},
26 {0x00000044, 0x00000000},
27 {0x00000048, 0x00000008},
28 {0x0000004c, 0x00000010},
29 {0x00000050, 0x00000000},
30 {0x00001040, 0x002ffc0f},
31 {0x00001044, 0x002ffc0f},
32 {0x00001048, 0x002ffc0f},
33 {0x0000104c, 0x002ffc0f},
34 {0x00001050, 0x002ffc0f},
35 {0x00001054, 0x002ffc0f},
36 {0x00001058, 0x002ffc0f},
37 {0x0000105c, 0x002ffc0f},
38 {0x00001060, 0x002ffc0f},
39 {0x00001064, 0x002ffc0f},
40 {0x000010f0, 0x00000100},
41 {0x00001270, 0x00000000},
42 {0x000012b0, 0x00000000},
43 {0x000012f0, 0x00000000},
44 {0x0000143c, 0x00000000},
45 {0x0000147c, 0x00000000},
46 {0x00008000, 0x00000000},
47 {0x00008004, 0x00000000},
48 {0x00008008, 0x00000000},
49 {0x0000800c, 0x00000000},
50 {0x00008018, 0x00000000},
51 {0x00008020, 0x00000000},
52 {0x00008038, 0x00000000},
53 {0x0000803c, 0x00000000},
54 {0x00008040, 0x00000000},
55 {0x00008044, 0x00000000},
56 {0x00008048, 0x00000000},
57 {0x0000804c, 0xffffffff},
58 {0x00008054, 0x00000000},
59 {0x00008058, 0x00000000},
60 {0x0000805c, 0x000fc78f},
61 {0x00008060, 0x0000000f},
62 {0x00008064, 0x00000000},
63 {0x00008070, 0x00000310},
64 {0x00008074, 0x00000020},
65 {0x00008078, 0x00000000},
66 {0x0000809c, 0x0000000f},
67 {0x000080a0, 0x00000000},
68 {0x000080a4, 0x02ff0000},
69 {0x000080a8, 0x0e070605},
70 {0x000080ac, 0x0000000d},
71 {0x000080b0, 0x00000000},
72 {0x000080b4, 0x00000000},
73 {0x000080b8, 0x00000000},
74 {0x000080bc, 0x00000000},
75 {0x000080c0, 0x2a800000},
76 {0x000080c4, 0x06900168},
77 {0x000080c8, 0x13881c22},
78 {0x000080cc, 0x01f40000},
79 {0x000080d0, 0x00252500},
80 {0x000080d4, 0x00a00000},
81 {0x000080d8, 0x00400000},
82 {0x000080dc, 0x00000000},
83 {0x000080e0, 0xffffffff},
84 {0x000080e4, 0x0000ffff},
85 {0x000080e8, 0x3f3f3f3f},
86 {0x000080ec, 0x00000000},
87 {0x000080f0, 0x00000000},
88 {0x000080f4, 0x00000000},
89 {0x000080fc, 0x00020000},
90 {0x00008100, 0x00000000},
91 {0x00008108, 0x00000052},
92 {0x0000810c, 0x00000000},
93 {0x00008110, 0x00000000},
94 {0x00008114, 0x000007ff},
95 {0x00008118, 0x000000aa},
96 {0x0000811c, 0x00003210},
97 {0x00008124, 0x00000000},
98 {0x00008128, 0x00000000},
99 {0x0000812c, 0x00000000},
100 {0x00008130, 0x00000000},
101 {0x00008134, 0x00000000},
102 {0x00008138, 0x00000000},
103 {0x0000813c, 0x0000ffff},
104 {0x00008144, 0xffffffff},
105 {0x00008168, 0x00000000},
106 {0x0000816c, 0x00000000},
107 {0x00008170, 0x18486200},
108 {0x00008174, 0x33332210},
109 {0x00008178, 0x00000000},
110 {0x0000817c, 0x00020000},
111 {0x000081c0, 0x00000000},
112 {0x000081c4, 0x33332210},
113 {0x000081d4, 0x00000000},
114 {0x000081ec, 0x00000000},
115 {0x000081f0, 0x00000000},
116 {0x000081f4, 0x00000000},
117 {0x000081f8, 0x00000000},
118 {0x000081fc, 0x00000000},
119 {0x00008240, 0x00100000},
120 {0x00008244, 0x0010f400},
121 {0x00008248, 0x00000800},
122 {0x0000824c, 0x0001e800},
123 {0x00008250, 0x00000000},
124 {0x00008254, 0x00000000},
125 {0x00008258, 0x00000000},
126 {0x0000825c, 0x40000000},
127 {0x00008260, 0x00080922},
128 {0x00008264, 0x9ca00010},
129 {0x00008268, 0xffffffff},
130 {0x0000826c, 0x0000ffff},
131 {0x00008270, 0x00000000},
132 {0x00008274, 0x40000000},
133 {0x00008278, 0x003e4180},
134 {0x0000827c, 0x00000004},
135 {0x00008284, 0x0000002c},
136 {0x00008288, 0x0000002c},
137 {0x0000828c, 0x000000ff},
138 {0x00008294, 0x00000000},
139 {0x00008298, 0x00000000},
140 {0x0000829c, 0x00000000},
141 {0x00008300, 0x00000140},
142 {0x00008314, 0x00000000},
143 {0x0000831c, 0x0000010d},
144 {0x00008328, 0x00000000},
145 {0x0000832c, 0x00000007},
146 {0x00008330, 0x00000302},
147 {0x00008334, 0x00000700},
148 {0x00008338, 0x00ff0000},
149 {0x0000833c, 0x02400000},
150 {0x00008340, 0x000107ff},
151 {0x00008344, 0xa248105b},
152 {0x00008348, 0x008f0000},
153 {0x0000835c, 0x00000000},
154 {0x00008360, 0xffffffff},
155 {0x00008364, 0xffffffff},
156 {0x00008368, 0x00000000},
157 {0x00008370, 0x00000000},
158 {0x00008374, 0x000000ff},
159 {0x00008378, 0x00000000},
160 {0x0000837c, 0x00000000},
161 {0x00008380, 0xffffffff},
162 {0x00008384, 0xffffffff},
163 {0x00008390, 0xffffffff},
164 {0x00008394, 0xffffffff},
165 {0x00008398, 0x00000000},
166 {0x0000839c, 0x00000000},
167 {0x000083a0, 0x00000000},
168 {0x000083a4, 0x0000fa14},
169 {0x000083a8, 0x000f0c00},
170 {0x000083ac, 0x33332210},
171 {0x000083b0, 0x33332210},
172 {0x000083b4, 0x33332210},
173 {0x000083b8, 0x33332210},
174 {0x000083bc, 0x00000000},
175 {0x000083c0, 0x00000000},
176 {0x000083c4, 0x00000000},
177 {0x000083c8, 0x00000000},
178 {0x000083cc, 0x00000200},
179 {0x000083d0, 0x000301ff},
180};
181 22
182static const u32 ar9485_1_1_baseband_core[][2] = { 23#define ar9485_1_1_mac_postamble ar9300_2p2_mac_postamble
183 /* Addr allmodes */
184 {0x00009800, 0xafe68e30},
185 {0x00009804, 0xfd14e000},
186 {0x00009808, 0x9c0a8f6b},
187 {0x0000980c, 0x04800000},
188 {0x00009814, 0x9280c00a},
189 {0x00009818, 0x00000000},
190 {0x0000981c, 0x00020028},
191 {0x00009834, 0x5f3ca3de},
192 {0x00009838, 0x0108ecff},
193 {0x0000983c, 0x14750600},
194 {0x00009880, 0x201fff00},
195 {0x00009884, 0x00001042},
196 {0x000098a4, 0x00200400},
197 {0x000098b0, 0x52440bbe},
198 {0x000098d0, 0x004b6a8e},
199 {0x000098d4, 0x00000820},
200 {0x000098dc, 0x00000000},
201 {0x000098f0, 0x00000000},
202 {0x000098f4, 0x00000000},
203 {0x00009c04, 0x00000000},
204 {0x00009c08, 0x03200000},
205 {0x00009c0c, 0x00000000},
206 {0x00009c10, 0x00000000},
207 {0x00009c14, 0x00046384},
208 {0x00009c18, 0x05b6b440},
209 {0x00009c1c, 0x00b6b440},
210 {0x00009d00, 0xc080a333},
211 {0x00009d04, 0x40206c10},
212 {0x00009d08, 0x009c4060},
213 {0x00009d0c, 0x1883800a},
214 {0x00009d10, 0x01834061},
215 {0x00009d14, 0x00c00400},
216 {0x00009d18, 0x00000000},
217 {0x00009d1c, 0x00000000},
218 {0x00009e08, 0x0038233c},
219 {0x00009e24, 0x9927b515},
220 {0x00009e28, 0x12ef0200},
221 {0x00009e30, 0x06336f77},
222 {0x00009e34, 0x6af6532f},
223 {0x00009e38, 0x0cc80c00},
224 {0x00009e40, 0x0d261820},
225 {0x00009e4c, 0x00001004},
226 {0x00009e50, 0x00ff03f1},
227 {0x00009fc0, 0x80be4788},
228 {0x00009fc4, 0x0001efb5},
229 {0x00009fcc, 0x40000014},
230 {0x0000a20c, 0x00000000},
231 {0x0000a210, 0x00000000},
232 {0x0000a220, 0x00000000},
233 {0x0000a224, 0x00000000},
234 {0x0000a228, 0x10002310},
235 {0x0000a23c, 0x00000000},
236 {0x0000a244, 0x0c000000},
237 {0x0000a2a0, 0x00000001},
238 {0x0000a2c0, 0x00000001},
239 {0x0000a2c8, 0x00000000},
240 {0x0000a2cc, 0x18c43433},
241 {0x0000a2d4, 0x00000000},
242 {0x0000a2dc, 0x00000000},
243 {0x0000a2e0, 0x00000000},
244 {0x0000a2e4, 0x00000000},
245 {0x0000a2e8, 0x00000000},
246 {0x0000a2ec, 0x00000000},
247 {0x0000a2f0, 0x00000000},
248 {0x0000a2f4, 0x00000000},
249 {0x0000a2f8, 0x00000000},
250 {0x0000a344, 0x00000000},
251 {0x0000a34c, 0x00000000},
252 {0x0000a350, 0x0000a000},
253 {0x0000a364, 0x00000000},
254 {0x0000a370, 0x00000000},
255 {0x0000a390, 0x00000001},
256 {0x0000a394, 0x00000444},
257 {0x0000a398, 0x001f0e0f},
258 {0x0000a39c, 0x0075393f},
259 {0x0000a3a0, 0xb79f6427},
260 {0x0000a3a4, 0x000000ff},
261 {0x0000a3a8, 0x3b3b3b3b},
262 {0x0000a3ac, 0x2f2f2f2f},
263 {0x0000a3c0, 0x20202020},
264 {0x0000a3c4, 0x22222220},
265 {0x0000a3c8, 0x20200020},
266 {0x0000a3cc, 0x20202020},
267 {0x0000a3d0, 0x20202020},
268 {0x0000a3d4, 0x20202020},
269 {0x0000a3d8, 0x20202020},
270 {0x0000a3dc, 0x20202020},
271 {0x0000a3e0, 0x20202020},
272 {0x0000a3e4, 0x20202020},
273 {0x0000a3e8, 0x20202020},
274 {0x0000a3ec, 0x20202020},
275 {0x0000a3f0, 0x00000000},
276 {0x0000a3f4, 0x00000006},
277 {0x0000a3f8, 0x0cdbd380},
278 {0x0000a3fc, 0x000f0f01},
279 {0x0000a400, 0x8fa91f01},
280 {0x0000a404, 0x00000000},
281 {0x0000a408, 0x0e79e5c6},
282 {0x0000a40c, 0x00820820},
283 {0x0000a414, 0x1ce739cf},
284 {0x0000a418, 0x2d0019ce},
285 {0x0000a41c, 0x1ce739ce},
286 {0x0000a420, 0x000001ce},
287 {0x0000a424, 0x1ce739ce},
288 {0x0000a428, 0x000001ce},
289 {0x0000a42c, 0x1ce739ce},
290 {0x0000a430, 0x1ce739ce},
291 {0x0000a434, 0x00000000},
292 {0x0000a438, 0x00001801},
293 {0x0000a43c, 0x00000000},
294 {0x0000a440, 0x00000000},
295 {0x0000a444, 0x00000000},
296 {0x0000a448, 0x04000000},
297 {0x0000a44c, 0x00000001},
298 {0x0000a450, 0x00010000},
299 {0x0000a5c4, 0xbfad9d74},
300 {0x0000a5c8, 0x0048060a},
301 {0x0000a5cc, 0x00000637},
302 {0x0000a760, 0x03020100},
303 {0x0000a764, 0x09080504},
304 {0x0000a768, 0x0d0c0b0a},
305 {0x0000a76c, 0x13121110},
306 {0x0000a770, 0x31301514},
307 {0x0000a774, 0x35343332},
308 {0x0000a778, 0x00000036},
309 {0x0000a780, 0x00000838},
310 {0x0000a7c0, 0x00000000},
311 {0x0000a7c4, 0xfffffffc},
312 {0x0000a7c8, 0x00000000},
313 {0x0000a7cc, 0x00000000},
314 {0x0000a7d0, 0x00000000},
315 {0x0000a7d4, 0x00000004},
316 {0x0000a7dc, 0x00000000},
317};
318 24
319static const u32 ar9485Common_1_1[][2] = { 25static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = {
320 /* Addr allmodes */ 26 /* Addr allmodes */
321 {0x00007010, 0x00000022}, 27 {0x00018c00, 0x18012e5e},
322 {0x00007020, 0x00000000}, 28 {0x00018c04, 0x000801d8},
323 {0x00007034, 0x00000002}, 29 {0x00018c08, 0x0000080c},
324 {0x00007038, 0x000004c2},
325}; 30};
326 31
327static const u32 ar9485_1_1_baseband_postamble[][5] = { 32static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = {
328 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 33 /* Addr allmodes */
329 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005}, 34 {0x0000a000, 0x00060005},
330 {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e}, 35 {0x0000a004, 0x00810080},
331 {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0}, 36 {0x0000a008, 0x00830082},
332 {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881}, 37 {0x0000a00c, 0x00850084},
333 {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4}, 38 {0x0000a010, 0x01820181},
334 {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c}, 39 {0x0000a014, 0x01840183},
335 {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044}, 40 {0x0000a018, 0x01880185},
336 {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0}, 41 {0x0000a01c, 0x018a0189},
337 {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020}, 42 {0x0000a020, 0x02850284},
338 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2}, 43 {0x0000a024, 0x02890288},
339 {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e}, 44 {0x0000a028, 0x028b028a},
340 {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e}, 45 {0x0000a02c, 0x03850384},
341 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 46 {0x0000a030, 0x03890388},
342 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, 47 {0x0000a034, 0x038b038a},
343 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, 48 {0x0000a038, 0x038d038c},
344 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, 49 {0x0000a03c, 0x03910390},
345 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222}, 50 {0x0000a040, 0x03930392},
346 {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324}, 51 {0x0000a044, 0x03950394},
347 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010}, 52 {0x0000a048, 0x00000396},
348 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, 53 {0x0000a04c, 0x00000000},
349 {0x0000a204, 0x01303fc0, 0x01303fc4, 0x01303fc4, 0x01303fc0}, 54 {0x0000a050, 0x00000000},
350 {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004}, 55 {0x0000a054, 0x00000000},
351 {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b}, 56 {0x0000a058, 0x00000000},
352 {0x0000a234, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff}, 57 {0x0000a05c, 0x00000000},
353 {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018}, 58 {0x0000a060, 0x00000000},
354 {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108}, 59 {0x0000a064, 0x00000000},
355 {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898}, 60 {0x0000a068, 0x00000000},
356 {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002}, 61 {0x0000a06c, 0x00000000},
357 {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e}, 62 {0x0000a070, 0x00000000},
358 {0x0000a260, 0x3a021501, 0x3a021501, 0x3a021501, 0x3a021501}, 63 {0x0000a074, 0x00000000},
359 {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e}, 64 {0x0000a078, 0x00000000},
360 {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b}, 65 {0x0000a07c, 0x00000000},
361 {0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0}, 66 {0x0000a080, 0x28282828},
362 {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 67 {0x0000a084, 0x28282828},
363 {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 68 {0x0000a088, 0x28282828},
364 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, 69 {0x0000a08c, 0x28282828},
365 {0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982}, 70 {0x0000a090, 0x28282828},
366 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a}, 71 {0x0000a094, 0x24242428},
367 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 72 {0x0000a098, 0x171e1e1e},
368 {0x0000be04, 0x00802020, 0x00802020, 0x00802020, 0x00802020}, 73 {0x0000a09c, 0x02020b0b},
369 {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 74 {0x0000a0a0, 0x02020202},
75 {0x0000a0a4, 0x00000000},
76 {0x0000a0a8, 0x00000000},
77 {0x0000a0ac, 0x00000000},
78 {0x0000a0b0, 0x00000000},
79 {0x0000a0b4, 0x00000000},
80 {0x0000a0b8, 0x00000000},
81 {0x0000a0bc, 0x00000000},
82 {0x0000a0c0, 0x22072208},
83 {0x0000a0c4, 0x22052206},
84 {0x0000a0c8, 0x22032204},
85 {0x0000a0cc, 0x22012202},
86 {0x0000a0d0, 0x221f2200},
87 {0x0000a0d4, 0x221d221e},
88 {0x0000a0d8, 0x33023303},
89 {0x0000a0dc, 0x33003301},
90 {0x0000a0e0, 0x331e331f},
91 {0x0000a0e4, 0x4402331d},
92 {0x0000a0e8, 0x44004401},
93 {0x0000a0ec, 0x441e441f},
94 {0x0000a0f0, 0x55025503},
95 {0x0000a0f4, 0x55005501},
96 {0x0000a0f8, 0x551e551f},
97 {0x0000a0fc, 0x6602551d},
98 {0x0000a100, 0x66006601},
99 {0x0000a104, 0x661e661f},
100 {0x0000a108, 0x7703661d},
101 {0x0000a10c, 0x77017702},
102 {0x0000a110, 0x00007700},
103 {0x0000a114, 0x00000000},
104 {0x0000a118, 0x00000000},
105 {0x0000a11c, 0x00000000},
106 {0x0000a120, 0x00000000},
107 {0x0000a124, 0x00000000},
108 {0x0000a128, 0x00000000},
109 {0x0000a12c, 0x00000000},
110 {0x0000a130, 0x00000000},
111 {0x0000a134, 0x00000000},
112 {0x0000a138, 0x00000000},
113 {0x0000a13c, 0x00000000},
114 {0x0000a140, 0x001f0000},
115 {0x0000a144, 0x111f1100},
116 {0x0000a148, 0x111d111e},
117 {0x0000a14c, 0x111b111c},
118 {0x0000a150, 0x22032204},
119 {0x0000a154, 0x22012202},
120 {0x0000a158, 0x221f2200},
121 {0x0000a15c, 0x221d221e},
122 {0x0000a160, 0x33013302},
123 {0x0000a164, 0x331f3300},
124 {0x0000a168, 0x4402331e},
125 {0x0000a16c, 0x44004401},
126 {0x0000a170, 0x441e441f},
127 {0x0000a174, 0x55015502},
128 {0x0000a178, 0x551f5500},
129 {0x0000a17c, 0x6602551e},
130 {0x0000a180, 0x66006601},
131 {0x0000a184, 0x661e661f},
132 {0x0000a188, 0x7703661d},
133 {0x0000a18c, 0x77017702},
134 {0x0000a190, 0x00007700},
135 {0x0000a194, 0x00000000},
136 {0x0000a198, 0x00000000},
137 {0x0000a19c, 0x00000000},
138 {0x0000a1a0, 0x00000000},
139 {0x0000a1a4, 0x00000000},
140 {0x0000a1a8, 0x00000000},
141 {0x0000a1ac, 0x00000000},
142 {0x0000a1b0, 0x00000000},
143 {0x0000a1b4, 0x00000000},
144 {0x0000a1b8, 0x00000000},
145 {0x0000a1bc, 0x00000000},
146 {0x0000a1c0, 0x00000000},
147 {0x0000a1c4, 0x00000000},
148 {0x0000a1c8, 0x00000000},
149 {0x0000a1cc, 0x00000000},
150 {0x0000a1d0, 0x00000000},
151 {0x0000a1d4, 0x00000000},
152 {0x0000a1d8, 0x00000000},
153 {0x0000a1dc, 0x00000000},
154 {0x0000a1e0, 0x00000000},
155 {0x0000a1e4, 0x00000000},
156 {0x0000a1e8, 0x00000000},
157 {0x0000a1ec, 0x00000000},
158 {0x0000a1f0, 0x00000396},
159 {0x0000a1f4, 0x00000396},
160 {0x0000a1f8, 0x00000396},
161 {0x0000a1fc, 0x00000296},
370}; 162};
371 163
372static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = { 164static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
373 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 165 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
374 {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002}, 166 {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
375 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8}, 167 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
376 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 168 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -442,102 +234,34 @@ static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = {
442 {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260}, 234 {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
443}; 235};
444 236
445static const u32 ar9485_modes_lowest_ob_db_tx_gain_1_1[][5] = { 237#define ar9485Modes_high_ob_db_tx_gain_1_1 ar9485Modes_high_power_tx_gain_1_1
446 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
447 {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
448 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
449 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
450 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
451 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
452 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
453 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
454 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
455 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
456 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
457 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
458 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
459 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
460 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
461 {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
462 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
463 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
464 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
465 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
466 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
467 {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
468 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
469 {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
470 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
471 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
472 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
473 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
474 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
475 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
476 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
477 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
478 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
479 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
480 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
481 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
482 {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
483 {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
484 {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
485 {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
486 {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
487 {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
488 {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
489 {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
490 {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
491 {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
492 {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
493 {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
494 {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
495 {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
496 {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
497 {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
498 {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
499 {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
500 {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
501 {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
502 {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
503 {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
504 {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
505 {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
506 {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
507 {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
508 {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
509 {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
510 {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
511 {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
512 {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
513 {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
514 {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
515 {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
516};
517 238
518static const u32 ar9485_1_1_radio_postamble[][2] = { 239#define ar9485Modes_low_ob_db_tx_gain_1_1 ar9485Modes_high_ob_db_tx_gain_1_1
519 /* Addr allmodes */
520 {0x0001609c, 0x0b283f31},
521 {0x000160ac, 0x24611800},
522 {0x000160b0, 0x03284f3e},
523 {0x0001610c, 0x00170000},
524 {0x00016140, 0x50804008},
525};
526 240
527static const u32 ar9485_1_1_mac_postamble[][5] = { 241#define ar9485_modes_lowest_ob_db_tx_gain_1_1 ar9485Modes_low_ob_db_tx_gain_1_1
528 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 242
529 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160}, 243static const u32 ar9485_1_1[][2] = {
530 {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c}, 244 /* Addr allmodes */
531 {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38}, 245 {0x0000a580, 0x00000000},
532 {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00}, 246 {0x0000a584, 0x00000000},
533 {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b}, 247 {0x0000a588, 0x00000000},
534 {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810}, 248 {0x0000a58c, 0x00000000},
535 {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a}, 249 {0x0000a590, 0x00000000},
536 {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440}, 250 {0x0000a594, 0x00000000},
251 {0x0000a598, 0x00000000},
252 {0x0000a59c, 0x00000000},
253 {0x0000a5a0, 0x00000000},
254 {0x0000a5a4, 0x00000000},
255 {0x0000a5a8, 0x00000000},
256 {0x0000a5ac, 0x00000000},
257 {0x0000a5b0, 0x00000000},
258 {0x0000a5b4, 0x00000000},
259 {0x0000a5b8, 0x00000000},
260 {0x0000a5bc, 0x00000000},
537}; 261};
538 262
539static const u32 ar9485_1_1_radio_core[][2] = { 263static const u32 ar9485_1_1_radio_core[][2] = {
540 /* Addr allmodes */ 264 /* Addr allmodes */
541 {0x00016000, 0x36db6db6}, 265 {0x00016000, 0x36db6db6},
542 {0x00016004, 0x6db6db40}, 266 {0x00016004, 0x6db6db40},
543 {0x00016008, 0x73800000}, 267 {0x00016008, 0x73800000},
@@ -601,294 +325,145 @@ static const u32 ar9485_1_1_radio_core[][2] = {
601 {0x00016c44, 0x12000000}, 325 {0x00016c44, 0x12000000},
602}; 326};
603 327
604static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_enable_L1[][2] = { 328static const u32 ar9485_1_1_baseband_core[][2] = {
605 /* Addr allmodes */ 329 /* Addr allmodes */
606 {0x00018c00, 0x18052e5e}, 330 {0x00009800, 0xafe68e30},
607 {0x00018c04, 0x000801d8}, 331 {0x00009804, 0xfd14e000},
608 {0x00018c08, 0x0000080c}, 332 {0x00009808, 0x9c0a8f6b},
609}; 333 {0x0000980c, 0x04800000},
610 334 {0x00009814, 0x9280c00a},
611static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = { 335 {0x00009818, 0x00000000},
612 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 336 {0x0000981c, 0x00020028},
613 {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002}, 337 {0x00009834, 0x5f3ca3de},
614 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8}, 338 {0x00009838, 0x0108ecff},
615 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 339 {0x0000983c, 0x14750600},
616 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000}, 340 {0x00009880, 0x201fff00},
617 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002}, 341 {0x00009884, 0x00001042},
618 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004}, 342 {0x000098a4, 0x00200400},
619 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200}, 343 {0x000098b0, 0x52440bbe},
620 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202}, 344 {0x000098d0, 0x004b6a8e},
621 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400}, 345 {0x000098d4, 0x00000820},
622 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402}, 346 {0x000098dc, 0x00000000},
623 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404}, 347 {0x000098f0, 0x00000000},
624 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603}, 348 {0x000098f4, 0x00000000},
625 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605}, 349 {0x00009c04, 0x00000000},
626 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03}, 350 {0x00009c08, 0x03200000},
627 {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04}, 351 {0x00009c0c, 0x00000000},
628 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20}, 352 {0x00009c10, 0x00000000},
629 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21}, 353 {0x00009c14, 0x00046384},
630 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62}, 354 {0x00009c18, 0x05b6b440},
631 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63}, 355 {0x00009c1c, 0x00b6b440},
632 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65}, 356 {0x00009d00, 0xc080a333},
633 {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66}, 357 {0x00009d04, 0x40206c10},
634 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645}, 358 {0x00009d08, 0x009c4060},
635 {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865}, 359 {0x00009d0c, 0x1883800a},
636 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86}, 360 {0x00009d10, 0x01834061},
637 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9}, 361 {0x00009d14, 0x00c00400},
638 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb}, 362 {0x00009d18, 0x00000000},
639 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb}, 363 {0x00009d1c, 0x00000000},
640 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb}, 364 {0x00009e08, 0x0038233c},
641 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb}, 365 {0x00009e24, 0x9927b515},
642 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, 366 {0x00009e28, 0x12ef0200},
643 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, 367 {0x00009e30, 0x06336f77},
644 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, 368 {0x00009e34, 0x6af6532f},
645 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, 369 {0x00009e38, 0x0cc80c00},
646 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, 370 {0x00009e40, 0x0d261820},
647 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, 371 {0x00009e4c, 0x00001004},
648 {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 372 {0x00009e50, 0x00ff03f1},
649 {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 373 {0x00009fc0, 0x80be4788},
650 {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 374 {0x00009fc4, 0x0001efb5},
651 {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 375 {0x00009fcc, 0x40000014},
652 {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 376 {0x0000a20c, 0x00000000},
653 {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 377 {0x0000a210, 0x00000000},
654 {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 378 {0x0000a220, 0x00000000},
655 {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 379 {0x0000a224, 0x00000000},
656 {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 380 {0x0000a228, 0x10002310},
657 {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 381 {0x0000a23c, 0x00000000},
658 {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 382 {0x0000a244, 0x0c000000},
659 {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 383 {0x0000a2a0, 0x00000001},
660 {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 384 {0x0000a2c0, 0x00000001},
661 {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 385 {0x0000a2c8, 0x00000000},
662 {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 386 {0x0000a2cc, 0x18c43433},
663 {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 387 {0x0000a2d4, 0x00000000},
664 {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 388 {0x0000a2dc, 0x00000000},
665 {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 389 {0x0000a2e0, 0x00000000},
666 {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 390 {0x0000a2e4, 0x00000000},
667 {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 391 {0x0000a2e8, 0x00000000},
668 {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 392 {0x0000a2ec, 0x00000000},
669 {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 393 {0x0000a2f0, 0x00000000},
670 {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 394 {0x0000a2f4, 0x00000000},
671 {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 395 {0x0000a2f8, 0x00000000},
672 {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 396 {0x0000a344, 0x00000000},
673 {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 397 {0x0000a34c, 0x00000000},
674 {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 398 {0x0000a350, 0x0000a000},
675 {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 399 {0x0000a364, 0x00000000},
676 {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 400 {0x0000a370, 0x00000000},
677 {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 401 {0x0000a390, 0x00000001},
678 {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 402 {0x0000a394, 0x00000444},
679 {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 403 {0x0000a398, 0x001f0e0f},
680 {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db}, 404 {0x0000a39c, 0x0075393f},
681 {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260}, 405 {0x0000a3a0, 0xb79f6427},
682}; 406 {0x0000a3a4, 0x000000ff},
683 407 {0x0000a3a8, 0x3b3b3b3b},
684static const u32 ar9485_1_1[][2] = { 408 {0x0000a3ac, 0x2f2f2f2f},
685 /* Addr allmodes */ 409 {0x0000a3c0, 0x20202020},
686 {0x0000a580, 0x00000000}, 410 {0x0000a3c4, 0x22222220},
687 {0x0000a584, 0x00000000}, 411 {0x0000a3c8, 0x20200020},
688 {0x0000a588, 0x00000000}, 412 {0x0000a3cc, 0x20202020},
689 {0x0000a58c, 0x00000000}, 413 {0x0000a3d0, 0x20202020},
690 {0x0000a590, 0x00000000}, 414 {0x0000a3d4, 0x20202020},
691 {0x0000a594, 0x00000000}, 415 {0x0000a3d8, 0x20202020},
692 {0x0000a598, 0x00000000}, 416 {0x0000a3dc, 0x20202020},
693 {0x0000a59c, 0x00000000}, 417 {0x0000a3e0, 0x20202020},
694 {0x0000a5a0, 0x00000000}, 418 {0x0000a3e4, 0x20202020},
695 {0x0000a5a4, 0x00000000}, 419 {0x0000a3e8, 0x20202020},
696 {0x0000a5a8, 0x00000000}, 420 {0x0000a3ec, 0x20202020},
697 {0x0000a5ac, 0x00000000}, 421 {0x0000a3f0, 0x00000000},
698 {0x0000a5b0, 0x00000000}, 422 {0x0000a3f4, 0x00000006},
699 {0x0000a5b4, 0x00000000}, 423 {0x0000a3f8, 0x0cdbd380},
700 {0x0000a5b8, 0x00000000}, 424 {0x0000a3fc, 0x000f0f01},
701 {0x0000a5bc, 0x00000000}, 425 {0x0000a400, 0x8fa91f01},
702}; 426 {0x0000a404, 0x00000000},
703 427 {0x0000a408, 0x0e79e5c6},
704static const u32 ar9485_modes_green_ob_db_tx_gain_1_1[][5] = { 428 {0x0000a40c, 0x00820820},
705 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 429 {0x0000a414, 0x1ce739cf},
706 {0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003}, 430 {0x0000a418, 0x2d0019ce},
707 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8}, 431 {0x0000a41c, 0x1ce739ce},
708 {0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000}, 432 {0x0000a420, 0x000001ce},
709 {0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006}, 433 {0x0000a424, 0x1ce739ce},
710 {0x0000a504, 0x05062002, 0x05062002, 0x03000201, 0x03000201}, 434 {0x0000a428, 0x000001ce},
711 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x06000203, 0x06000203}, 435 {0x0000a42c, 0x1ce739ce},
712 {0x0000a50c, 0x11062202, 0x11062202, 0x0a000401, 0x0a000401}, 436 {0x0000a430, 0x1ce739ce},
713 {0x0000a510, 0x17022e00, 0x17022e00, 0x0e000403, 0x0e000403}, 437 {0x0000a434, 0x00000000},
714 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x12000405, 0x12000405}, 438 {0x0000a438, 0x00001801},
715 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x15000604, 0x15000604}, 439 {0x0000a43c, 0x00000000},
716 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x18000605, 0x18000605}, 440 {0x0000a440, 0x00000000},
717 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x1c000a04, 0x1c000a04}, 441 {0x0000a444, 0x00000000},
718 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x21000a06, 0x21000a06}, 442 {0x0000a448, 0x04000000},
719 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x29000a24, 0x29000a24}, 443 {0x0000a44c, 0x00000001},
720 {0x0000a52c, 0x41023e85, 0x41023e85, 0x2f000e21, 0x2f000e21}, 444 {0x0000a450, 0x00010000},
721 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x31000e20, 0x31000e20}, 445 {0x0000a5c4, 0xbfad9d74},
722 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x33000e20, 0x33000e20}, 446 {0x0000a5c8, 0x0048060a},
723 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62}, 447 {0x0000a5cc, 0x00000637},
724 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63}, 448 {0x0000a760, 0x03020100},
725 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65}, 449 {0x0000a764, 0x09080504},
726 {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66}, 450 {0x0000a768, 0x0d0c0b0a},
727 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645}, 451 {0x0000a76c, 0x13121110},
728 {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865}, 452 {0x0000a770, 0x31301514},
729 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86}, 453 {0x0000a774, 0x35343332},
730 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9}, 454 {0x0000a778, 0x00000036},
731 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb}, 455 {0x0000a780, 0x00000838},
732 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb}, 456 {0x0000a7c0, 0x00000000},
733 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb}, 457 {0x0000a7c4, 0xfffffffc},
734 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb}, 458 {0x0000a7c8, 0x00000000},
735 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, 459 {0x0000a7cc, 0x00000000},
736 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, 460 {0x0000a7d0, 0x00000000},
737 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, 461 {0x0000a7d4, 0x00000004},
738 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, 462 {0x0000a7dc, 0x00000000},
739 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
740 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
741 {0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
742 {0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
743 {0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
744 {0x0000b50c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
745 {0x0000b510, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
746 {0x0000b514, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
747 {0x0000b518, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
748 {0x0000b51c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
749 {0x0000b520, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
750 {0x0000b524, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
751 {0x0000b528, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
752 {0x0000b52c, 0x0000002a, 0x0000002a, 0x0000002a, 0x0000002a},
753 {0x0000b530, 0x0000003a, 0x0000003a, 0x0000003a, 0x0000003a},
754 {0x0000b534, 0x0000004a, 0x0000004a, 0x0000004a, 0x0000004a},
755 {0x0000b538, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
756 {0x0000b53c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
757 {0x0000b540, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
758 {0x0000b544, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
759 {0x0000b548, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
760 {0x0000b54c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
761 {0x0000b550, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
762 {0x0000b554, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
763 {0x0000b558, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
764 {0x0000b55c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
765 {0x0000b560, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
766 {0x0000b564, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
767 {0x0000b568, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
768 {0x0000b56c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
769 {0x0000b570, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
770 {0x0000b574, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
771 {0x0000b578, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
772 {0x0000b57c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
773 {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
774 {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
775};
776
777static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = {
778 /* Addr allmodes */
779 {0x00018c00, 0x18013e5e},
780 {0x00018c04, 0x000801d8},
781 {0x00018c08, 0x0000080c},
782};
783
784static const u32 ar9485_1_1_soc_preamble[][2] = {
785 /* Addr allmodes */
786 {0x00004014, 0xba280400},
787 {0x00004090, 0x00aa10aa},
788 {0x000040a4, 0x00a0c9c9},
789 {0x00007010, 0x00000022},
790 {0x00007020, 0x00000000},
791 {0x00007034, 0x00000002},
792 {0x00007038, 0x000004c2},
793 {0x00007048, 0x00000002},
794};
795
796static const u32 ar9485_1_1_baseband_core_txfir_coeff_japan_2484[][2] = {
797 /* Addr allmodes */
798 {0x0000a398, 0x00000000},
799 {0x0000a39c, 0x6f7f0301},
800 {0x0000a3a0, 0xca9228ee},
801};
802
803static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
804 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
805 {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
806 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
807 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
808 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
809 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
810 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
811 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
812 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
813 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
814 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
815 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
816 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
817 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
818 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
819 {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
820 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
821 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
822 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
823 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
824 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
825 {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
826 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
827 {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
828 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
829 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
830 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
831 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
832 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
833 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
834 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
835 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
836 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
837 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
838 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
839 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
840 {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
841 {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
842 {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
843 {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
844 {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
845 {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
846 {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
847 {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
848 {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
849 {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
850 {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
851 {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
852 {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
853 {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
854 {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
855 {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
856 {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
857 {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
858 {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
859 {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
860 {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
861 {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
862 {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
863 {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
864 {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
865 {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
866 {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
867 {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
868 {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
869 {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
870 {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
871 {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
872 {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
873 {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
874};
875
876static const u32 ar9485_fast_clock_1_1_baseband_postamble[][3] = {
877 /* Addr 5G_HT2 5G_HT40 */
878 {0x00009e00, 0x03721821, 0x03721821},
879 {0x0000a230, 0x0000400b, 0x00004016},
880 {0x0000a254, 0x00000898, 0x00001130},
881};
882
883static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = {
884 /* Addr allmodes */
885 {0x00018c00, 0x18012e5e},
886 {0x00018c04, 0x000801d8},
887 {0x00018c08, 0x0000080c},
888}; 463};
889 464
890static const u32 ar9485_common_rx_gain_1_1[][2] = { 465static const u32 ar9485_common_rx_gain_1_1[][2] = {
891 /* Addr allmodes */ 466 /* Addr allmodes */
892 {0x0000a000, 0x00010000}, 467 {0x0000a000, 0x00010000},
893 {0x0000a004, 0x00030002}, 468 {0x0000a004, 0x00030002},
894 {0x0000a008, 0x00050004}, 469 {0x0000a008, 0x00050004},
@@ -1019,143 +594,260 @@ static const u32 ar9485_common_rx_gain_1_1[][2] = {
1019 {0x0000a1fc, 0x00000296}, 594 {0x0000a1fc, 0x00000296},
1020}; 595};
1021 596
597static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_enable_L1[][2] = {
598 /* Addr allmodes */
599 {0x00018c00, 0x18052e5e},
600 {0x00018c04, 0x000801d8},
601 {0x00018c08, 0x0000080c},
602};
603
1022static const u32 ar9485_1_1_pcie_phy_clkreq_enable_L1[][2] = { 604static const u32 ar9485_1_1_pcie_phy_clkreq_enable_L1[][2] = {
1023 /* Addr allmodes */ 605 /* Addr allmodes */
1024 {0x00018c00, 0x18053e5e}, 606 {0x00018c00, 0x18053e5e},
1025 {0x00018c04, 0x000801d8}, 607 {0x00018c04, 0x000801d8},
1026 {0x00018c08, 0x0000080c}, 608 {0x00018c08, 0x0000080c},
1027}; 609};
1028 610
1029static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = { 611static const u32 ar9485_1_1_soc_preamble[][2] = {
1030 /* Addr allmodes */ 612 /* Addr allmodes */
1031 {0x0000a000, 0x00060005}, 613 {0x00004014, 0xba280400},
1032 {0x0000a004, 0x00810080}, 614 {0x00004090, 0x00aa10aa},
1033 {0x0000a008, 0x00830082}, 615 {0x000040a4, 0x00a0c9c9},
1034 {0x0000a00c, 0x00850084}, 616 {0x00007010, 0x00000022},
1035 {0x0000a010, 0x01820181}, 617 {0x00007020, 0x00000000},
1036 {0x0000a014, 0x01840183}, 618 {0x00007034, 0x00000002},
1037 {0x0000a018, 0x01880185}, 619 {0x00007038, 0x000004c2},
1038 {0x0000a01c, 0x018a0189}, 620 {0x00007048, 0x00000002},
1039 {0x0000a020, 0x02850284}, 621};
1040 {0x0000a024, 0x02890288}, 622
1041 {0x0000a028, 0x028b028a}, 623static const u32 ar9485_fast_clock_1_1_baseband_postamble[][3] = {
1042 {0x0000a02c, 0x03850384}, 624 /* Addr 5G_HT20 5G_HT40 */
1043 {0x0000a030, 0x03890388}, 625 {0x00009e00, 0x03721821, 0x03721821},
1044 {0x0000a034, 0x038b038a}, 626 {0x0000a230, 0x0000400b, 0x00004016},
1045 {0x0000a038, 0x038d038c}, 627 {0x0000a254, 0x00000898, 0x00001130},
1046 {0x0000a03c, 0x03910390}, 628};
1047 {0x0000a040, 0x03930392}, 629
1048 {0x0000a044, 0x03950394}, 630static const u32 ar9485_1_1_baseband_postamble[][5] = {
1049 {0x0000a048, 0x00000396}, 631 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1050 {0x0000a04c, 0x00000000}, 632 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
1051 {0x0000a050, 0x00000000}, 633 {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
1052 {0x0000a054, 0x00000000}, 634 {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
1053 {0x0000a058, 0x00000000}, 635 {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
1054 {0x0000a05c, 0x00000000}, 636 {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
1055 {0x0000a060, 0x00000000}, 637 {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
1056 {0x0000a064, 0x00000000}, 638 {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
1057 {0x0000a068, 0x00000000}, 639 {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
1058 {0x0000a06c, 0x00000000}, 640 {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
1059 {0x0000a070, 0x00000000}, 641 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
1060 {0x0000a074, 0x00000000}, 642 {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e},
1061 {0x0000a078, 0x00000000}, 643 {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
1062 {0x0000a07c, 0x00000000}, 644 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1063 {0x0000a080, 0x28282828}, 645 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
1064 {0x0000a084, 0x28282828}, 646 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
1065 {0x0000a088, 0x28282828}, 647 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
1066 {0x0000a08c, 0x28282828}, 648 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
1067 {0x0000a090, 0x28282828}, 649 {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
1068 {0x0000a094, 0x24242428}, 650 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
1069 {0x0000a098, 0x171e1e1e}, 651 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
1070 {0x0000a09c, 0x02020b0b}, 652 {0x0000a204, 0x01303fc0, 0x01303fc4, 0x01303fc4, 0x01303fc0},
1071 {0x0000a0a0, 0x02020202}, 653 {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
1072 {0x0000a0a4, 0x00000000}, 654 {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
1073 {0x0000a0a8, 0x00000000}, 655 {0x0000a234, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff},
1074 {0x0000a0ac, 0x00000000}, 656 {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
1075 {0x0000a0b0, 0x00000000}, 657 {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
1076 {0x0000a0b4, 0x00000000}, 658 {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
1077 {0x0000a0b8, 0x00000000}, 659 {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
1078 {0x0000a0bc, 0x00000000}, 660 {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
1079 {0x0000a0c0, 0x22072208}, 661 {0x0000a260, 0x3a021501, 0x3a021501, 0x3a021501, 0x3a021501},
1080 {0x0000a0c4, 0x22052206}, 662 {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
1081 {0x0000a0c8, 0x22032204}, 663 {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
1082 {0x0000a0cc, 0x22012202}, 664 {0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0},
1083 {0x0000a0d0, 0x221f2200}, 665 {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1084 {0x0000a0d4, 0x221d221e}, 666 {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1085 {0x0000a0d8, 0x33023303}, 667 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
1086 {0x0000a0dc, 0x33003301}, 668 {0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982},
1087 {0x0000a0e0, 0x331e331f}, 669 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
1088 {0x0000a0e4, 0x4402331d}, 670 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1089 {0x0000a0e8, 0x44004401}, 671 {0x0000be04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
1090 {0x0000a0ec, 0x441e441f}, 672 {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1091 {0x0000a0f0, 0x55025503}, 673};
1092 {0x0000a0f4, 0x55005501}, 674
1093 {0x0000a0f8, 0x551e551f}, 675static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = {
1094 {0x0000a0fc, 0x6602551d}, 676 /* Addr allmodes */
1095 {0x0000a100, 0x66006601}, 677 {0x00018c00, 0x18013e5e},
1096 {0x0000a104, 0x661e661f}, 678 {0x00018c04, 0x000801d8},
1097 {0x0000a108, 0x7703661d}, 679 {0x00018c08, 0x0000080c},
1098 {0x0000a10c, 0x77017702}, 680};
1099 {0x0000a110, 0x00007700}, 681
1100 {0x0000a114, 0x00000000}, 682static const u32 ar9485_1_1_radio_postamble[][2] = {
1101 {0x0000a118, 0x00000000}, 683 /* Addr allmodes */
1102 {0x0000a11c, 0x00000000}, 684 {0x0001609c, 0x0b283f31},
1103 {0x0000a120, 0x00000000}, 685 {0x000160ac, 0x24611800},
1104 {0x0000a124, 0x00000000}, 686 {0x000160b0, 0x03284f3e},
1105 {0x0000a128, 0x00000000}, 687 {0x0001610c, 0x00170000},
1106 {0x0000a12c, 0x00000000}, 688 {0x00016140, 0x50804008},
1107 {0x0000a130, 0x00000000}, 689};
1108 {0x0000a134, 0x00000000}, 690
1109 {0x0000a138, 0x00000000}, 691static const u32 ar9485_1_1_mac_core[][2] = {
1110 {0x0000a13c, 0x00000000}, 692 /* Addr allmodes */
1111 {0x0000a140, 0x001f0000}, 693 {0x00000008, 0x00000000},
1112 {0x0000a144, 0x111f1100}, 694 {0x00000030, 0x00020085},
1113 {0x0000a148, 0x111d111e}, 695 {0x00000034, 0x00000005},
1114 {0x0000a14c, 0x111b111c}, 696 {0x00000040, 0x00000000},
1115 {0x0000a150, 0x22032204}, 697 {0x00000044, 0x00000000},
1116 {0x0000a154, 0x22012202}, 698 {0x00000048, 0x00000008},
1117 {0x0000a158, 0x221f2200}, 699 {0x0000004c, 0x00000010},
1118 {0x0000a15c, 0x221d221e}, 700 {0x00000050, 0x00000000},
1119 {0x0000a160, 0x33013302}, 701 {0x00001040, 0x002ffc0f},
1120 {0x0000a164, 0x331f3300}, 702 {0x00001044, 0x002ffc0f},
1121 {0x0000a168, 0x4402331e}, 703 {0x00001048, 0x002ffc0f},
1122 {0x0000a16c, 0x44004401}, 704 {0x0000104c, 0x002ffc0f},
1123 {0x0000a170, 0x441e441f}, 705 {0x00001050, 0x002ffc0f},
1124 {0x0000a174, 0x55015502}, 706 {0x00001054, 0x002ffc0f},
1125 {0x0000a178, 0x551f5500}, 707 {0x00001058, 0x002ffc0f},
1126 {0x0000a17c, 0x6602551e}, 708 {0x0000105c, 0x002ffc0f},
1127 {0x0000a180, 0x66006601}, 709 {0x00001060, 0x002ffc0f},
1128 {0x0000a184, 0x661e661f}, 710 {0x00001064, 0x002ffc0f},
1129 {0x0000a188, 0x7703661d}, 711 {0x000010f0, 0x00000100},
1130 {0x0000a18c, 0x77017702}, 712 {0x00001270, 0x00000000},
1131 {0x0000a190, 0x00007700}, 713 {0x000012b0, 0x00000000},
1132 {0x0000a194, 0x00000000}, 714 {0x000012f0, 0x00000000},
1133 {0x0000a198, 0x00000000}, 715 {0x0000143c, 0x00000000},
1134 {0x0000a19c, 0x00000000}, 716 {0x0000147c, 0x00000000},
1135 {0x0000a1a0, 0x00000000}, 717 {0x00008000, 0x00000000},
1136 {0x0000a1a4, 0x00000000}, 718 {0x00008004, 0x00000000},
1137 {0x0000a1a8, 0x00000000}, 719 {0x00008008, 0x00000000},
1138 {0x0000a1ac, 0x00000000}, 720 {0x0000800c, 0x00000000},
1139 {0x0000a1b0, 0x00000000}, 721 {0x00008018, 0x00000000},
1140 {0x0000a1b4, 0x00000000}, 722 {0x00008020, 0x00000000},
1141 {0x0000a1b8, 0x00000000}, 723 {0x00008038, 0x00000000},
1142 {0x0000a1bc, 0x00000000}, 724 {0x0000803c, 0x00000000},
1143 {0x0000a1c0, 0x00000000}, 725 {0x00008040, 0x00000000},
1144 {0x0000a1c4, 0x00000000}, 726 {0x00008044, 0x00000000},
1145 {0x0000a1c8, 0x00000000}, 727 {0x00008048, 0x00000000},
1146 {0x0000a1cc, 0x00000000}, 728 {0x0000804c, 0xffffffff},
1147 {0x0000a1d0, 0x00000000}, 729 {0x00008054, 0x00000000},
1148 {0x0000a1d4, 0x00000000}, 730 {0x00008058, 0x00000000},
1149 {0x0000a1d8, 0x00000000}, 731 {0x0000805c, 0x000fc78f},
1150 {0x0000a1dc, 0x00000000}, 732 {0x00008060, 0x0000000f},
1151 {0x0000a1e0, 0x00000000}, 733 {0x00008064, 0x00000000},
1152 {0x0000a1e4, 0x00000000}, 734 {0x00008070, 0x00000310},
1153 {0x0000a1e8, 0x00000000}, 735 {0x00008074, 0x00000020},
1154 {0x0000a1ec, 0x00000000}, 736 {0x00008078, 0x00000000},
1155 {0x0000a1f0, 0x00000396}, 737 {0x0000809c, 0x0000000f},
1156 {0x0000a1f4, 0x00000396}, 738 {0x000080a0, 0x00000000},
1157 {0x0000a1f8, 0x00000396}, 739 {0x000080a4, 0x02ff0000},
1158 {0x0000a1fc, 0x00000296}, 740 {0x000080a8, 0x0e070605},
741 {0x000080ac, 0x0000000d},
742 {0x000080b0, 0x00000000},
743 {0x000080b4, 0x00000000},
744 {0x000080b8, 0x00000000},
745 {0x000080bc, 0x00000000},
746 {0x000080c0, 0x2a800000},
747 {0x000080c4, 0x06900168},
748 {0x000080c8, 0x13881c22},
749 {0x000080cc, 0x01f40000},
750 {0x000080d0, 0x00252500},
751 {0x000080d4, 0x00a00000},
752 {0x000080d8, 0x00400000},
753 {0x000080dc, 0x00000000},
754 {0x000080e0, 0xffffffff},
755 {0x000080e4, 0x0000ffff},
756 {0x000080e8, 0x3f3f3f3f},
757 {0x000080ec, 0x00000000},
758 {0x000080f0, 0x00000000},
759 {0x000080f4, 0x00000000},
760 {0x000080fc, 0x00020000},
761 {0x00008100, 0x00000000},
762 {0x00008108, 0x00000052},
763 {0x0000810c, 0x00000000},
764 {0x00008110, 0x00000000},
765 {0x00008114, 0x000007ff},
766 {0x00008118, 0x000000aa},
767 {0x0000811c, 0x00003210},
768 {0x00008124, 0x00000000},
769 {0x00008128, 0x00000000},
770 {0x0000812c, 0x00000000},
771 {0x00008130, 0x00000000},
772 {0x00008134, 0x00000000},
773 {0x00008138, 0x00000000},
774 {0x0000813c, 0x0000ffff},
775 {0x00008144, 0xffffffff},
776 {0x00008168, 0x00000000},
777 {0x0000816c, 0x00000000},
778 {0x00008170, 0x18486200},
779 {0x00008174, 0x33332210},
780 {0x00008178, 0x00000000},
781 {0x0000817c, 0x00020000},
782 {0x000081c0, 0x00000000},
783 {0x000081c4, 0x33332210},
784 {0x000081d4, 0x00000000},
785 {0x000081ec, 0x00000000},
786 {0x000081f0, 0x00000000},
787 {0x000081f4, 0x00000000},
788 {0x000081f8, 0x00000000},
789 {0x000081fc, 0x00000000},
790 {0x00008240, 0x00100000},
791 {0x00008244, 0x0010f400},
792 {0x00008248, 0x00000800},
793 {0x0000824c, 0x0001e800},
794 {0x00008250, 0x00000000},
795 {0x00008254, 0x00000000},
796 {0x00008258, 0x00000000},
797 {0x0000825c, 0x40000000},
798 {0x00008260, 0x00080922},
799 {0x00008264, 0x9ca00010},
800 {0x00008268, 0xffffffff},
801 {0x0000826c, 0x0000ffff},
802 {0x00008270, 0x00000000},
803 {0x00008274, 0x40000000},
804 {0x00008278, 0x003e4180},
805 {0x0000827c, 0x00000004},
806 {0x00008284, 0x0000002c},
807 {0x00008288, 0x0000002c},
808 {0x0000828c, 0x000000ff},
809 {0x00008294, 0x00000000},
810 {0x00008298, 0x00000000},
811 {0x0000829c, 0x00000000},
812 {0x00008300, 0x00000140},
813 {0x00008314, 0x00000000},
814 {0x0000831c, 0x0000010d},
815 {0x00008328, 0x00000000},
816 {0x0000832c, 0x00000007},
817 {0x00008330, 0x00000302},
818 {0x00008334, 0x00000700},
819 {0x00008338, 0x00ff0000},
820 {0x0000833c, 0x02400000},
821 {0x00008340, 0x000107ff},
822 {0x00008344, 0xa248105b},
823 {0x00008348, 0x008f0000},
824 {0x0000835c, 0x00000000},
825 {0x00008360, 0xffffffff},
826 {0x00008364, 0xffffffff},
827 {0x00008368, 0x00000000},
828 {0x00008370, 0x00000000},
829 {0x00008374, 0x000000ff},
830 {0x00008378, 0x00000000},
831 {0x0000837c, 0x00000000},
832 {0x00008380, 0xffffffff},
833 {0x00008384, 0xffffffff},
834 {0x00008390, 0xffffffff},
835 {0x00008394, 0xffffffff},
836 {0x00008398, 0x00000000},
837 {0x0000839c, 0x00000000},
838 {0x000083a0, 0x00000000},
839 {0x000083a4, 0x0000fa14},
840 {0x000083a8, 0x000f0c00},
841 {0x000083ac, 0x33332210},
842 {0x000083b0, 0x33332210},
843 {0x000083b4, 0x33332210},
844 {0x000083b8, 0x33332210},
845 {0x000083bc, 0x00000000},
846 {0x000083c0, 0x00000000},
847 {0x000083c4, 0x00000000},
848 {0x000083c8, 0x00000000},
849 {0x000083cc, 0x00000200},
850 {0x000083d0, 0x000301ff},
1159}; 851};
1160 852
1161#endif 853#endif /* INITVALS_9485_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
new file mode 100644
index 000000000000..df97f21c52dc
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
@@ -0,0 +1,1284 @@
1/*
2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#ifndef INITVALS_955X_1P0_H
19#define INITVALS_955X_1P0_H
20
21/* AR955X 1.0 */
22
23static const u32 ar955x_1p0_radio_postamble[][5] = {
24 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
25 {0x00016098, 0xd2dd5554, 0xd2dd5554, 0xd28b3330, 0xd28b3330},
26 {0x0001609c, 0x0a566f3a, 0x0a566f3a, 0x06345f2a, 0x06345f2a},
27 {0x000160ac, 0xa4647c00, 0xa4647c00, 0xa4646800, 0xa4646800},
28 {0x000160b0, 0x01885f52, 0x01885f52, 0x04accf3a, 0x04accf3a},
29 {0x00016104, 0xb7a00001, 0xb7a00001, 0xb7a00001, 0xb7a00001},
30 {0x0001610c, 0xc0000000, 0xc0000000, 0xc0000000, 0xc0000000},
31 {0x00016140, 0x10804008, 0x10804008, 0x10804008, 0x10804008},
32 {0x00016504, 0xb7a00001, 0xb7a00001, 0xb7a00001, 0xb7a00001},
33 {0x0001650c, 0xc0000000, 0xc0000000, 0xc0000000, 0xc0000000},
34 {0x00016540, 0x10804008, 0x10804008, 0x10804008, 0x10804008},
35 {0x00016904, 0xb7a00001, 0xb7a00001, 0xb7a00001, 0xb7a00001},
36 {0x0001690c, 0xc0000000, 0xc0000000, 0xc0000000, 0xc0000000},
37 {0x00016940, 0x10804008, 0x10804008, 0x10804008, 0x10804008},
38};
39
40static const u32 ar955x_1p0_baseband_core_txfir_coeff_japan_2484[][2] = {
41 /* Addr allmodes */
42 {0x0000a398, 0x00000000},
43 {0x0000a39c, 0x6f7f0301},
44 {0x0000a3a0, 0xca9228ee},
45};
46
47static const u32 ar955x_1p0_baseband_postamble[][5] = {
48 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
49 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
50 {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
51 {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
52 {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
53 {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
54 {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
55 {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
56 {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
57 {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
58 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
59 {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
60 {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
61 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
62 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
63 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
64 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
65 {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcfa10822, 0xcfa10822},
66 {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
67 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
68 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
69 {0x0000a204, 0x005c0ec0, 0x005c0ec4, 0x005c0ec4, 0x005c0ec0},
70 {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
71 {0x0000a22c, 0x07e26a2f, 0x07e26a2f, 0x01026a2f, 0x01026a2f},
72 {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
73 {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
74 {0x0000a238, 0xffb01018, 0xffb01018, 0xffb01018, 0xffb01018},
75 {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
76 {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
77 {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
78 {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
79 {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
80 {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
81 {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
82 {0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
83 {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
84 {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
85 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
86 {0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33},
87 {0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982},
88 {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
89 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
90 {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
91 {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
92 {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
93 {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
94 {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
95 {0x0000b284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
96 {0x0000b830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
97 {0x0000be04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
98 {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
99 {0x0000be1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
100 {0x0000be20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
101 {0x0000c284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
102};
103
104static const u32 ar955x_1p0_radio_core[][2] = {
105 /* Addr allmodes */
106 {0x00016000, 0x36db6db6},
107 {0x00016004, 0x6db6db40},
108 {0x00016008, 0x73f00000},
109 {0x0001600c, 0x00000000},
110 {0x00016040, 0x7f80fff8},
111 {0x0001604c, 0x76d005b5},
112 {0x00016050, 0x557cf031},
113 {0x00016054, 0x13449440},
114 {0x00016058, 0x0c51c92c},
115 {0x0001605c, 0x3db7fffc},
116 {0x00016060, 0xfffffffc},
117 {0x00016064, 0x000f0278},
118 {0x00016068, 0x6db6db6c},
119 {0x0001606c, 0x6db60000},
120 {0x00016080, 0x00080000},
121 {0x00016084, 0x0e48048c},
122 {0x00016088, 0x14214514},
123 {0x0001608c, 0x119f101e},
124 {0x00016090, 0x24926490},
125 {0x00016094, 0x00000000},
126 {0x000160a0, 0x0a108ffe},
127 {0x000160a4, 0x812fc370},
128 {0x000160a8, 0x423c8000},
129 {0x000160b4, 0x92480080},
130 {0x000160c0, 0x006db6d0},
131 {0x000160c4, 0x6db6db60},
132 {0x000160c8, 0x6db6db6c},
133 {0x000160cc, 0x01e6c000},
134 {0x00016100, 0x11999601},
135 {0x00016108, 0x00080010},
136 {0x00016144, 0x02084080},
137 {0x00016148, 0x000080c0},
138 {0x00016280, 0x01800804},
139 {0x00016284, 0x00038dc5},
140 {0x00016288, 0x00000000},
141 {0x0001628c, 0x00000040},
142 {0x00016380, 0x00000000},
143 {0x00016384, 0x00000000},
144 {0x00016388, 0x00400705},
145 {0x0001638c, 0x00800700},
146 {0x00016390, 0x00800700},
147 {0x00016394, 0x00000000},
148 {0x00016398, 0x00000000},
149 {0x0001639c, 0x00000000},
150 {0x000163a0, 0x00000001},
151 {0x000163a4, 0x00000001},
152 {0x000163a8, 0x00000000},
153 {0x000163ac, 0x00000000},
154 {0x000163b0, 0x00000000},
155 {0x000163b4, 0x00000000},
156 {0x000163b8, 0x00000000},
157 {0x000163bc, 0x00000000},
158 {0x000163c0, 0x000000a0},
159 {0x000163c4, 0x000c0000},
160 {0x000163c8, 0x14021402},
161 {0x000163cc, 0x00001402},
162 {0x000163d0, 0x00000000},
163 {0x000163d4, 0x00000000},
164 {0x00016400, 0x36db6db6},
165 {0x00016404, 0x6db6db40},
166 {0x00016408, 0x73f00000},
167 {0x0001640c, 0x00000000},
168 {0x00016440, 0x7f80fff8},
169 {0x0001644c, 0x76d005b5},
170 {0x00016450, 0x557cf031},
171 {0x00016454, 0x13449440},
172 {0x00016458, 0x0c51c92c},
173 {0x0001645c, 0x3db7fffc},
174 {0x00016460, 0xfffffffc},
175 {0x00016464, 0x000f0278},
176 {0x00016468, 0x6db6db6c},
177 {0x0001646c, 0x6db60000},
178 {0x00016500, 0x11999601},
179 {0x00016508, 0x00080010},
180 {0x00016544, 0x02084080},
181 {0x00016548, 0x000080c0},
182 {0x00016780, 0x00000000},
183 {0x00016784, 0x00000000},
184 {0x00016788, 0x00400705},
185 {0x0001678c, 0x00800700},
186 {0x00016790, 0x00800700},
187 {0x00016794, 0x00000000},
188 {0x00016798, 0x00000000},
189 {0x0001679c, 0x00000000},
190 {0x000167a0, 0x00000001},
191 {0x000167a4, 0x00000001},
192 {0x000167a8, 0x00000000},
193 {0x000167ac, 0x00000000},
194 {0x000167b0, 0x00000000},
195 {0x000167b4, 0x00000000},
196 {0x000167b8, 0x00000000},
197 {0x000167bc, 0x00000000},
198 {0x000167c0, 0x000000a0},
199 {0x000167c4, 0x000c0000},
200 {0x000167c8, 0x14021402},
201 {0x000167cc, 0x00001402},
202 {0x000167d0, 0x00000000},
203 {0x000167d4, 0x00000000},
204 {0x00016800, 0x36db6db6},
205 {0x00016804, 0x6db6db40},
206 {0x00016808, 0x73f00000},
207 {0x0001680c, 0x00000000},
208 {0x00016840, 0x7f80fff8},
209 {0x0001684c, 0x76d005b5},
210 {0x00016850, 0x557cf031},
211 {0x00016854, 0x13449440},
212 {0x00016858, 0x0c51c92c},
213 {0x0001685c, 0x3db7fffc},
214 {0x00016860, 0xfffffffc},
215 {0x00016864, 0x000f0278},
216 {0x00016868, 0x6db6db6c},
217 {0x0001686c, 0x6db60000},
218 {0x00016900, 0x11999601},
219 {0x00016908, 0x00080010},
220 {0x00016944, 0x02084080},
221 {0x00016948, 0x000080c0},
222 {0x00016b80, 0x00000000},
223 {0x00016b84, 0x00000000},
224 {0x00016b88, 0x00400705},
225 {0x00016b8c, 0x00800700},
226 {0x00016b90, 0x00800700},
227 {0x00016b94, 0x00000000},
228 {0x00016b98, 0x00000000},
229 {0x00016b9c, 0x00000000},
230 {0x00016ba0, 0x00000001},
231 {0x00016ba4, 0x00000001},
232 {0x00016ba8, 0x00000000},
233 {0x00016bac, 0x00000000},
234 {0x00016bb0, 0x00000000},
235 {0x00016bb4, 0x00000000},
236 {0x00016bb8, 0x00000000},
237 {0x00016bbc, 0x00000000},
238 {0x00016bc0, 0x000000a0},
239 {0x00016bc4, 0x000c0000},
240 {0x00016bc8, 0x14021402},
241 {0x00016bcc, 0x00001402},
242 {0x00016bd0, 0x00000000},
243 {0x00016bd4, 0x00000000},
244};
245
246static const u32 ar955x_1p0_modes_xpa_tx_gain_table[][9] = {
247 /* Addr 5G_HT20_L 5G_HT40_L 5G_HT20_M 5G_HT40_M 5G_HT20_H 5G_HT40_H 2G_HT40 2G_HT20 */
248 {0x0000a2dc, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xfffd5aaa, 0xfffd5aaa},
249 {0x0000a2e0, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xfffe9ccc, 0xfffe9ccc},
250 {0x0000a2e4, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xffffe0f0, 0xffffe0f0},
251 {0x0000a2e8, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xfffcff00, 0xfffcff00},
252 {0x0000a410, 0x000050de, 0x000050de, 0x000050de, 0x000050de, 0x000050de, 0x000050de, 0x000050da, 0x000050da},
253 {0x0000a500, 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000000, 0x00000000},
254 {0x0000a504, 0x04000005, 0x04000005, 0x04000005, 0x04000005, 0x04000005, 0x04000005, 0x04000002, 0x04000002},
255 {0x0000a508, 0x08000009, 0x08000009, 0x08000009, 0x08000009, 0x08000009, 0x08000009, 0x08000004, 0x08000004},
256 {0x0000a50c, 0x0c00000b, 0x0c00000b, 0x0c00000b, 0x0c00000b, 0x0c00000b, 0x0c00000b, 0x0c000006, 0x0c000006},
257 {0x0000a510, 0x1000000d, 0x1000000d, 0x1000000d, 0x1000000d, 0x1000000d, 0x1000000d, 0x0f00000a, 0x0f00000a},
258 {0x0000a514, 0x14000011, 0x14000011, 0x14000011, 0x14000011, 0x14000011, 0x14000011, 0x1300000c, 0x1300000c},
259 {0x0000a518, 0x19004008, 0x19004008, 0x19004008, 0x19004008, 0x18004008, 0x18004008, 0x1700000e, 0x1700000e},
260 {0x0000a51c, 0x1d00400a, 0x1d00400a, 0x1d00400a, 0x1d00400a, 0x1c00400a, 0x1c00400a, 0x1b000064, 0x1b000064},
261 {0x0000a520, 0x230020a2, 0x230020a2, 0x210020a2, 0x210020a2, 0x200020a2, 0x200020a2, 0x1f000242, 0x1f000242},
262 {0x0000a524, 0x2500006e, 0x2500006e, 0x2500006e, 0x2500006e, 0x2400006e, 0x2400006e, 0x23000229, 0x23000229},
263 {0x0000a528, 0x29022221, 0x29022221, 0x28022221, 0x28022221, 0x27022221, 0x27022221, 0x270002a2, 0x270002a2},
264 {0x0000a52c, 0x2d00062a, 0x2d00062a, 0x2c00062a, 0x2c00062a, 0x2a00062a, 0x2a00062a, 0x2c001203, 0x2c001203},
265 {0x0000a530, 0x340220a5, 0x340220a5, 0x320220a5, 0x320220a5, 0x2f0220a5, 0x2f0220a5, 0x30001803, 0x30001803},
266 {0x0000a534, 0x380022c5, 0x380022c5, 0x350022c5, 0x350022c5, 0x320022c5, 0x320022c5, 0x33000881, 0x33000881},
267 {0x0000a538, 0x3b002486, 0x3b002486, 0x39002486, 0x39002486, 0x36002486, 0x36002486, 0x38001809, 0x38001809},
268 {0x0000a53c, 0x3f00248a, 0x3f00248a, 0x3d00248a, 0x3d00248a, 0x3a00248a, 0x3a00248a, 0x3a000814, 0x3a000814},
269 {0x0000a540, 0x4202242c, 0x4202242c, 0x4102242c, 0x4102242c, 0x3f02242c, 0x3f02242c, 0x3f001a0c, 0x3f001a0c},
270 {0x0000a544, 0x490044c6, 0x490044c6, 0x460044c6, 0x460044c6, 0x420044c6, 0x420044c6, 0x43001a0e, 0x43001a0e},
271 {0x0000a548, 0x4d024485, 0x4d024485, 0x4a024485, 0x4a024485, 0x46024485, 0x46024485, 0x46001812, 0x46001812},
272 {0x0000a54c, 0x51044483, 0x51044483, 0x4e044483, 0x4e044483, 0x4a044483, 0x4a044483, 0x49001884, 0x49001884},
273 {0x0000a550, 0x5404a40c, 0x5404a40c, 0x5204a40c, 0x5204a40c, 0x4d04a40c, 0x4d04a40c, 0x4d001e84, 0x4d001e84},
274 {0x0000a554, 0x57024632, 0x57024632, 0x55024632, 0x55024632, 0x52024632, 0x52024632, 0x50001e69, 0x50001e69},
275 {0x0000a558, 0x5c00a634, 0x5c00a634, 0x5900a634, 0x5900a634, 0x5600a634, 0x5600a634, 0x550006f4, 0x550006f4},
276 {0x0000a55c, 0x5f026832, 0x5f026832, 0x5d026832, 0x5d026832, 0x5a026832, 0x5a026832, 0x59000ad3, 0x59000ad3},
277 {0x0000a560, 0x6602b012, 0x6602b012, 0x6202b012, 0x6202b012, 0x5d02b012, 0x5d02b012, 0x5e000ad5, 0x5e000ad5},
278 {0x0000a564, 0x6e02d0e1, 0x6e02d0e1, 0x6802d0e1, 0x6802d0e1, 0x6002d0e1, 0x6002d0e1, 0x61001ced, 0x61001ced},
279 {0x0000a568, 0x7202b4c4, 0x7202b4c4, 0x6c02b4c4, 0x6c02b4c4, 0x6502b4c4, 0x6502b4c4, 0x660018d4, 0x660018d4},
280 {0x0000a56c, 0x75007894, 0x75007894, 0x70007894, 0x70007894, 0x6b007894, 0x6b007894, 0x660018d4, 0x660018d4},
281 {0x0000a570, 0x7b025c74, 0x7b025c74, 0x75025c74, 0x75025c74, 0x70025c74, 0x70025c74, 0x660018d4, 0x660018d4},
282 {0x0000a574, 0x8300bcb5, 0x8300bcb5, 0x7a00bcb5, 0x7a00bcb5, 0x7600bcb5, 0x7600bcb5, 0x660018d4, 0x660018d4},
283 {0x0000a578, 0x8a04dc74, 0x8a04dc74, 0x7f04dc74, 0x7f04dc74, 0x7c04dc74, 0x7c04dc74, 0x660018d4, 0x660018d4},
284 {0x0000a57c, 0x8a04dc74, 0x8a04dc74, 0x7f04dc74, 0x7f04dc74, 0x7c04dc74, 0x7c04dc74, 0x660018d4, 0x660018d4},
285 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
286 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
287 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
288 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x03804000, 0x03804000},
289 {0x0000a610, 0x04c08c01, 0x04c08c01, 0x04808b01, 0x04808b01, 0x04808a01, 0x04808a01, 0x0300ca02, 0x0300ca02},
290 {0x0000a614, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00000e04, 0x00000e04},
291 {0x0000a618, 0x04010c01, 0x04010c01, 0x03c10b01, 0x03c10b01, 0x03810a01, 0x03810a01, 0x03014000, 0x03014000},
292 {0x0000a61c, 0x03814e05, 0x03814e05, 0x03414d05, 0x03414d05, 0x03414d05, 0x03414d05, 0x00000000, 0x00000000},
293 {0x0000a620, 0x04010303, 0x04010303, 0x03c10303, 0x03c10303, 0x03810303, 0x03810303, 0x00000000, 0x00000000},
294 {0x0000a624, 0x03814e05, 0x03814e05, 0x03414d05, 0x03414d05, 0x03414d05, 0x03414d05, 0x03014000, 0x03014000},
295 {0x0000a628, 0x00c0c000, 0x00c0c000, 0x00c0c000, 0x00c0c000, 0x00c0c000, 0x00c0c000, 0x03804c05, 0x03804c05},
296 {0x0000a62c, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x0701de06, 0x0701de06},
297 {0x0000a630, 0x03418000, 0x03418000, 0x03018000, 0x03018000, 0x02c18000, 0x02c18000, 0x07819c07, 0x07819c07},
298 {0x0000a634, 0x03815004, 0x03815004, 0x03414f04, 0x03414f04, 0x03414e04, 0x03414e04, 0x0701dc07, 0x0701dc07},
299 {0x0000a638, 0x03005302, 0x03005302, 0x02c05202, 0x02c05202, 0x02805202, 0x02805202, 0x0701dc07, 0x0701dc07},
300 {0x0000a63c, 0x04c09302, 0x04c09302, 0x04809202, 0x04809202, 0x04809202, 0x04809202, 0x0701dc07, 0x0701dc07},
301 {0x0000b2dc, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xfffd5aaa, 0xfffd5aaa},
302 {0x0000b2e0, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xfffe9ccc, 0xfffe9ccc},
303 {0x0000b2e4, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xffffe0f0, 0xffffe0f0},
304 {0x0000b2e8, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xfffcff00, 0xfffcff00},
305 {0x0000c2dc, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xfffd5aaa, 0xfffd5aaa},
306 {0x0000c2e0, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xfffe9ccc, 0xfffe9ccc},
307 {0x0000c2e4, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xffffe0f0, 0xffffe0f0},
308 {0x0000c2e8, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xfffcff00, 0xfffcff00},
309 {0x00016044, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x010002d4, 0x010002d4},
310 {0x00016048, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x66482401, 0x66482401},
311 {0x00016280, 0x01801e84, 0x01801e84, 0x01801e84, 0x01801e84, 0x01801e84, 0x01801e84, 0x01808e84, 0x01808e84},
312 {0x00016444, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x010002d4, 0x010002d4},
313 {0x00016448, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x66482401, 0x66482401},
314 {0x00016844, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x010002d4, 0x010002d4},
315 {0x00016848, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x66482401, 0x66482401},
316};
317
318static const u32 ar955x_1p0_mac_core[][2] = {
319 /* Addr allmodes */
320 {0x00000008, 0x00000000},
321 {0x00000030, 0x00020085},
322 {0x00000034, 0x00000005},
323 {0x00000040, 0x00000000},
324 {0x00000044, 0x00000000},
325 {0x00000048, 0x00000008},
326 {0x0000004c, 0x00000010},
327 {0x00000050, 0x00000000},
328 {0x00001040, 0x002ffc0f},
329 {0x00001044, 0x002ffc0f},
330 {0x00001048, 0x002ffc0f},
331 {0x0000104c, 0x002ffc0f},
332 {0x00001050, 0x002ffc0f},
333 {0x00001054, 0x002ffc0f},
334 {0x00001058, 0x002ffc0f},
335 {0x0000105c, 0x002ffc0f},
336 {0x00001060, 0x002ffc0f},
337 {0x00001064, 0x002ffc0f},
338 {0x000010f0, 0x00000100},
339 {0x00001270, 0x00000000},
340 {0x000012b0, 0x00000000},
341 {0x000012f0, 0x00000000},
342 {0x0000143c, 0x00000000},
343 {0x0000147c, 0x00000000},
344 {0x00008000, 0x00000000},
345 {0x00008004, 0x00000000},
346 {0x00008008, 0x00000000},
347 {0x0000800c, 0x00000000},
348 {0x00008018, 0x00000000},
349 {0x00008020, 0x00000000},
350 {0x00008038, 0x00000000},
351 {0x0000803c, 0x00000000},
352 {0x00008040, 0x00000000},
353 {0x00008044, 0x00000000},
354 {0x00008048, 0x00000000},
355 {0x0000804c, 0xffffffff},
356 {0x00008054, 0x00000000},
357 {0x00008058, 0x00000000},
358 {0x0000805c, 0x000fc78f},
359 {0x00008060, 0x0000000f},
360 {0x00008064, 0x00000000},
361 {0x00008070, 0x00000310},
362 {0x00008074, 0x00000020},
363 {0x00008078, 0x00000000},
364 {0x0000809c, 0x0000000f},
365 {0x000080a0, 0x00000000},
366 {0x000080a4, 0x02ff0000},
367 {0x000080a8, 0x0e070605},
368 {0x000080ac, 0x0000000d},
369 {0x000080b0, 0x00000000},
370 {0x000080b4, 0x00000000},
371 {0x000080b8, 0x00000000},
372 {0x000080bc, 0x00000000},
373 {0x000080c0, 0x2a800000},
374 {0x000080c4, 0x06900168},
375 {0x000080c8, 0x13881c22},
376 {0x000080cc, 0x01f40000},
377 {0x000080d0, 0x00252500},
378 {0x000080d4, 0x00a00000},
379 {0x000080d8, 0x00400000},
380 {0x000080dc, 0x00000000},
381 {0x000080e0, 0xffffffff},
382 {0x000080e4, 0x0000ffff},
383 {0x000080e8, 0x3f3f3f3f},
384 {0x000080ec, 0x00000000},
385 {0x000080f0, 0x00000000},
386 {0x000080f4, 0x00000000},
387 {0x000080fc, 0x00020000},
388 {0x00008100, 0x00000000},
389 {0x00008108, 0x00000052},
390 {0x0000810c, 0x00000000},
391 {0x00008110, 0x00000000},
392 {0x00008114, 0x000007ff},
393 {0x00008118, 0x000000aa},
394 {0x0000811c, 0x00003210},
395 {0x00008124, 0x00000000},
396 {0x00008128, 0x00000000},
397 {0x0000812c, 0x00000000},
398 {0x00008130, 0x00000000},
399 {0x00008134, 0x00000000},
400 {0x00008138, 0x00000000},
401 {0x0000813c, 0x0000ffff},
402 {0x00008140, 0x000000fe},
403 {0x00008144, 0xffffffff},
404 {0x00008168, 0x00000000},
405 {0x0000816c, 0x00000000},
406 {0x000081c0, 0x00000000},
407 {0x000081c4, 0x33332210},
408 {0x000081ec, 0x00000000},
409 {0x000081f0, 0x00000000},
410 {0x000081f4, 0x00000000},
411 {0x000081f8, 0x00000000},
412 {0x000081fc, 0x00000000},
413 {0x00008240, 0x00100000},
414 {0x00008244, 0x0010f400},
415 {0x00008248, 0x00000800},
416 {0x0000824c, 0x0001e800},
417 {0x00008250, 0x00000000},
418 {0x00008254, 0x00000000},
419 {0x00008258, 0x00000000},
420 {0x0000825c, 0x40000000},
421 {0x00008260, 0x00080922},
422 {0x00008264, 0x9d400010},
423 {0x00008268, 0xffffffff},
424 {0x0000826c, 0x0000ffff},
425 {0x00008270, 0x00000000},
426 {0x00008274, 0x40000000},
427 {0x00008278, 0x003e4180},
428 {0x0000827c, 0x00000004},
429 {0x00008284, 0x0000002c},
430 {0x00008288, 0x0000002c},
431 {0x0000828c, 0x000000ff},
432 {0x00008294, 0x00000000},
433 {0x00008298, 0x00000000},
434 {0x0000829c, 0x00000000},
435 {0x00008300, 0x00001d40},
436 {0x00008314, 0x00000000},
437 {0x0000831c, 0x0000010d},
438 {0x00008328, 0x00000000},
439 {0x0000832c, 0x0000001f},
440 {0x00008330, 0x00000302},
441 {0x00008334, 0x00000700},
442 {0x00008338, 0xffff0000},
443 {0x0000833c, 0x02400000},
444 {0x00008340, 0x000107ff},
445 {0x00008344, 0xaa48107b},
446 {0x00008348, 0x008f0000},
447 {0x0000835c, 0x00000000},
448 {0x00008360, 0xffffffff},
449 {0x00008364, 0xffffffff},
450 {0x00008368, 0x00000000},
451 {0x00008370, 0x00000000},
452 {0x00008374, 0x000000ff},
453 {0x00008378, 0x00000000},
454 {0x0000837c, 0x00000000},
455 {0x00008380, 0xffffffff},
456 {0x00008384, 0xffffffff},
457 {0x00008390, 0xffffffff},
458 {0x00008394, 0xffffffff},
459 {0x00008398, 0x00000000},
460 {0x0000839c, 0x00000000},
461 {0x000083a0, 0x00000000},
462 {0x000083a4, 0x0000fa14},
463 {0x000083a8, 0x000f0c00},
464 {0x000083ac, 0x33332210},
465 {0x000083b0, 0x33332210},
466 {0x000083b4, 0x33332210},
467 {0x000083b8, 0x33332210},
468 {0x000083bc, 0x00000000},
469 {0x000083c0, 0x00000000},
470 {0x000083c4, 0x00000000},
471 {0x000083c8, 0x00000000},
472 {0x000083cc, 0x00000200},
473 {0x000083d0, 0x8c7901ff},
474};
475
476static const u32 ar955x_1p0_common_rx_gain_table[][2] = {
477 /* Addr allmodes */
478 {0x0000a000, 0x00010000},
479 {0x0000a004, 0x00030002},
480 {0x0000a008, 0x00050004},
481 {0x0000a00c, 0x00810080},
482 {0x0000a010, 0x00830082},
483 {0x0000a014, 0x01810180},
484 {0x0000a018, 0x01830182},
485 {0x0000a01c, 0x01850184},
486 {0x0000a020, 0x01890188},
487 {0x0000a024, 0x018b018a},
488 {0x0000a028, 0x018d018c},
489 {0x0000a02c, 0x01910190},
490 {0x0000a030, 0x01930192},
491 {0x0000a034, 0x01950194},
492 {0x0000a038, 0x038a0196},
493 {0x0000a03c, 0x038c038b},
494 {0x0000a040, 0x0390038d},
495 {0x0000a044, 0x03920391},
496 {0x0000a048, 0x03940393},
497 {0x0000a04c, 0x03960395},
498 {0x0000a050, 0x00000000},
499 {0x0000a054, 0x00000000},
500 {0x0000a058, 0x00000000},
501 {0x0000a05c, 0x00000000},
502 {0x0000a060, 0x00000000},
503 {0x0000a064, 0x00000000},
504 {0x0000a068, 0x00000000},
505 {0x0000a06c, 0x00000000},
506 {0x0000a070, 0x00000000},
507 {0x0000a074, 0x00000000},
508 {0x0000a078, 0x00000000},
509 {0x0000a07c, 0x00000000},
510 {0x0000a080, 0x22222229},
511 {0x0000a084, 0x1d1d1d1d},
512 {0x0000a088, 0x1d1d1d1d},
513 {0x0000a08c, 0x1d1d1d1d},
514 {0x0000a090, 0x171d1d1d},
515 {0x0000a094, 0x11111717},
516 {0x0000a098, 0x00030311},
517 {0x0000a09c, 0x00000000},
518 {0x0000a0a0, 0x00000000},
519 {0x0000a0a4, 0x00000000},
520 {0x0000a0a8, 0x00000000},
521 {0x0000a0ac, 0x00000000},
522 {0x0000a0b0, 0x00000000},
523 {0x0000a0b4, 0x00000000},
524 {0x0000a0b8, 0x00000000},
525 {0x0000a0bc, 0x00000000},
526 {0x0000a0c0, 0x001f0000},
527 {0x0000a0c4, 0x01000101},
528 {0x0000a0c8, 0x011e011f},
529 {0x0000a0cc, 0x011c011d},
530 {0x0000a0d0, 0x02030204},
531 {0x0000a0d4, 0x02010202},
532 {0x0000a0d8, 0x021f0200},
533 {0x0000a0dc, 0x0302021e},
534 {0x0000a0e0, 0x03000301},
535 {0x0000a0e4, 0x031e031f},
536 {0x0000a0e8, 0x0402031d},
537 {0x0000a0ec, 0x04000401},
538 {0x0000a0f0, 0x041e041f},
539 {0x0000a0f4, 0x0502041d},
540 {0x0000a0f8, 0x05000501},
541 {0x0000a0fc, 0x051e051f},
542 {0x0000a100, 0x06010602},
543 {0x0000a104, 0x061f0600},
544 {0x0000a108, 0x061d061e},
545 {0x0000a10c, 0x07020703},
546 {0x0000a110, 0x07000701},
547 {0x0000a114, 0x00000000},
548 {0x0000a118, 0x00000000},
549 {0x0000a11c, 0x00000000},
550 {0x0000a120, 0x00000000},
551 {0x0000a124, 0x00000000},
552 {0x0000a128, 0x00000000},
553 {0x0000a12c, 0x00000000},
554 {0x0000a130, 0x00000000},
555 {0x0000a134, 0x00000000},
556 {0x0000a138, 0x00000000},
557 {0x0000a13c, 0x00000000},
558 {0x0000a140, 0x001f0000},
559 {0x0000a144, 0x01000101},
560 {0x0000a148, 0x011e011f},
561 {0x0000a14c, 0x011c011d},
562 {0x0000a150, 0x02030204},
563 {0x0000a154, 0x02010202},
564 {0x0000a158, 0x021f0200},
565 {0x0000a15c, 0x0302021e},
566 {0x0000a160, 0x03000301},
567 {0x0000a164, 0x031e031f},
568 {0x0000a168, 0x0402031d},
569 {0x0000a16c, 0x04000401},
570 {0x0000a170, 0x041e041f},
571 {0x0000a174, 0x0502041d},
572 {0x0000a178, 0x05000501},
573 {0x0000a17c, 0x051e051f},
574 {0x0000a180, 0x06010602},
575 {0x0000a184, 0x061f0600},
576 {0x0000a188, 0x061d061e},
577 {0x0000a18c, 0x07020703},
578 {0x0000a190, 0x07000701},
579 {0x0000a194, 0x00000000},
580 {0x0000a198, 0x00000000},
581 {0x0000a19c, 0x00000000},
582 {0x0000a1a0, 0x00000000},
583 {0x0000a1a4, 0x00000000},
584 {0x0000a1a8, 0x00000000},
585 {0x0000a1ac, 0x00000000},
586 {0x0000a1b0, 0x00000000},
587 {0x0000a1b4, 0x00000000},
588 {0x0000a1b8, 0x00000000},
589 {0x0000a1bc, 0x00000000},
590 {0x0000a1c0, 0x00000000},
591 {0x0000a1c4, 0x00000000},
592 {0x0000a1c8, 0x00000000},
593 {0x0000a1cc, 0x00000000},
594 {0x0000a1d0, 0x00000000},
595 {0x0000a1d4, 0x00000000},
596 {0x0000a1d8, 0x00000000},
597 {0x0000a1dc, 0x00000000},
598 {0x0000a1e0, 0x00000000},
599 {0x0000a1e4, 0x00000000},
600 {0x0000a1e8, 0x00000000},
601 {0x0000a1ec, 0x00000000},
602 {0x0000a1f0, 0x00000396},
603 {0x0000a1f4, 0x00000396},
604 {0x0000a1f8, 0x00000396},
605 {0x0000a1fc, 0x00000196},
606 {0x0000b000, 0x00010000},
607 {0x0000b004, 0x00030002},
608 {0x0000b008, 0x00050004},
609 {0x0000b00c, 0x00810080},
610 {0x0000b010, 0x00830082},
611 {0x0000b014, 0x01810180},
612 {0x0000b018, 0x01830182},
613 {0x0000b01c, 0x01850184},
614 {0x0000b020, 0x02810280},
615 {0x0000b024, 0x02830282},
616 {0x0000b028, 0x02850284},
617 {0x0000b02c, 0x02890288},
618 {0x0000b030, 0x028b028a},
619 {0x0000b034, 0x0388028c},
620 {0x0000b038, 0x038a0389},
621 {0x0000b03c, 0x038c038b},
622 {0x0000b040, 0x0390038d},
623 {0x0000b044, 0x03920391},
624 {0x0000b048, 0x03940393},
625 {0x0000b04c, 0x03960395},
626 {0x0000b050, 0x00000000},
627 {0x0000b054, 0x00000000},
628 {0x0000b058, 0x00000000},
629 {0x0000b05c, 0x00000000},
630 {0x0000b060, 0x00000000},
631 {0x0000b064, 0x00000000},
632 {0x0000b068, 0x00000000},
633 {0x0000b06c, 0x00000000},
634 {0x0000b070, 0x00000000},
635 {0x0000b074, 0x00000000},
636 {0x0000b078, 0x00000000},
637 {0x0000b07c, 0x00000000},
638 {0x0000b080, 0x23232323},
639 {0x0000b084, 0x21232323},
640 {0x0000b088, 0x19191c1e},
641 {0x0000b08c, 0x12141417},
642 {0x0000b090, 0x07070e0e},
643 {0x0000b094, 0x03030305},
644 {0x0000b098, 0x00000003},
645 {0x0000b09c, 0x00000000},
646 {0x0000b0a0, 0x00000000},
647 {0x0000b0a4, 0x00000000},
648 {0x0000b0a8, 0x00000000},
649 {0x0000b0ac, 0x00000000},
650 {0x0000b0b0, 0x00000000},
651 {0x0000b0b4, 0x00000000},
652 {0x0000b0b8, 0x00000000},
653 {0x0000b0bc, 0x00000000},
654 {0x0000b0c0, 0x003f0020},
655 {0x0000b0c4, 0x00400041},
656 {0x0000b0c8, 0x0140005f},
657 {0x0000b0cc, 0x0160015f},
658 {0x0000b0d0, 0x017e017f},
659 {0x0000b0d4, 0x02410242},
660 {0x0000b0d8, 0x025f0240},
661 {0x0000b0dc, 0x027f0260},
662 {0x0000b0e0, 0x0341027e},
663 {0x0000b0e4, 0x035f0340},
664 {0x0000b0e8, 0x037f0360},
665 {0x0000b0ec, 0x04400441},
666 {0x0000b0f0, 0x0460045f},
667 {0x0000b0f4, 0x0541047f},
668 {0x0000b0f8, 0x055f0540},
669 {0x0000b0fc, 0x057f0560},
670 {0x0000b100, 0x06400641},
671 {0x0000b104, 0x0660065f},
672 {0x0000b108, 0x067e067f},
673 {0x0000b10c, 0x07410742},
674 {0x0000b110, 0x075f0740},
675 {0x0000b114, 0x077f0760},
676 {0x0000b118, 0x07800781},
677 {0x0000b11c, 0x07a0079f},
678 {0x0000b120, 0x07c107bf},
679 {0x0000b124, 0x000007c0},
680 {0x0000b128, 0x00000000},
681 {0x0000b12c, 0x00000000},
682 {0x0000b130, 0x00000000},
683 {0x0000b134, 0x00000000},
684 {0x0000b138, 0x00000000},
685 {0x0000b13c, 0x00000000},
686 {0x0000b140, 0x003f0020},
687 {0x0000b144, 0x00400041},
688 {0x0000b148, 0x0140005f},
689 {0x0000b14c, 0x0160015f},
690 {0x0000b150, 0x017e017f},
691 {0x0000b154, 0x02410242},
692 {0x0000b158, 0x025f0240},
693 {0x0000b15c, 0x027f0260},
694 {0x0000b160, 0x0341027e},
695 {0x0000b164, 0x035f0340},
696 {0x0000b168, 0x037f0360},
697 {0x0000b16c, 0x04400441},
698 {0x0000b170, 0x0460045f},
699 {0x0000b174, 0x0541047f},
700 {0x0000b178, 0x055f0540},
701 {0x0000b17c, 0x057f0560},
702 {0x0000b180, 0x06400641},
703 {0x0000b184, 0x0660065f},
704 {0x0000b188, 0x067e067f},
705 {0x0000b18c, 0x07410742},
706 {0x0000b190, 0x075f0740},
707 {0x0000b194, 0x077f0760},
708 {0x0000b198, 0x07800781},
709 {0x0000b19c, 0x07a0079f},
710 {0x0000b1a0, 0x07c107bf},
711 {0x0000b1a4, 0x000007c0},
712 {0x0000b1a8, 0x00000000},
713 {0x0000b1ac, 0x00000000},
714 {0x0000b1b0, 0x00000000},
715 {0x0000b1b4, 0x00000000},
716 {0x0000b1b8, 0x00000000},
717 {0x0000b1bc, 0x00000000},
718 {0x0000b1c0, 0x00000000},
719 {0x0000b1c4, 0x00000000},
720 {0x0000b1c8, 0x00000000},
721 {0x0000b1cc, 0x00000000},
722 {0x0000b1d0, 0x00000000},
723 {0x0000b1d4, 0x00000000},
724 {0x0000b1d8, 0x00000000},
725 {0x0000b1dc, 0x00000000},
726 {0x0000b1e0, 0x00000000},
727 {0x0000b1e4, 0x00000000},
728 {0x0000b1e8, 0x00000000},
729 {0x0000b1ec, 0x00000000},
730 {0x0000b1f0, 0x00000396},
731 {0x0000b1f4, 0x00000396},
732 {0x0000b1f8, 0x00000396},
733 {0x0000b1fc, 0x00000196},
734};
735
736static const u32 ar955x_1p0_baseband_core[][2] = {
737 /* Addr allmodes */
738 {0x00009800, 0xafe68e30},
739 {0x00009804, 0xfd14e000},
740 {0x00009808, 0x9c0a9f6b},
741 {0x0000980c, 0x04900000},
742 {0x00009814, 0x0280c00a},
743 {0x00009818, 0x00000000},
744 {0x0000981c, 0x00020028},
745 {0x00009834, 0x6400a190},
746 {0x00009838, 0x0108ecff},
747 {0x0000983c, 0x14000600},
748 {0x00009880, 0x201fff00},
749 {0x00009884, 0x00001042},
750 {0x000098a4, 0x00200400},
751 {0x000098b0, 0x32840bbe},
752 {0x000098bc, 0x00000002},
753 {0x000098d0, 0x004b6a8e},
754 {0x000098d4, 0x00000820},
755 {0x000098dc, 0x00000000},
756 {0x000098f0, 0x00000000},
757 {0x000098f4, 0x00000000},
758 {0x00009c04, 0xff55ff55},
759 {0x00009c08, 0x0320ff55},
760 {0x00009c0c, 0x00000000},
761 {0x00009c10, 0x00000000},
762 {0x00009c14, 0x00046384},
763 {0x00009c18, 0x05b6b440},
764 {0x00009c1c, 0x00b6b440},
765 {0x00009d00, 0xc080a333},
766 {0x00009d04, 0x40206c10},
767 {0x00009d08, 0x009c4060},
768 {0x00009d0c, 0x9883800a},
769 {0x00009d10, 0x01834061},
770 {0x00009d14, 0x00c0040b},
771 {0x00009d18, 0x00000000},
772 {0x00009e08, 0x0038230c},
773 {0x00009e24, 0x990bb515},
774 {0x00009e28, 0x0c6f0000},
775 {0x00009e30, 0x06336f77},
776 {0x00009e34, 0x6af6532f},
777 {0x00009e38, 0x0cc80c00},
778 {0x00009e40, 0x0d261820},
779 {0x00009e4c, 0x00001004},
780 {0x00009e50, 0x00ff03f1},
781 {0x00009fc0, 0x813e4788},
782 {0x00009fc4, 0x0001efb5},
783 {0x00009fcc, 0x40000014},
784 {0x00009fd0, 0x01193b93},
785 {0x0000a20c, 0x00000000},
786 {0x0000a220, 0x00000000},
787 {0x0000a224, 0x00000000},
788 {0x0000a228, 0x10002310},
789 {0x0000a23c, 0x00000000},
790 {0x0000a244, 0x0c000000},
791 {0x0000a248, 0x00000140},
792 {0x0000a2a0, 0x00000007},
793 {0x0000a2c0, 0x00000007},
794 {0x0000a2c8, 0x00000000},
795 {0x0000a2d4, 0x00000000},
796 {0x0000a2ec, 0x00000000},
797 {0x0000a2f0, 0x00000000},
798 {0x0000a2f4, 0x00000000},
799 {0x0000a2f8, 0x00000000},
800 {0x0000a344, 0x00000000},
801 {0x0000a34c, 0x00000000},
802 {0x0000a350, 0x0000a000},
803 {0x0000a364, 0x00000000},
804 {0x0000a370, 0x00000000},
805 {0x0000a390, 0x00000001},
806 {0x0000a394, 0x00000444},
807 {0x0000a398, 0x1f020503},
808 {0x0000a39c, 0x29180c03},
809 {0x0000a3a0, 0x9a8b6844},
810 {0x0000a3a4, 0x00000000},
811 {0x0000a3a8, 0xaaaaaaaa},
812 {0x0000a3ac, 0x3c466478},
813 {0x0000a3c0, 0x20202020},
814 {0x0000a3c4, 0x22222220},
815 {0x0000a3c8, 0x20200020},
816 {0x0000a3cc, 0x20202020},
817 {0x0000a3d0, 0x20202020},
818 {0x0000a3d4, 0x20202020},
819 {0x0000a3d8, 0x20202020},
820 {0x0000a3dc, 0x20202020},
821 {0x0000a3e0, 0x20202020},
822 {0x0000a3e4, 0x20202020},
823 {0x0000a3e8, 0x20202020},
824 {0x0000a3ec, 0x20202020},
825 {0x0000a3f0, 0x00000000},
826 {0x0000a3f4, 0x00000000},
827 {0x0000a3f8, 0x0c9bd380},
828 {0x0000a3fc, 0x000f0f01},
829 {0x0000a400, 0x8fa91f01},
830 {0x0000a404, 0x00000000},
831 {0x0000a408, 0x0e79e5c6},
832 {0x0000a40c, 0x00820820},
833 {0x0000a414, 0x1ce739ce},
834 {0x0000a418, 0x2d001dce},
835 {0x0000a41c, 0x1ce739ce},
836 {0x0000a420, 0x000001ce},
837 {0x0000a424, 0x1ce739ce},
838 {0x0000a428, 0x000001ce},
839 {0x0000a42c, 0x1ce739ce},
840 {0x0000a430, 0x1ce739ce},
841 {0x0000a434, 0x00000000},
842 {0x0000a438, 0x00001801},
843 {0x0000a43c, 0x00100000},
844 {0x0000a444, 0x00000000},
845 {0x0000a448, 0x05000080},
846 {0x0000a44c, 0x00000001},
847 {0x0000a450, 0x00010000},
848 {0x0000a458, 0x00000000},
849 {0x0000a644, 0x3fad9d74},
850 {0x0000a648, 0x0048060a},
851 {0x0000a64c, 0x00003c37},
852 {0x0000a670, 0x03020100},
853 {0x0000a674, 0x09080504},
854 {0x0000a678, 0x0d0c0b0a},
855 {0x0000a67c, 0x13121110},
856 {0x0000a680, 0x31301514},
857 {0x0000a684, 0x35343332},
858 {0x0000a688, 0x00000036},
859 {0x0000a690, 0x00000838},
860 {0x0000a7cc, 0x00000000},
861 {0x0000a7d0, 0x00000000},
862 {0x0000a7d4, 0x00000004},
863 {0x0000a7dc, 0x00000000},
864 {0x0000a8d0, 0x004b6a8e},
865 {0x0000a8d4, 0x00000820},
866 {0x0000a8dc, 0x00000000},
867 {0x0000a8f0, 0x00000000},
868 {0x0000a8f4, 0x00000000},
869 {0x0000b2d0, 0x00000080},
870 {0x0000b2d4, 0x00000000},
871 {0x0000b2ec, 0x00000000},
872 {0x0000b2f0, 0x00000000},
873 {0x0000b2f4, 0x00000000},
874 {0x0000b2f8, 0x00000000},
875 {0x0000b408, 0x0e79e5c0},
876 {0x0000b40c, 0x00820820},
877 {0x0000b420, 0x00000000},
878 {0x0000b8d0, 0x004b6a8e},
879 {0x0000b8d4, 0x00000820},
880 {0x0000b8dc, 0x00000000},
881 {0x0000b8f0, 0x00000000},
882 {0x0000b8f4, 0x00000000},
883 {0x0000c2d0, 0x00000080},
884 {0x0000c2d4, 0x00000000},
885 {0x0000c2ec, 0x00000000},
886 {0x0000c2f0, 0x00000000},
887 {0x0000c2f4, 0x00000000},
888 {0x0000c2f8, 0x00000000},
889 {0x0000c408, 0x0e79e5c0},
890 {0x0000c40c, 0x00820820},
891 {0x0000c420, 0x00000000},
892};
893
894static const u32 ar955x_1p0_common_wo_xlna_rx_gain_table[][2] = {
895 /* Addr allmodes */
896 {0x0000a000, 0x00010000},
897 {0x0000a004, 0x00030002},
898 {0x0000a008, 0x00050004},
899 {0x0000a00c, 0x00810080},
900 {0x0000a010, 0x00830082},
901 {0x0000a014, 0x01810180},
902 {0x0000a018, 0x01830182},
903 {0x0000a01c, 0x01850184},
904 {0x0000a020, 0x01890188},
905 {0x0000a024, 0x018b018a},
906 {0x0000a028, 0x018d018c},
907 {0x0000a02c, 0x03820190},
908 {0x0000a030, 0x03840383},
909 {0x0000a034, 0x03880385},
910 {0x0000a038, 0x038a0389},
911 {0x0000a03c, 0x038c038b},
912 {0x0000a040, 0x0390038d},
913 {0x0000a044, 0x03920391},
914 {0x0000a048, 0x03940393},
915 {0x0000a04c, 0x03960395},
916 {0x0000a050, 0x00000000},
917 {0x0000a054, 0x00000000},
918 {0x0000a058, 0x00000000},
919 {0x0000a05c, 0x00000000},
920 {0x0000a060, 0x00000000},
921 {0x0000a064, 0x00000000},
922 {0x0000a068, 0x00000000},
923 {0x0000a06c, 0x00000000},
924 {0x0000a070, 0x00000000},
925 {0x0000a074, 0x00000000},
926 {0x0000a078, 0x00000000},
927 {0x0000a07c, 0x00000000},
928 {0x0000a080, 0x29292929},
929 {0x0000a084, 0x29292929},
930 {0x0000a088, 0x29292929},
931 {0x0000a08c, 0x29292929},
932 {0x0000a090, 0x22292929},
933 {0x0000a094, 0x1d1d2222},
934 {0x0000a098, 0x0c111117},
935 {0x0000a09c, 0x00030303},
936 {0x0000a0a0, 0x00000000},
937 {0x0000a0a4, 0x00000000},
938 {0x0000a0a8, 0x00000000},
939 {0x0000a0ac, 0x00000000},
940 {0x0000a0b0, 0x00000000},
941 {0x0000a0b4, 0x00000000},
942 {0x0000a0b8, 0x00000000},
943 {0x0000a0bc, 0x00000000},
944 {0x0000a0c0, 0x001f0000},
945 {0x0000a0c4, 0x01000101},
946 {0x0000a0c8, 0x011e011f},
947 {0x0000a0cc, 0x011c011d},
948 {0x0000a0d0, 0x02030204},
949 {0x0000a0d4, 0x02010202},
950 {0x0000a0d8, 0x021f0200},
951 {0x0000a0dc, 0x0302021e},
952 {0x0000a0e0, 0x03000301},
953 {0x0000a0e4, 0x031e031f},
954 {0x0000a0e8, 0x0402031d},
955 {0x0000a0ec, 0x04000401},
956 {0x0000a0f0, 0x041e041f},
957 {0x0000a0f4, 0x0502041d},
958 {0x0000a0f8, 0x05000501},
959 {0x0000a0fc, 0x051e051f},
960 {0x0000a100, 0x06010602},
961 {0x0000a104, 0x061f0600},
962 {0x0000a108, 0x061d061e},
963 {0x0000a10c, 0x07020703},
964 {0x0000a110, 0x07000701},
965 {0x0000a114, 0x00000000},
966 {0x0000a118, 0x00000000},
967 {0x0000a11c, 0x00000000},
968 {0x0000a120, 0x00000000},
969 {0x0000a124, 0x00000000},
970 {0x0000a128, 0x00000000},
971 {0x0000a12c, 0x00000000},
972 {0x0000a130, 0x00000000},
973 {0x0000a134, 0x00000000},
974 {0x0000a138, 0x00000000},
975 {0x0000a13c, 0x00000000},
976 {0x0000a140, 0x001f0000},
977 {0x0000a144, 0x01000101},
978 {0x0000a148, 0x011e011f},
979 {0x0000a14c, 0x011c011d},
980 {0x0000a150, 0x02030204},
981 {0x0000a154, 0x02010202},
982 {0x0000a158, 0x021f0200},
983 {0x0000a15c, 0x0302021e},
984 {0x0000a160, 0x03000301},
985 {0x0000a164, 0x031e031f},
986 {0x0000a168, 0x0402031d},
987 {0x0000a16c, 0x04000401},
988 {0x0000a170, 0x041e041f},
989 {0x0000a174, 0x0502041d},
990 {0x0000a178, 0x05000501},
991 {0x0000a17c, 0x051e051f},
992 {0x0000a180, 0x06010602},
993 {0x0000a184, 0x061f0600},
994 {0x0000a188, 0x061d061e},
995 {0x0000a18c, 0x07020703},
996 {0x0000a190, 0x07000701},
997 {0x0000a194, 0x00000000},
998 {0x0000a198, 0x00000000},
999 {0x0000a19c, 0x00000000},
1000 {0x0000a1a0, 0x00000000},
1001 {0x0000a1a4, 0x00000000},
1002 {0x0000a1a8, 0x00000000},
1003 {0x0000a1ac, 0x00000000},
1004 {0x0000a1b0, 0x00000000},
1005 {0x0000a1b4, 0x00000000},
1006 {0x0000a1b8, 0x00000000},
1007 {0x0000a1bc, 0x00000000},
1008 {0x0000a1c0, 0x00000000},
1009 {0x0000a1c4, 0x00000000},
1010 {0x0000a1c8, 0x00000000},
1011 {0x0000a1cc, 0x00000000},
1012 {0x0000a1d0, 0x00000000},
1013 {0x0000a1d4, 0x00000000},
1014 {0x0000a1d8, 0x00000000},
1015 {0x0000a1dc, 0x00000000},
1016 {0x0000a1e0, 0x00000000},
1017 {0x0000a1e4, 0x00000000},
1018 {0x0000a1e8, 0x00000000},
1019 {0x0000a1ec, 0x00000000},
1020 {0x0000a1f0, 0x00000396},
1021 {0x0000a1f4, 0x00000396},
1022 {0x0000a1f8, 0x00000396},
1023 {0x0000a1fc, 0x00000196},
1024 {0x0000b000, 0x00010000},
1025 {0x0000b004, 0x00030002},
1026 {0x0000b008, 0x00050004},
1027 {0x0000b00c, 0x00810080},
1028 {0x0000b010, 0x00830082},
1029 {0x0000b014, 0x01810180},
1030 {0x0000b018, 0x01830182},
1031 {0x0000b01c, 0x01850184},
1032 {0x0000b020, 0x02810280},
1033 {0x0000b024, 0x02830282},
1034 {0x0000b028, 0x02850284},
1035 {0x0000b02c, 0x02890288},
1036 {0x0000b030, 0x028b028a},
1037 {0x0000b034, 0x0388028c},
1038 {0x0000b038, 0x038a0389},
1039 {0x0000b03c, 0x038c038b},
1040 {0x0000b040, 0x0390038d},
1041 {0x0000b044, 0x03920391},
1042 {0x0000b048, 0x03940393},
1043 {0x0000b04c, 0x03960395},
1044 {0x0000b050, 0x00000000},
1045 {0x0000b054, 0x00000000},
1046 {0x0000b058, 0x00000000},
1047 {0x0000b05c, 0x00000000},
1048 {0x0000b060, 0x00000000},
1049 {0x0000b064, 0x00000000},
1050 {0x0000b068, 0x00000000},
1051 {0x0000b06c, 0x00000000},
1052 {0x0000b070, 0x00000000},
1053 {0x0000b074, 0x00000000},
1054 {0x0000b078, 0x00000000},
1055 {0x0000b07c, 0x00000000},
1056 {0x0000b080, 0x32323232},
1057 {0x0000b084, 0x2f2f3232},
1058 {0x0000b088, 0x23282a2d},
1059 {0x0000b08c, 0x1c1e2123},
1060 {0x0000b090, 0x14171919},
1061 {0x0000b094, 0x0e0e1214},
1062 {0x0000b098, 0x03050707},
1063 {0x0000b09c, 0x00030303},
1064 {0x0000b0a0, 0x00000000},
1065 {0x0000b0a4, 0x00000000},
1066 {0x0000b0a8, 0x00000000},
1067 {0x0000b0ac, 0x00000000},
1068 {0x0000b0b0, 0x00000000},
1069 {0x0000b0b4, 0x00000000},
1070 {0x0000b0b8, 0x00000000},
1071 {0x0000b0bc, 0x00000000},
1072 {0x0000b0c0, 0x003f0020},
1073 {0x0000b0c4, 0x00400041},
1074 {0x0000b0c8, 0x0140005f},
1075 {0x0000b0cc, 0x0160015f},
1076 {0x0000b0d0, 0x017e017f},
1077 {0x0000b0d4, 0x02410242},
1078 {0x0000b0d8, 0x025f0240},
1079 {0x0000b0dc, 0x027f0260},
1080 {0x0000b0e0, 0x0341027e},
1081 {0x0000b0e4, 0x035f0340},
1082 {0x0000b0e8, 0x037f0360},
1083 {0x0000b0ec, 0x04400441},
1084 {0x0000b0f0, 0x0460045f},
1085 {0x0000b0f4, 0x0541047f},
1086 {0x0000b0f8, 0x055f0540},
1087 {0x0000b0fc, 0x057f0560},
1088 {0x0000b100, 0x06400641},
1089 {0x0000b104, 0x0660065f},
1090 {0x0000b108, 0x067e067f},
1091 {0x0000b10c, 0x07410742},
1092 {0x0000b110, 0x075f0740},
1093 {0x0000b114, 0x077f0760},
1094 {0x0000b118, 0x07800781},
1095 {0x0000b11c, 0x07a0079f},
1096 {0x0000b120, 0x07c107bf},
1097 {0x0000b124, 0x000007c0},
1098 {0x0000b128, 0x00000000},
1099 {0x0000b12c, 0x00000000},
1100 {0x0000b130, 0x00000000},
1101 {0x0000b134, 0x00000000},
1102 {0x0000b138, 0x00000000},
1103 {0x0000b13c, 0x00000000},
1104 {0x0000b140, 0x003f0020},
1105 {0x0000b144, 0x00400041},
1106 {0x0000b148, 0x0140005f},
1107 {0x0000b14c, 0x0160015f},
1108 {0x0000b150, 0x017e017f},
1109 {0x0000b154, 0x02410242},
1110 {0x0000b158, 0x025f0240},
1111 {0x0000b15c, 0x027f0260},
1112 {0x0000b160, 0x0341027e},
1113 {0x0000b164, 0x035f0340},
1114 {0x0000b168, 0x037f0360},
1115 {0x0000b16c, 0x04400441},
1116 {0x0000b170, 0x0460045f},
1117 {0x0000b174, 0x0541047f},
1118 {0x0000b178, 0x055f0540},
1119 {0x0000b17c, 0x057f0560},
1120 {0x0000b180, 0x06400641},
1121 {0x0000b184, 0x0660065f},
1122 {0x0000b188, 0x067e067f},
1123 {0x0000b18c, 0x07410742},
1124 {0x0000b190, 0x075f0740},
1125 {0x0000b194, 0x077f0760},
1126 {0x0000b198, 0x07800781},
1127 {0x0000b19c, 0x07a0079f},
1128 {0x0000b1a0, 0x07c107bf},
1129 {0x0000b1a4, 0x000007c0},
1130 {0x0000b1a8, 0x00000000},
1131 {0x0000b1ac, 0x00000000},
1132 {0x0000b1b0, 0x00000000},
1133 {0x0000b1b4, 0x00000000},
1134 {0x0000b1b8, 0x00000000},
1135 {0x0000b1bc, 0x00000000},
1136 {0x0000b1c0, 0x00000000},
1137 {0x0000b1c4, 0x00000000},
1138 {0x0000b1c8, 0x00000000},
1139 {0x0000b1cc, 0x00000000},
1140 {0x0000b1d0, 0x00000000},
1141 {0x0000b1d4, 0x00000000},
1142 {0x0000b1d8, 0x00000000},
1143 {0x0000b1dc, 0x00000000},
1144 {0x0000b1e0, 0x00000000},
1145 {0x0000b1e4, 0x00000000},
1146 {0x0000b1e8, 0x00000000},
1147 {0x0000b1ec, 0x00000000},
1148 {0x0000b1f0, 0x00000396},
1149 {0x0000b1f4, 0x00000396},
1150 {0x0000b1f8, 0x00000396},
1151 {0x0000b1fc, 0x00000196},
1152};
1153
1154static const u32 ar955x_1p0_soc_preamble[][2] = {
1155 /* Addr allmodes */
1156 {0x00007000, 0x00000000},
1157 {0x00007004, 0x00000000},
1158 {0x00007008, 0x00000000},
1159 {0x0000700c, 0x00000000},
1160 {0x0000701c, 0x00000000},
1161 {0x00007020, 0x00000000},
1162 {0x00007024, 0x00000000},
1163 {0x00007028, 0x00000000},
1164 {0x0000702c, 0x00000000},
1165 {0x00007030, 0x00000000},
1166 {0x00007034, 0x00000002},
1167 {0x00007038, 0x000004c2},
1168 {0x00007048, 0x00000000},
1169};
1170
1171static const u32 ar955x_1p0_common_wo_xlna_rx_gain_bounds[][5] = {
1172 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1173 {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
1174 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
1175};
1176
1177static const u32 ar955x_1p0_mac_postamble[][5] = {
1178 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1179 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
1180 {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
1181 {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
1182 {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
1183 {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
1184 {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
1185 {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
1186 {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
1187};
1188
1189static const u32 ar955x_1p0_common_rx_gain_bounds[][5] = {
1190 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1191 {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
1192 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302018, 0x50302018},
1193};
1194
1195static const u32 ar955x_1p0_modes_no_xpa_tx_gain_table[][9] = {
1196 /* Addr 5G_HT20_L 5G_HT40_L 5G_HT20_M 5G_HT40_M 5G_HT20_H 5G_HT40_H 2G_HT40 2G_HT20 */
1197 {0x0000a2dc, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0xfffe5aaa, 0xfffe5aaa},
1198 {0x0000a2e0, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0xfffe9ccc, 0xfffe9ccc},
1199 {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0xffffe0f0, 0xffffe0f0},
1200 {0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xffffef00, 0xffffef00},
1201 {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d8, 0x000050d8, 0x000050d8, 0x000050d8, 0x000050d7, 0x000050d7},
1202 {0x0000a500, 0x00002220, 0x00002220, 0x00002220, 0x00002220, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
1203 {0x0000a504, 0x04002222, 0x04002222, 0x04002222, 0x04002222, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
1204 {0x0000a508, 0x09002421, 0x09002421, 0x09002421, 0x09002421, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
1205 {0x0000a50c, 0x0d002621, 0x0d002621, 0x0d002621, 0x0d002621, 0x0d002621, 0x0d002621, 0x0b000006, 0x0b000006},
1206 {0x0000a510, 0x13004620, 0x13004620, 0x13004620, 0x13004620, 0x13004620, 0x13004620, 0x0f00000a, 0x0f00000a},
1207 {0x0000a514, 0x19004a20, 0x19004a20, 0x19004a20, 0x19004a20, 0x19004a20, 0x19004a20, 0x1300000c, 0x1300000c},
1208 {0x0000a518, 0x1d004e20, 0x1d004e20, 0x1d004e20, 0x1d004e20, 0x1d004e20, 0x1d004e20, 0x1700000e, 0x1700000e},
1209 {0x0000a51c, 0x21005420, 0x21005420, 0x21005420, 0x21005420, 0x21005420, 0x21005420, 0x1b000012, 0x1b000012},
1210 {0x0000a520, 0x26005e20, 0x26005e20, 0x26005e20, 0x26005e20, 0x26005e20, 0x26005e20, 0x1f00004a, 0x1f00004a},
1211 {0x0000a524, 0x2b005e40, 0x2b005e40, 0x2b005e40, 0x2b005e40, 0x2b005e40, 0x2b005e40, 0x23000244, 0x23000244},
1212 {0x0000a528, 0x2f005e42, 0x2f005e42, 0x2f005e42, 0x2f005e42, 0x2f005e42, 0x2f005e42, 0x2700022b, 0x2700022b},
1213 {0x0000a52c, 0x33005e44, 0x33005e44, 0x33005e44, 0x33005e44, 0x33005e44, 0x33005e44, 0x2b000625, 0x2b000625},
1214 {0x0000a530, 0x38005e65, 0x38005e65, 0x38005e65, 0x38005e65, 0x38005e65, 0x38005e65, 0x2f001006, 0x2f001006},
1215 {0x0000a534, 0x3c005e69, 0x3c005e69, 0x3c005e69, 0x3c005e69, 0x3c005e69, 0x3c005e69, 0x330008a0, 0x330008a0},
1216 {0x0000a538, 0x40005e6b, 0x40005e6b, 0x40005e6b, 0x40005e6b, 0x40005e6b, 0x40005e6b, 0x37000a2a, 0x37000a2a},
1217 {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x44005e6d, 0x44005e6d, 0x44005e6d, 0x44005e6d, 0x3b001c23, 0x3b001c23},
1218 {0x0000a540, 0x49005e72, 0x49005e72, 0x49005e72, 0x49005e72, 0x49005e72, 0x49005e72, 0x3f0014a0, 0x3f0014a0},
1219 {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x4e005eb2, 0x4e005eb2, 0x4e005eb2, 0x4e005eb2, 0x43001882, 0x43001882},
1220 {0x0000a548, 0x53005f12, 0x53005f12, 0x53005f12, 0x53005f12, 0x53005f12, 0x53005f12, 0x47001ca2, 0x47001ca2},
1221 {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x59025eb2, 0x59025eb2, 0x59025eb2, 0x59025eb2, 0x4b001ec3, 0x4b001ec3},
1222 {0x0000a550, 0x5e025f12, 0x5e025f12, 0x5e025f12, 0x5e025f12, 0x5e025f12, 0x5e025f12, 0x4f00148c, 0x4f00148c},
1223 {0x0000a554, 0x61027f12, 0x61027f12, 0x61027f12, 0x61027f12, 0x61027f12, 0x61027f12, 0x53001c6e, 0x53001c6e},
1224 {0x0000a558, 0x6702bf12, 0x6702bf12, 0x6702bf12, 0x6702bf12, 0x6702bf12, 0x6702bf12, 0x57001c92, 0x57001c92},
1225 {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x6b02bf14, 0x6b02bf14, 0x6b02bf14, 0x6b02bf14, 0x5c001af6, 0x5c001af6},
1226 {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x5c001af6, 0x5c001af6},
1227 {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x5c001af6, 0x5c001af6},
1228 {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x5c001af6, 0x5c001af6},
1229 {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x5c001af6, 0x5c001af6},
1230 {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x5c001af6, 0x5c001af6},
1231 {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x5c001af6, 0x5c001af6},
1232 {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x5c001af6, 0x5c001af6},
1233 {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x5c001af6, 0x5c001af6},
1234 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1235 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1236 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1237 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1238 {0x0000a610, 0x00804000, 0x00804000, 0x00804000, 0x00804000, 0x00804000, 0x00804000, 0x04005001, 0x04005001},
1239 {0x0000a614, 0x00804201, 0x00804201, 0x00804201, 0x00804201, 0x00804201, 0x00804201, 0x03808e02, 0x03808e02},
1240 {0x0000a618, 0x0280c802, 0x0280c802, 0x0280c802, 0x0280c802, 0x0280c802, 0x0280c802, 0x0300c000, 0x0300c000},
1241 {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x0280ca03, 0x0280ca03, 0x0280ca03, 0x0280ca03, 0x03808e02, 0x03808e02},
1242 {0x0000a620, 0x04c15104, 0x04c15104, 0x04c15104, 0x04c15104, 0x04c15104, 0x04c15104, 0x03410c03, 0x03410c03},
1243 {0x0000a624, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04014c03, 0x04014c03},
1244 {0x0000a628, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x05818d04, 0x05818d04},
1245 {0x0000a62c, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x0801cd04, 0x0801cd04},
1246 {0x0000a630, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x0801e007, 0x0801e007},
1247 {0x0000a634, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x0801e007, 0x0801e007},
1248 {0x0000a638, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x0801e007, 0x0801e007},
1249 {0x0000a63c, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x0801e007, 0x0801e007},
1250 {0x0000b2dc, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0xfffe5aaa, 0xfffe5aaa},
1251 {0x0000b2e0, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0xfffe9ccc, 0xfffe9ccc},
1252 {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0xffffe0f0, 0xffffe0f0},
1253 {0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xffffef00, 0xffffef00},
1254 {0x0000c2dc, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0xfffe5aaa, 0xfffe5aaa},
1255 {0x0000c2e0, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0xfffe9ccc, 0xfffe9ccc},
1256 {0x0000c2e4, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0xffffe0f0, 0xffffe0f0},
1257 {0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xffffef00, 0xffffef00},
1258 {0x00016044, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x054922d4, 0x054922d4},
1259 {0x00016048, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401},
1260 {0x00016444, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x054922d4, 0x054922d4},
1261 {0x00016448, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401},
1262 {0x00016844, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x054922d4, 0x054922d4},
1263 {0x00016848, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401},
1264};
1265
1266static const u32 ar955x_1p0_soc_postamble[][5] = {
1267 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1268 {0x00007010, 0x00000023, 0x00000023, 0x00000023, 0x00000023},
1269};
1270
1271static const u32 ar955x_1p0_modes_fast_clock[][3] = {
1272 /* Addr 5G_HT20 5G_HT40 */
1273 {0x00001030, 0x00000268, 0x000004d0},
1274 {0x00001070, 0x0000018c, 0x00000318},
1275 {0x000010b0, 0x00000fd0, 0x00001fa0},
1276 {0x00008014, 0x044c044c, 0x08980898},
1277 {0x0000801c, 0x148ec02b, 0x148ec057},
1278 {0x00008318, 0x000044c0, 0x00008980},
1279 {0x00009e00, 0x0372131c, 0x0372131c},
1280 {0x0000a230, 0x0000000b, 0x00000016},
1281 {0x0000a254, 0x00000898, 0x00001130},
1282};
1283
1284#endif /* INITVALS_955X_1P0_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
index 06b3f0df9fad..6e1915aee712 100644
--- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2010 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
3 * 4 *
4 * Permission to use, copy, modify, and/or distribute this software for any 5 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 6 * purpose with or without fee is hereby granted, provided that the above
@@ -19,18 +20,7 @@
19 20
20/* AR9580 1.0 */ 21/* AR9580 1.0 */
21 22
22static const u32 ar9580_1p0_modes_fast_clock[][3] = { 23#define ar9580_1p0_modes_fast_clock ar9300Modes_fast_clock_2p2
23 /* Addr 5G_HT20 5G_HT40 */
24 {0x00001030, 0x00000268, 0x000004d0},
25 {0x00001070, 0x0000018c, 0x00000318},
26 {0x000010b0, 0x00000fd0, 0x00001fa0},
27 {0x00008014, 0x044c044c, 0x08980898},
28 {0x0000801c, 0x148ec02b, 0x148ec057},
29 {0x00008318, 0x000044c0, 0x00008980},
30 {0x00009e00, 0x0372131c, 0x0372131c},
31 {0x0000a230, 0x0000000b, 0x00000016},
32 {0x0000a254, 0x00000898, 0x00001130},
33};
34 24
35static const u32 ar9580_1p0_radio_postamble[][5] = { 25static const u32 ar9580_1p0_radio_postamble[][5] = {
36 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 26 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
@@ -208,17 +198,7 @@ static const u32 ar9580_1p0_baseband_core[][2] = {
208 {0x0000c420, 0x00000000}, 198 {0x0000c420, 0x00000000},
209}; 199};
210 200
211static const u32 ar9580_1p0_mac_postamble[][5] = { 201#define ar9580_1p0_mac_postamble ar9300_2p2_mac_postamble
212 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
213 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
214 {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
215 {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
216 {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
217 {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
218 {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
219 {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
220 {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
221};
222 202
223static const u32 ar9580_1p0_low_ob_db_tx_gain_table[][5] = { 203static const u32 ar9580_1p0_low_ob_db_tx_gain_table[][5] = {
224 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 204 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
@@ -326,111 +306,7 @@ static const u32 ar9580_1p0_low_ob_db_tx_gain_table[][5] = {
326 {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, 306 {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
327}; 307};
328 308
329static const u32 ar9580_1p0_high_power_tx_gain_table[][5] = { 309#define ar9580_1p0_high_power_tx_gain_table ar9580_1p0_low_ob_db_tx_gain_table
330 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
331 {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
332 {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
333 {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
334 {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
335 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
336 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
337 {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
338 {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
339 {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
340 {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
341 {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
342 {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402},
343 {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
344 {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
345 {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
346 {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
347 {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
348 {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
349 {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
350 {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
351 {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
352 {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
353 {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
354 {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
355 {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x47001a83, 0x47001a83},
356 {0x0000a550, 0x61024a6c, 0x61024a6c, 0x4a001c84, 0x4a001c84},
357 {0x0000a554, 0x66026a6c, 0x66026a6c, 0x4e001ce3, 0x4e001ce3},
358 {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x52001ce5, 0x52001ce5},
359 {0x0000a55c, 0x7002708c, 0x7002708c, 0x56001ce9, 0x56001ce9},
360 {0x0000a560, 0x7302b08a, 0x7302b08a, 0x5a001ceb, 0x5a001ceb},
361 {0x0000a564, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
362 {0x0000a568, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
363 {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
364 {0x0000a570, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
365 {0x0000a574, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
366 {0x0000a578, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
367 {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
368 {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
369 {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
370 {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
371 {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
372 {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
373 {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
374 {0x0000a598, 0x21802220, 0x21802220, 0x16800402, 0x16800402},
375 {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404},
376 {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
377 {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
378 {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
379 {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
380 {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
381 {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
382 {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
383 {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
384 {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
385 {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861},
386 {0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81},
387 {0x0000a5cc, 0x5c82486b, 0x5c82486b, 0x47801a83, 0x47801a83},
388 {0x0000a5d0, 0x61824a6c, 0x61824a6c, 0x4a801c84, 0x4a801c84},
389 {0x0000a5d4, 0x66826a6c, 0x66826a6c, 0x4e801ce3, 0x4e801ce3},
390 {0x0000a5d8, 0x6b826e6c, 0x6b826e6c, 0x52801ce5, 0x52801ce5},
391 {0x0000a5dc, 0x7082708c, 0x7082708c, 0x56801ce9, 0x56801ce9},
392 {0x0000a5e0, 0x7382b08a, 0x7382b08a, 0x5a801ceb, 0x5a801ceb},
393 {0x0000a5e4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
394 {0x0000a5e8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
395 {0x0000a5ec, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
396 {0x0000a5f0, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
397 {0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
398 {0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
399 {0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
400 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
401 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
402 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
403 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
404 {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
405 {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
406 {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
407 {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
408 {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
409 {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
410 {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
411 {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
412 {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
413 {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
414 {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
415 {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
416 {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
417 {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
418 {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
419 {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
420 {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
421 {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
422 {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
423 {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
424 {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
425 {0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
426 {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
427 {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
428 {0x00016448, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
429 {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
430 {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
431 {0x00016848, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
432 {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
433};
434 310
435static const u32 ar9580_1p0_lowest_ob_db_tx_gain_table[][5] = { 311static const u32 ar9580_1p0_lowest_ob_db_tx_gain_table[][5] = {
436 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 312 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
@@ -538,12 +414,7 @@ static const u32 ar9580_1p0_lowest_ob_db_tx_gain_table[][5] = {
538 {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, 414 {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
539}; 415};
540 416
541static const u32 ar9580_1p0_baseband_core_txfir_coeff_japan_2484[][2] = { 417#define ar9580_1p0_baseband_core_txfir_coeff_japan_2484 ar9462_2p0_baseband_core_txfir_coeff_japan_2484
542 /* Addr allmodes */
543 {0x0000a398, 0x00000000},
544 {0x0000a39c, 0x6f7f0301},
545 {0x0000a3a0, 0xca9228ee},
546};
547 418
548static const u32 ar9580_1p0_mac_core[][2] = { 419static const u32 ar9580_1p0_mac_core[][2] = {
549 /* Addr allmodes */ 420 /* Addr allmodes */
@@ -808,376 +679,11 @@ static const u32 ar9580_1p0_mixed_ob_db_tx_gain_table[][5] = {
808 {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, 679 {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
809}; 680};
810 681
811static const u32 ar9580_1p0_wo_xlna_rx_gain_table[][2] = { 682#define ar9580_1p0_wo_xlna_rx_gain_table ar9300Common_wo_xlna_rx_gain_table_2p2
812 /* Addr allmodes */
813 {0x0000a000, 0x00010000},
814 {0x0000a004, 0x00030002},
815 {0x0000a008, 0x00050004},
816 {0x0000a00c, 0x00810080},
817 {0x0000a010, 0x00830082},
818 {0x0000a014, 0x01810180},
819 {0x0000a018, 0x01830182},
820 {0x0000a01c, 0x01850184},
821 {0x0000a020, 0x01890188},
822 {0x0000a024, 0x018b018a},
823 {0x0000a028, 0x018d018c},
824 {0x0000a02c, 0x03820190},
825 {0x0000a030, 0x03840383},
826 {0x0000a034, 0x03880385},
827 {0x0000a038, 0x038a0389},
828 {0x0000a03c, 0x038c038b},
829 {0x0000a040, 0x0390038d},
830 {0x0000a044, 0x03920391},
831 {0x0000a048, 0x03940393},
832 {0x0000a04c, 0x03960395},
833 {0x0000a050, 0x00000000},
834 {0x0000a054, 0x00000000},
835 {0x0000a058, 0x00000000},
836 {0x0000a05c, 0x00000000},
837 {0x0000a060, 0x00000000},
838 {0x0000a064, 0x00000000},
839 {0x0000a068, 0x00000000},
840 {0x0000a06c, 0x00000000},
841 {0x0000a070, 0x00000000},
842 {0x0000a074, 0x00000000},
843 {0x0000a078, 0x00000000},
844 {0x0000a07c, 0x00000000},
845 {0x0000a080, 0x29292929},
846 {0x0000a084, 0x29292929},
847 {0x0000a088, 0x29292929},
848 {0x0000a08c, 0x29292929},
849 {0x0000a090, 0x22292929},
850 {0x0000a094, 0x1d1d2222},
851 {0x0000a098, 0x0c111117},
852 {0x0000a09c, 0x00030303},
853 {0x0000a0a0, 0x00000000},
854 {0x0000a0a4, 0x00000000},
855 {0x0000a0a8, 0x00000000},
856 {0x0000a0ac, 0x00000000},
857 {0x0000a0b0, 0x00000000},
858 {0x0000a0b4, 0x00000000},
859 {0x0000a0b8, 0x00000000},
860 {0x0000a0bc, 0x00000000},
861 {0x0000a0c0, 0x001f0000},
862 {0x0000a0c4, 0x01000101},
863 {0x0000a0c8, 0x011e011f},
864 {0x0000a0cc, 0x011c011d},
865 {0x0000a0d0, 0x02030204},
866 {0x0000a0d4, 0x02010202},
867 {0x0000a0d8, 0x021f0200},
868 {0x0000a0dc, 0x0302021e},
869 {0x0000a0e0, 0x03000301},
870 {0x0000a0e4, 0x031e031f},
871 {0x0000a0e8, 0x0402031d},
872 {0x0000a0ec, 0x04000401},
873 {0x0000a0f0, 0x041e041f},
874 {0x0000a0f4, 0x0502041d},
875 {0x0000a0f8, 0x05000501},
876 {0x0000a0fc, 0x051e051f},
877 {0x0000a100, 0x06010602},
878 {0x0000a104, 0x061f0600},
879 {0x0000a108, 0x061d061e},
880 {0x0000a10c, 0x07020703},
881 {0x0000a110, 0x07000701},
882 {0x0000a114, 0x00000000},
883 {0x0000a118, 0x00000000},
884 {0x0000a11c, 0x00000000},
885 {0x0000a120, 0x00000000},
886 {0x0000a124, 0x00000000},
887 {0x0000a128, 0x00000000},
888 {0x0000a12c, 0x00000000},
889 {0x0000a130, 0x00000000},
890 {0x0000a134, 0x00000000},
891 {0x0000a138, 0x00000000},
892 {0x0000a13c, 0x00000000},
893 {0x0000a140, 0x001f0000},
894 {0x0000a144, 0x01000101},
895 {0x0000a148, 0x011e011f},
896 {0x0000a14c, 0x011c011d},
897 {0x0000a150, 0x02030204},
898 {0x0000a154, 0x02010202},
899 {0x0000a158, 0x021f0200},
900 {0x0000a15c, 0x0302021e},
901 {0x0000a160, 0x03000301},
902 {0x0000a164, 0x031e031f},
903 {0x0000a168, 0x0402031d},
904 {0x0000a16c, 0x04000401},
905 {0x0000a170, 0x041e041f},
906 {0x0000a174, 0x0502041d},
907 {0x0000a178, 0x05000501},
908 {0x0000a17c, 0x051e051f},
909 {0x0000a180, 0x06010602},
910 {0x0000a184, 0x061f0600},
911 {0x0000a188, 0x061d061e},
912 {0x0000a18c, 0x07020703},
913 {0x0000a190, 0x07000701},
914 {0x0000a194, 0x00000000},
915 {0x0000a198, 0x00000000},
916 {0x0000a19c, 0x00000000},
917 {0x0000a1a0, 0x00000000},
918 {0x0000a1a4, 0x00000000},
919 {0x0000a1a8, 0x00000000},
920 {0x0000a1ac, 0x00000000},
921 {0x0000a1b0, 0x00000000},
922 {0x0000a1b4, 0x00000000},
923 {0x0000a1b8, 0x00000000},
924 {0x0000a1bc, 0x00000000},
925 {0x0000a1c0, 0x00000000},
926 {0x0000a1c4, 0x00000000},
927 {0x0000a1c8, 0x00000000},
928 {0x0000a1cc, 0x00000000},
929 {0x0000a1d0, 0x00000000},
930 {0x0000a1d4, 0x00000000},
931 {0x0000a1d8, 0x00000000},
932 {0x0000a1dc, 0x00000000},
933 {0x0000a1e0, 0x00000000},
934 {0x0000a1e4, 0x00000000},
935 {0x0000a1e8, 0x00000000},
936 {0x0000a1ec, 0x00000000},
937 {0x0000a1f0, 0x00000396},
938 {0x0000a1f4, 0x00000396},
939 {0x0000a1f8, 0x00000396},
940 {0x0000a1fc, 0x00000196},
941 {0x0000b000, 0x00010000},
942 {0x0000b004, 0x00030002},
943 {0x0000b008, 0x00050004},
944 {0x0000b00c, 0x00810080},
945 {0x0000b010, 0x00830082},
946 {0x0000b014, 0x01810180},
947 {0x0000b018, 0x01830182},
948 {0x0000b01c, 0x01850184},
949 {0x0000b020, 0x02810280},
950 {0x0000b024, 0x02830282},
951 {0x0000b028, 0x02850284},
952 {0x0000b02c, 0x02890288},
953 {0x0000b030, 0x028b028a},
954 {0x0000b034, 0x0388028c},
955 {0x0000b038, 0x038a0389},
956 {0x0000b03c, 0x038c038b},
957 {0x0000b040, 0x0390038d},
958 {0x0000b044, 0x03920391},
959 {0x0000b048, 0x03940393},
960 {0x0000b04c, 0x03960395},
961 {0x0000b050, 0x00000000},
962 {0x0000b054, 0x00000000},
963 {0x0000b058, 0x00000000},
964 {0x0000b05c, 0x00000000},
965 {0x0000b060, 0x00000000},
966 {0x0000b064, 0x00000000},
967 {0x0000b068, 0x00000000},
968 {0x0000b06c, 0x00000000},
969 {0x0000b070, 0x00000000},
970 {0x0000b074, 0x00000000},
971 {0x0000b078, 0x00000000},
972 {0x0000b07c, 0x00000000},
973 {0x0000b080, 0x32323232},
974 {0x0000b084, 0x2f2f3232},
975 {0x0000b088, 0x23282a2d},
976 {0x0000b08c, 0x1c1e2123},
977 {0x0000b090, 0x14171919},
978 {0x0000b094, 0x0e0e1214},
979 {0x0000b098, 0x03050707},
980 {0x0000b09c, 0x00030303},
981 {0x0000b0a0, 0x00000000},
982 {0x0000b0a4, 0x00000000},
983 {0x0000b0a8, 0x00000000},
984 {0x0000b0ac, 0x00000000},
985 {0x0000b0b0, 0x00000000},
986 {0x0000b0b4, 0x00000000},
987 {0x0000b0b8, 0x00000000},
988 {0x0000b0bc, 0x00000000},
989 {0x0000b0c0, 0x003f0020},
990 {0x0000b0c4, 0x00400041},
991 {0x0000b0c8, 0x0140005f},
992 {0x0000b0cc, 0x0160015f},
993 {0x0000b0d0, 0x017e017f},
994 {0x0000b0d4, 0x02410242},
995 {0x0000b0d8, 0x025f0240},
996 {0x0000b0dc, 0x027f0260},
997 {0x0000b0e0, 0x0341027e},
998 {0x0000b0e4, 0x035f0340},
999 {0x0000b0e8, 0x037f0360},
1000 {0x0000b0ec, 0x04400441},
1001 {0x0000b0f0, 0x0460045f},
1002 {0x0000b0f4, 0x0541047f},
1003 {0x0000b0f8, 0x055f0540},
1004 {0x0000b0fc, 0x057f0560},
1005 {0x0000b100, 0x06400641},
1006 {0x0000b104, 0x0660065f},
1007 {0x0000b108, 0x067e067f},
1008 {0x0000b10c, 0x07410742},
1009 {0x0000b110, 0x075f0740},
1010 {0x0000b114, 0x077f0760},
1011 {0x0000b118, 0x07800781},
1012 {0x0000b11c, 0x07a0079f},
1013 {0x0000b120, 0x07c107bf},
1014 {0x0000b124, 0x000007c0},
1015 {0x0000b128, 0x00000000},
1016 {0x0000b12c, 0x00000000},
1017 {0x0000b130, 0x00000000},
1018 {0x0000b134, 0x00000000},
1019 {0x0000b138, 0x00000000},
1020 {0x0000b13c, 0x00000000},
1021 {0x0000b140, 0x003f0020},
1022 {0x0000b144, 0x00400041},
1023 {0x0000b148, 0x0140005f},
1024 {0x0000b14c, 0x0160015f},
1025 {0x0000b150, 0x017e017f},
1026 {0x0000b154, 0x02410242},
1027 {0x0000b158, 0x025f0240},
1028 {0x0000b15c, 0x027f0260},
1029 {0x0000b160, 0x0341027e},
1030 {0x0000b164, 0x035f0340},
1031 {0x0000b168, 0x037f0360},
1032 {0x0000b16c, 0x04400441},
1033 {0x0000b170, 0x0460045f},
1034 {0x0000b174, 0x0541047f},
1035 {0x0000b178, 0x055f0540},
1036 {0x0000b17c, 0x057f0560},
1037 {0x0000b180, 0x06400641},
1038 {0x0000b184, 0x0660065f},
1039 {0x0000b188, 0x067e067f},
1040 {0x0000b18c, 0x07410742},
1041 {0x0000b190, 0x075f0740},
1042 {0x0000b194, 0x077f0760},
1043 {0x0000b198, 0x07800781},
1044 {0x0000b19c, 0x07a0079f},
1045 {0x0000b1a0, 0x07c107bf},
1046 {0x0000b1a4, 0x000007c0},
1047 {0x0000b1a8, 0x00000000},
1048 {0x0000b1ac, 0x00000000},
1049 {0x0000b1b0, 0x00000000},
1050 {0x0000b1b4, 0x00000000},
1051 {0x0000b1b8, 0x00000000},
1052 {0x0000b1bc, 0x00000000},
1053 {0x0000b1c0, 0x00000000},
1054 {0x0000b1c4, 0x00000000},
1055 {0x0000b1c8, 0x00000000},
1056 {0x0000b1cc, 0x00000000},
1057 {0x0000b1d0, 0x00000000},
1058 {0x0000b1d4, 0x00000000},
1059 {0x0000b1d8, 0x00000000},
1060 {0x0000b1dc, 0x00000000},
1061 {0x0000b1e0, 0x00000000},
1062 {0x0000b1e4, 0x00000000},
1063 {0x0000b1e8, 0x00000000},
1064 {0x0000b1ec, 0x00000000},
1065 {0x0000b1f0, 0x00000396},
1066 {0x0000b1f4, 0x00000396},
1067 {0x0000b1f8, 0x00000396},
1068 {0x0000b1fc, 0x00000196},
1069};
1070 683
1071static const u32 ar9580_1p0_soc_postamble[][5] = { 684#define ar9580_1p0_soc_postamble ar9300_2p2_soc_postamble
1072 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1073 {0x00007010, 0x00000023, 0x00000023, 0x00000023, 0x00000023},
1074};
1075 685
1076static const u32 ar9580_1p0_high_ob_db_tx_gain_table[][5] = { 686#define ar9580_1p0_high_ob_db_tx_gain_table ar9300Modes_high_ob_db_tx_gain_table_2p2
1077 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1078 {0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
1079 {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
1080 {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
1081 {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
1082 {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
1083 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
1084 {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
1085 {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
1086 {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
1087 {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
1088 {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
1089 {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
1090 {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
1091 {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
1092 {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
1093 {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
1094 {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
1095 {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
1096 {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
1097 {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
1098 {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
1099 {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
1100 {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
1101 {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
1102 {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
1103 {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
1104 {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
1105 {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
1106 {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
1107 {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
1108 {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
1109 {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
1110 {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
1111 {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
1112 {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
1113 {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
1114 {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
1115 {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
1116 {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
1117 {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
1118 {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
1119 {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
1120 {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
1121 {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
1122 {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
1123 {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
1124 {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
1125 {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
1126 {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
1127 {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
1128 {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
1129 {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
1130 {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
1131 {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
1132 {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
1133 {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
1134 {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
1135 {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
1136 {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
1137 {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
1138 {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
1139 {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
1140 {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
1141 {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
1142 {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
1143 {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
1144 {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
1145 {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
1146 {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
1147 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1148 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1149 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1150 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1151 {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
1152 {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
1153 {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
1154 {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
1155 {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
1156 {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
1157 {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
1158 {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
1159 {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
1160 {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
1161 {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
1162 {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
1163 {0x0000b2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
1164 {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
1165 {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
1166 {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
1167 {0x0000c2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
1168 {0x0000c2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
1169 {0x0000c2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
1170 {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
1171 {0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
1172 {0x00016048, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
1173 {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
1174 {0x00016444, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
1175 {0x00016448, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
1176 {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
1177 {0x00016844, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
1178 {0x00016848, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
1179 {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
1180};
1181 687
1182static const u32 ar9580_1p0_soc_preamble[][2] = { 688static const u32 ar9580_1p0_soc_preamble[][2] = {
1183 /* Addr allmodes */ 689 /* Addr allmodes */
@@ -1189,265 +695,7 @@ static const u32 ar9580_1p0_soc_preamble[][2] = {
1189 {0x00007048, 0x00000008}, 695 {0x00007048, 0x00000008},
1190}; 696};
1191 697
1192static const u32 ar9580_1p0_rx_gain_table[][2] = { 698#define ar9580_1p0_rx_gain_table ar9462_common_rx_gain_table_2p0
1193 /* Addr allmodes */
1194 {0x0000a000, 0x00010000},
1195 {0x0000a004, 0x00030002},
1196 {0x0000a008, 0x00050004},
1197 {0x0000a00c, 0x00810080},
1198 {0x0000a010, 0x00830082},
1199 {0x0000a014, 0x01810180},
1200 {0x0000a018, 0x01830182},
1201 {0x0000a01c, 0x01850184},
1202 {0x0000a020, 0x01890188},
1203 {0x0000a024, 0x018b018a},
1204 {0x0000a028, 0x018d018c},
1205 {0x0000a02c, 0x01910190},
1206 {0x0000a030, 0x01930192},
1207 {0x0000a034, 0x01950194},
1208 {0x0000a038, 0x038a0196},
1209 {0x0000a03c, 0x038c038b},
1210 {0x0000a040, 0x0390038d},
1211 {0x0000a044, 0x03920391},
1212 {0x0000a048, 0x03940393},
1213 {0x0000a04c, 0x03960395},
1214 {0x0000a050, 0x00000000},
1215 {0x0000a054, 0x00000000},
1216 {0x0000a058, 0x00000000},
1217 {0x0000a05c, 0x00000000},
1218 {0x0000a060, 0x00000000},
1219 {0x0000a064, 0x00000000},
1220 {0x0000a068, 0x00000000},
1221 {0x0000a06c, 0x00000000},
1222 {0x0000a070, 0x00000000},
1223 {0x0000a074, 0x00000000},
1224 {0x0000a078, 0x00000000},
1225 {0x0000a07c, 0x00000000},
1226 {0x0000a080, 0x22222229},
1227 {0x0000a084, 0x1d1d1d1d},
1228 {0x0000a088, 0x1d1d1d1d},
1229 {0x0000a08c, 0x1d1d1d1d},
1230 {0x0000a090, 0x171d1d1d},
1231 {0x0000a094, 0x11111717},
1232 {0x0000a098, 0x00030311},
1233 {0x0000a09c, 0x00000000},
1234 {0x0000a0a0, 0x00000000},
1235 {0x0000a0a4, 0x00000000},
1236 {0x0000a0a8, 0x00000000},
1237 {0x0000a0ac, 0x00000000},
1238 {0x0000a0b0, 0x00000000},
1239 {0x0000a0b4, 0x00000000},
1240 {0x0000a0b8, 0x00000000},
1241 {0x0000a0bc, 0x00000000},
1242 {0x0000a0c0, 0x001f0000},
1243 {0x0000a0c4, 0x01000101},
1244 {0x0000a0c8, 0x011e011f},
1245 {0x0000a0cc, 0x011c011d},
1246 {0x0000a0d0, 0x02030204},
1247 {0x0000a0d4, 0x02010202},
1248 {0x0000a0d8, 0x021f0200},
1249 {0x0000a0dc, 0x0302021e},
1250 {0x0000a0e0, 0x03000301},
1251 {0x0000a0e4, 0x031e031f},
1252 {0x0000a0e8, 0x0402031d},
1253 {0x0000a0ec, 0x04000401},
1254 {0x0000a0f0, 0x041e041f},
1255 {0x0000a0f4, 0x0502041d},
1256 {0x0000a0f8, 0x05000501},
1257 {0x0000a0fc, 0x051e051f},
1258 {0x0000a100, 0x06010602},
1259 {0x0000a104, 0x061f0600},
1260 {0x0000a108, 0x061d061e},
1261 {0x0000a10c, 0x07020703},
1262 {0x0000a110, 0x07000701},
1263 {0x0000a114, 0x00000000},
1264 {0x0000a118, 0x00000000},
1265 {0x0000a11c, 0x00000000},
1266 {0x0000a120, 0x00000000},
1267 {0x0000a124, 0x00000000},
1268 {0x0000a128, 0x00000000},
1269 {0x0000a12c, 0x00000000},
1270 {0x0000a130, 0x00000000},
1271 {0x0000a134, 0x00000000},
1272 {0x0000a138, 0x00000000},
1273 {0x0000a13c, 0x00000000},
1274 {0x0000a140, 0x001f0000},
1275 {0x0000a144, 0x01000101},
1276 {0x0000a148, 0x011e011f},
1277 {0x0000a14c, 0x011c011d},
1278 {0x0000a150, 0x02030204},
1279 {0x0000a154, 0x02010202},
1280 {0x0000a158, 0x021f0200},
1281 {0x0000a15c, 0x0302021e},
1282 {0x0000a160, 0x03000301},
1283 {0x0000a164, 0x031e031f},
1284 {0x0000a168, 0x0402031d},
1285 {0x0000a16c, 0x04000401},
1286 {0x0000a170, 0x041e041f},
1287 {0x0000a174, 0x0502041d},
1288 {0x0000a178, 0x05000501},
1289 {0x0000a17c, 0x051e051f},
1290 {0x0000a180, 0x06010602},
1291 {0x0000a184, 0x061f0600},
1292 {0x0000a188, 0x061d061e},
1293 {0x0000a18c, 0x07020703},
1294 {0x0000a190, 0x07000701},
1295 {0x0000a194, 0x00000000},
1296 {0x0000a198, 0x00000000},
1297 {0x0000a19c, 0x00000000},
1298 {0x0000a1a0, 0x00000000},
1299 {0x0000a1a4, 0x00000000},
1300 {0x0000a1a8, 0x00000000},
1301 {0x0000a1ac, 0x00000000},
1302 {0x0000a1b0, 0x00000000},
1303 {0x0000a1b4, 0x00000000},
1304 {0x0000a1b8, 0x00000000},
1305 {0x0000a1bc, 0x00000000},
1306 {0x0000a1c0, 0x00000000},
1307 {0x0000a1c4, 0x00000000},
1308 {0x0000a1c8, 0x00000000},
1309 {0x0000a1cc, 0x00000000},
1310 {0x0000a1d0, 0x00000000},
1311 {0x0000a1d4, 0x00000000},
1312 {0x0000a1d8, 0x00000000},
1313 {0x0000a1dc, 0x00000000},
1314 {0x0000a1e0, 0x00000000},
1315 {0x0000a1e4, 0x00000000},
1316 {0x0000a1e8, 0x00000000},
1317 {0x0000a1ec, 0x00000000},
1318 {0x0000a1f0, 0x00000396},
1319 {0x0000a1f4, 0x00000396},
1320 {0x0000a1f8, 0x00000396},
1321 {0x0000a1fc, 0x00000196},
1322 {0x0000b000, 0x00010000},
1323 {0x0000b004, 0x00030002},
1324 {0x0000b008, 0x00050004},
1325 {0x0000b00c, 0x00810080},
1326 {0x0000b010, 0x00830082},
1327 {0x0000b014, 0x01810180},
1328 {0x0000b018, 0x01830182},
1329 {0x0000b01c, 0x01850184},
1330 {0x0000b020, 0x02810280},
1331 {0x0000b024, 0x02830282},
1332 {0x0000b028, 0x02850284},
1333 {0x0000b02c, 0x02890288},
1334 {0x0000b030, 0x028b028a},
1335 {0x0000b034, 0x0388028c},
1336 {0x0000b038, 0x038a0389},
1337 {0x0000b03c, 0x038c038b},
1338 {0x0000b040, 0x0390038d},
1339 {0x0000b044, 0x03920391},
1340 {0x0000b048, 0x03940393},
1341 {0x0000b04c, 0x03960395},
1342 {0x0000b050, 0x00000000},
1343 {0x0000b054, 0x00000000},
1344 {0x0000b058, 0x00000000},
1345 {0x0000b05c, 0x00000000},
1346 {0x0000b060, 0x00000000},
1347 {0x0000b064, 0x00000000},
1348 {0x0000b068, 0x00000000},
1349 {0x0000b06c, 0x00000000},
1350 {0x0000b070, 0x00000000},
1351 {0x0000b074, 0x00000000},
1352 {0x0000b078, 0x00000000},
1353 {0x0000b07c, 0x00000000},
1354 {0x0000b080, 0x2a2d2f32},
1355 {0x0000b084, 0x21232328},
1356 {0x0000b088, 0x19191c1e},
1357 {0x0000b08c, 0x12141417},
1358 {0x0000b090, 0x07070e0e},
1359 {0x0000b094, 0x03030305},
1360 {0x0000b098, 0x00000003},
1361 {0x0000b09c, 0x00000000},
1362 {0x0000b0a0, 0x00000000},
1363 {0x0000b0a4, 0x00000000},
1364 {0x0000b0a8, 0x00000000},
1365 {0x0000b0ac, 0x00000000},
1366 {0x0000b0b0, 0x00000000},
1367 {0x0000b0b4, 0x00000000},
1368 {0x0000b0b8, 0x00000000},
1369 {0x0000b0bc, 0x00000000},
1370 {0x0000b0c0, 0x003f0020},
1371 {0x0000b0c4, 0x00400041},
1372 {0x0000b0c8, 0x0140005f},
1373 {0x0000b0cc, 0x0160015f},
1374 {0x0000b0d0, 0x017e017f},
1375 {0x0000b0d4, 0x02410242},
1376 {0x0000b0d8, 0x025f0240},
1377 {0x0000b0dc, 0x027f0260},
1378 {0x0000b0e0, 0x0341027e},
1379 {0x0000b0e4, 0x035f0340},
1380 {0x0000b0e8, 0x037f0360},
1381 {0x0000b0ec, 0x04400441},
1382 {0x0000b0f0, 0x0460045f},
1383 {0x0000b0f4, 0x0541047f},
1384 {0x0000b0f8, 0x055f0540},
1385 {0x0000b0fc, 0x057f0560},
1386 {0x0000b100, 0x06400641},
1387 {0x0000b104, 0x0660065f},
1388 {0x0000b108, 0x067e067f},
1389 {0x0000b10c, 0x07410742},
1390 {0x0000b110, 0x075f0740},
1391 {0x0000b114, 0x077f0760},
1392 {0x0000b118, 0x07800781},
1393 {0x0000b11c, 0x07a0079f},
1394 {0x0000b120, 0x07c107bf},
1395 {0x0000b124, 0x000007c0},
1396 {0x0000b128, 0x00000000},
1397 {0x0000b12c, 0x00000000},
1398 {0x0000b130, 0x00000000},
1399 {0x0000b134, 0x00000000},
1400 {0x0000b138, 0x00000000},
1401 {0x0000b13c, 0x00000000},
1402 {0x0000b140, 0x003f0020},
1403 {0x0000b144, 0x00400041},
1404 {0x0000b148, 0x0140005f},
1405 {0x0000b14c, 0x0160015f},
1406 {0x0000b150, 0x017e017f},
1407 {0x0000b154, 0x02410242},
1408 {0x0000b158, 0x025f0240},
1409 {0x0000b15c, 0x027f0260},
1410 {0x0000b160, 0x0341027e},
1411 {0x0000b164, 0x035f0340},
1412 {0x0000b168, 0x037f0360},
1413 {0x0000b16c, 0x04400441},
1414 {0x0000b170, 0x0460045f},
1415 {0x0000b174, 0x0541047f},
1416 {0x0000b178, 0x055f0540},
1417 {0x0000b17c, 0x057f0560},
1418 {0x0000b180, 0x06400641},
1419 {0x0000b184, 0x0660065f},
1420 {0x0000b188, 0x067e067f},
1421 {0x0000b18c, 0x07410742},
1422 {0x0000b190, 0x075f0740},
1423 {0x0000b194, 0x077f0760},
1424 {0x0000b198, 0x07800781},
1425 {0x0000b19c, 0x07a0079f},
1426 {0x0000b1a0, 0x07c107bf},
1427 {0x0000b1a4, 0x000007c0},
1428 {0x0000b1a8, 0x00000000},
1429 {0x0000b1ac, 0x00000000},
1430 {0x0000b1b0, 0x00000000},
1431 {0x0000b1b4, 0x00000000},
1432 {0x0000b1b8, 0x00000000},
1433 {0x0000b1bc, 0x00000000},
1434 {0x0000b1c0, 0x00000000},
1435 {0x0000b1c4, 0x00000000},
1436 {0x0000b1c8, 0x00000000},
1437 {0x0000b1cc, 0x00000000},
1438 {0x0000b1d0, 0x00000000},
1439 {0x0000b1d4, 0x00000000},
1440 {0x0000b1d8, 0x00000000},
1441 {0x0000b1dc, 0x00000000},
1442 {0x0000b1e0, 0x00000000},
1443 {0x0000b1e4, 0x00000000},
1444 {0x0000b1e8, 0x00000000},
1445 {0x0000b1ec, 0x00000000},
1446 {0x0000b1f0, 0x00000396},
1447 {0x0000b1f4, 0x00000396},
1448 {0x0000b1f8, 0x00000396},
1449 {0x0000b1fc, 0x00000196},
1450};
1451 699
1452static const u32 ar9580_1p0_radio_core[][2] = { 700static const u32 ar9580_1p0_radio_core[][2] = {
1453 /* Addr allmodes */ 701 /* Addr allmodes */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 4866550ddd96..79840d6deef2 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -308,6 +308,7 @@ struct ath_rx {
308 u8 defant; 308 u8 defant;
309 u8 rxotherant; 309 u8 rxotherant;
310 u32 *rxlink; 310 u32 *rxlink;
311 u32 num_pkts;
311 unsigned int rxfilter; 312 unsigned int rxfilter;
312 spinlock_t rxbuflock; 313 spinlock_t rxbuflock;
313 struct list_head rxbuf; 314 struct list_head rxbuf;
@@ -326,6 +327,9 @@ int ath_rx_init(struct ath_softc *sc, int nbufs);
326void ath_rx_cleanup(struct ath_softc *sc); 327void ath_rx_cleanup(struct ath_softc *sc);
327int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp); 328int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp);
328struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype); 329struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype);
330void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq);
331void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq);
332void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq);
329void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq); 333void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
330bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx); 334bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx);
331void ath_draintxq(struct ath_softc *sc, 335void ath_draintxq(struct ath_softc *sc,
@@ -415,9 +419,9 @@ int ath_beaconq_config(struct ath_softc *sc);
415void ath_set_beacon(struct ath_softc *sc); 419void ath_set_beacon(struct ath_softc *sc);
416void ath9k_set_beaconing_status(struct ath_softc *sc, bool status); 420void ath9k_set_beaconing_status(struct ath_softc *sc, bool status);
417 421
418/*******/ 422/*******************/
419/* ANI */ 423/* Link Monitoring */
420/*******/ 424/*******************/
421 425
422#define ATH_STA_SHORT_CALINTERVAL 1000 /* 1 second */ 426#define ATH_STA_SHORT_CALINTERVAL 1000 /* 1 second */
423#define ATH_AP_SHORT_CALINTERVAL 100 /* 100 ms */ 427#define ATH_AP_SHORT_CALINTERVAL 100 /* 100 ms */
@@ -428,7 +432,9 @@ void ath9k_set_beaconing_status(struct ath_softc *sc, bool status);
428#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */ 432#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
429 433
430#define ATH_PAPRD_TIMEOUT 100 /* msecs */ 434#define ATH_PAPRD_TIMEOUT 100 /* msecs */
435#define ATH_PLL_WORK_INTERVAL 100
431 436
437void ath_tx_complete_poll_work(struct work_struct *work);
432void ath_reset_work(struct work_struct *work); 438void ath_reset_work(struct work_struct *work);
433void ath_hw_check(struct work_struct *work); 439void ath_hw_check(struct work_struct *work);
434void ath_hw_pll_work(struct work_struct *work); 440void ath_hw_pll_work(struct work_struct *work);
@@ -437,22 +443,31 @@ void ath_start_rx_poll(struct ath_softc *sc, u8 nbeacon);
437void ath_paprd_calibrate(struct work_struct *work); 443void ath_paprd_calibrate(struct work_struct *work);
438void ath_ani_calibrate(unsigned long data); 444void ath_ani_calibrate(unsigned long data);
439void ath_start_ani(struct ath_common *common); 445void ath_start_ani(struct ath_common *common);
446int ath_update_survey_stats(struct ath_softc *sc);
447void ath_update_survey_nf(struct ath_softc *sc, int channel);
440 448
441/**********/ 449/**********/
442/* BTCOEX */ 450/* BTCOEX */
443/**********/ 451/**********/
444 452
453enum bt_op_flags {
454 BT_OP_PRIORITY_DETECTED,
455 BT_OP_SCAN,
456};
457
445struct ath_btcoex { 458struct ath_btcoex {
446 bool hw_timer_enabled; 459 bool hw_timer_enabled;
447 spinlock_t btcoex_lock; 460 spinlock_t btcoex_lock;
448 struct timer_list period_timer; /* Timer for BT period */ 461 struct timer_list period_timer; /* Timer for BT period */
449 u32 bt_priority_cnt; 462 u32 bt_priority_cnt;
450 unsigned long bt_priority_time; 463 unsigned long bt_priority_time;
464 unsigned long op_flags;
451 int bt_stomp_type; /* Types of BT stomping */ 465 int bt_stomp_type; /* Types of BT stomping */
452 u32 btcoex_no_stomp; /* in usec */ 466 u32 btcoex_no_stomp; /* in usec */
453 u32 btcoex_period; /* in usec */ 467 u32 btcoex_period; /* in usec */
454 u32 btscan_no_stomp; /* in usec */ 468 u32 btscan_no_stomp; /* in usec */
455 u32 duty_cycle; 469 u32 duty_cycle;
470 u32 bt_wait_time;
456 struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */ 471 struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */
457 struct ath_mci_profile mci; 472 struct ath_mci_profile mci;
458}; 473};
@@ -466,6 +481,7 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc);
466void ath9k_btcoex_timer_pause(struct ath_softc *sc); 481void ath9k_btcoex_timer_pause(struct ath_softc *sc);
467void ath9k_btcoex_handle_interrupt(struct ath_softc *sc, u32 status); 482void ath9k_btcoex_handle_interrupt(struct ath_softc *sc, u32 status);
468u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen); 483u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen);
484void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc);
469#else 485#else
470static inline int ath9k_init_btcoex(struct ath_softc *sc) 486static inline int ath9k_init_btcoex(struct ath_softc *sc)
471{ 487{
@@ -489,6 +505,9 @@ static inline u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc,
489{ 505{
490 return 0; 506 return 0;
491} 507}
508static inline void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc)
509{
510}
492#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */ 511#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
493 512
494/********************/ 513/********************/
@@ -514,8 +533,10 @@ static inline void ath_deinit_leds(struct ath_softc *sc)
514} 533}
515#endif 534#endif
516 535
517 536/*******************************/
518/* Antenna diversity/combining */ 537/* Antenna diversity/combining */
538/*******************************/
539
519#define ATH_ANT_RX_CURRENT_SHIFT 4 540#define ATH_ANT_RX_CURRENT_SHIFT 4
520#define ATH_ANT_RX_MAIN_SHIFT 2 541#define ATH_ANT_RX_MAIN_SHIFT 2
521#define ATH_ANT_RX_MASK 0x3 542#define ATH_ANT_RX_MASK 0x3
@@ -568,6 +589,9 @@ struct ath_ant_comb {
568 unsigned long scan_start_time; 589 unsigned long scan_start_time;
569}; 590};
570 591
592void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs);
593void ath_ant_comb_update(struct ath_softc *sc);
594
571/********************/ 595/********************/
572/* Main driver core */ 596/* Main driver core */
573/********************/ 597/********************/
@@ -585,15 +609,15 @@ struct ath_ant_comb {
585#define ATH_TXPOWER_MAX 100 /* .5 dBm units */ 609#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
586#define ATH_RATE_DUMMY_MARKER 0 610#define ATH_RATE_DUMMY_MARKER 0
587 611
588#define SC_OP_INVALID BIT(0) 612enum sc_op_flags {
589#define SC_OP_BEACONS BIT(1) 613 SC_OP_INVALID,
590#define SC_OP_OFFCHANNEL BIT(2) 614 SC_OP_BEACONS,
591#define SC_OP_RXFLUSH BIT(3) 615 SC_OP_RXFLUSH,
592#define SC_OP_TSF_RESET BIT(4) 616 SC_OP_TSF_RESET,
593#define SC_OP_BT_PRIORITY_DETECTED BIT(5) 617 SC_OP_ANI_RUN,
594#define SC_OP_BT_SCAN BIT(6) 618 SC_OP_PRIM_STA_VIF,
595#define SC_OP_ANI_RUN BIT(7) 619 SC_OP_HW_RESET,
596#define SC_OP_PRIM_STA_VIF BIT(8) 620};
597 621
598/* Powersave flags */ 622/* Powersave flags */
599#define PS_WAIT_FOR_BEACON BIT(0) 623#define PS_WAIT_FOR_BEACON BIT(0)
@@ -639,9 +663,9 @@ struct ath_softc {
639 struct completion paprd_complete; 663 struct completion paprd_complete;
640 664
641 unsigned int hw_busy_count; 665 unsigned int hw_busy_count;
666 unsigned long sc_flags;
642 667
643 u32 intrstatus; 668 u32 intrstatus;
644 u32 sc_flags; /* SC_OP_* */
645 u16 ps_flags; /* PS_* */ 669 u16 ps_flags; /* PS_* */
646 u16 curtxpow; 670 u16 curtxpow;
647 bool ps_enabled; 671 bool ps_enabled;
@@ -679,6 +703,7 @@ struct ath_softc {
679#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT 703#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
680 struct ath_btcoex btcoex; 704 struct ath_btcoex btcoex;
681 struct ath_mci_coex mci_coex; 705 struct ath_mci_coex mci_coex;
706 struct work_struct mci_work;
682#endif 707#endif
683 708
684 struct ath_descdma txsdma; 709 struct ath_descdma txsdma;
@@ -701,6 +726,7 @@ extern int ath9k_modparam_nohwcrypt;
701extern int led_blink; 726extern int led_blink;
702extern bool is_ath9k_unloaded; 727extern bool is_ath9k_unloaded;
703 728
729u8 ath9k_parse_mpdudensity(u8 mpdudensity);
704irqreturn_t ath_isr(int irq, void *dev); 730irqreturn_t ath_isr(int irq, void *dev);
705int ath9k_init_device(u16 devid, struct ath_softc *sc, 731int ath9k_init_device(u16 devid, struct ath_softc *sc,
706 const struct ath_bus_ops *bus_ops); 732 const struct ath_bus_ops *bus_ops);
@@ -737,5 +763,4 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
737 struct ieee80211_vif *vif, 763 struct ieee80211_vif *vif,
738 struct ath9k_vif_iter_data *iter_data); 764 struct ath9k_vif_iter_data *iter_data);
739 765
740
741#endif /* ATH9K_H */ 766#endif /* ATH9K_H */
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 11bc55e3d697..40775da8941e 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -48,7 +48,10 @@ int ath_beaconq_config(struct ath_softc *sc)
48 txq = sc->tx.txq_map[WME_AC_BE]; 48 txq = sc->tx.txq_map[WME_AC_BE];
49 ath9k_hw_get_txq_props(ah, txq->axq_qnum, &qi_be); 49 ath9k_hw_get_txq_props(ah, txq->axq_qnum, &qi_be);
50 qi.tqi_aifs = qi_be.tqi_aifs; 50 qi.tqi_aifs = qi_be.tqi_aifs;
51 qi.tqi_cwmin = 4*qi_be.tqi_cwmin; 51 if (ah->slottime == ATH9K_SLOT_TIME_20)
52 qi.tqi_cwmin = 2*qi_be.tqi_cwmin;
53 else
54 qi.tqi_cwmin = 4*qi_be.tqi_cwmin;
52 qi.tqi_cwmax = qi_be.tqi_cwmax; 55 qi.tqi_cwmax = qi_be.tqi_cwmax;
53 } 56 }
54 57
@@ -387,7 +390,7 @@ void ath_beacon_tasklet(unsigned long data)
387 } else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) { 390 } else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) {
388 ath_dbg(common, BSTUCK, "beacon is officially stuck\n"); 391 ath_dbg(common, BSTUCK, "beacon is officially stuck\n");
389 sc->beacon.bmisscnt = 0; 392 sc->beacon.bmisscnt = 0;
390 sc->sc_flags |= SC_OP_TSF_RESET; 393 set_bit(SC_OP_TSF_RESET, &sc->sc_flags);
391 ieee80211_queue_work(sc->hw, &sc->hw_reset_work); 394 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
392 } 395 }
393 396
@@ -477,16 +480,16 @@ static void ath9k_beacon_init(struct ath_softc *sc,
477 u32 next_beacon, 480 u32 next_beacon,
478 u32 beacon_period) 481 u32 beacon_period)
479{ 482{
480 if (sc->sc_flags & SC_OP_TSF_RESET) { 483 if (test_bit(SC_OP_TSF_RESET, &sc->sc_flags)) {
481 ath9k_ps_wakeup(sc); 484 ath9k_ps_wakeup(sc);
482 ath9k_hw_reset_tsf(sc->sc_ah); 485 ath9k_hw_reset_tsf(sc->sc_ah);
483 } 486 }
484 487
485 ath9k_hw_beaconinit(sc->sc_ah, next_beacon, beacon_period); 488 ath9k_hw_beaconinit(sc->sc_ah, next_beacon, beacon_period);
486 489
487 if (sc->sc_flags & SC_OP_TSF_RESET) { 490 if (test_bit(SC_OP_TSF_RESET, &sc->sc_flags)) {
488 ath9k_ps_restore(sc); 491 ath9k_ps_restore(sc);
489 sc->sc_flags &= ~SC_OP_TSF_RESET; 492 clear_bit(SC_OP_TSF_RESET, &sc->sc_flags);
490 } 493 }
491} 494}
492 495
@@ -516,7 +519,7 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
516 /* Set the computed AP beacon timers */ 519 /* Set the computed AP beacon timers */
517 520
518 ath9k_hw_disable_interrupts(ah); 521 ath9k_hw_disable_interrupts(ah);
519 sc->sc_flags |= SC_OP_TSF_RESET; 522 set_bit(SC_OP_TSF_RESET, &sc->sc_flags);
520 ath9k_beacon_init(sc, nexttbtt, intval); 523 ath9k_beacon_init(sc, nexttbtt, intval);
521 sc->beacon.bmisscnt = 0; 524 sc->beacon.bmisscnt = 0;
522 ath9k_hw_set_interrupts(ah); 525 ath9k_hw_set_interrupts(ah);
@@ -659,7 +662,7 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
659 u32 tsf, intval, nexttbtt; 662 u32 tsf, intval, nexttbtt;
660 663
661 ath9k_reset_beacon_status(sc); 664 ath9k_reset_beacon_status(sc);
662 if (!(sc->sc_flags & SC_OP_BEACONS)) 665 if (!test_bit(SC_OP_BEACONS, &sc->sc_flags))
663 ath9k_hw_settsf64(ah, sc->beacon.bc_tstamp); 666 ath9k_hw_settsf64(ah, sc->beacon.bc_tstamp);
664 667
665 intval = TU_TO_USEC(conf->beacon_interval); 668 intval = TU_TO_USEC(conf->beacon_interval);
@@ -724,7 +727,7 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc,
724 */ 727 */
725 if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) && 728 if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
726 (vif->type == NL80211_IFTYPE_STATION) && 729 (vif->type == NL80211_IFTYPE_STATION) &&
727 (sc->sc_flags & SC_OP_BEACONS) && 730 test_bit(SC_OP_BEACONS, &sc->sc_flags) &&
728 !avp->primary_sta_vif) { 731 !avp->primary_sta_vif) {
729 ath_dbg(common, CONFIG, 732 ath_dbg(common, CONFIG,
730 "Beacon already configured for a station interface\n"); 733 "Beacon already configured for a station interface\n");
@@ -810,7 +813,7 @@ void ath_set_beacon(struct ath_softc *sc)
810 return; 813 return;
811 } 814 }
812 815
813 sc->sc_flags |= SC_OP_BEACONS; 816 set_bit(SC_OP_BEACONS, &sc->sc_flags);
814} 817}
815 818
816void ath9k_set_beaconing_status(struct ath_softc *sc, bool status) 819void ath9k_set_beaconing_status(struct ath_softc *sc, bool status)
@@ -818,7 +821,7 @@ void ath9k_set_beaconing_status(struct ath_softc *sc, bool status)
818 struct ath_hw *ah = sc->sc_ah; 821 struct ath_hw *ah = sc->sc_ah;
819 822
820 if (!ath_has_valid_bslot(sc)) { 823 if (!ath_has_valid_bslot(sc)) {
821 sc->sc_flags &= ~SC_OP_BEACONS; 824 clear_bit(SC_OP_BEACONS, &sc->sc_flags);
822 return; 825 return;
823 } 826 }
824 827
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index 1ca6da80d4ad..acd437384fe4 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -336,10 +336,16 @@ static void ar9003_btcoex_bt_stomp(struct ath_hw *ah,
336 enum ath_stomp_type stomp_type) 336 enum ath_stomp_type stomp_type)
337{ 337{
338 struct ath_btcoex_hw *btcoex = &ah->btcoex_hw; 338 struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
339 const u32 *weight = AR_SREV_9462(ah) ? ar9003_wlan_weights[stomp_type] : 339 const u32 *weight = ar9003_wlan_weights[stomp_type];
340 ar9462_wlan_weights[stomp_type];
341 int i; 340 int i;
342 341
342 if (AR_SREV_9462(ah)) {
343 if ((stomp_type == ATH_BTCOEX_STOMP_LOW) &&
344 btcoex->mci.stomp_ftp)
345 stomp_type = ATH_BTCOEX_STOMP_LOW_FTP;
346 weight = ar9462_wlan_weights[stomp_type];
347 }
348
343 for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) { 349 for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) {
344 btcoex->bt_weight[i] = AR9300_BT_WGHT; 350 btcoex->bt_weight[i] = AR9300_BT_WGHT;
345 btcoex->wlan_weight[i] = weight[i]; 351 btcoex->wlan_weight[i] = weight[i];
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index 3a1e1cfabd5e..20092f98658f 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -36,6 +36,9 @@
36#define ATH_BT_CNT_THRESHOLD 3 36#define ATH_BT_CNT_THRESHOLD 3
37#define ATH_BT_CNT_SCAN_THRESHOLD 15 37#define ATH_BT_CNT_SCAN_THRESHOLD 15
38 38
39#define ATH_BTCOEX_RX_WAIT_TIME 100
40#define ATH_BTCOEX_STOMP_FTP_THRESH 5
41
39#define AR9300_NUM_BT_WEIGHTS 4 42#define AR9300_NUM_BT_WEIGHTS 4
40#define AR9300_NUM_WLAN_WEIGHTS 4 43#define AR9300_NUM_WLAN_WEIGHTS 4
41/* Defines the BT AR_BT_COEX_WGHT used */ 44/* Defines the BT AR_BT_COEX_WGHT used */
@@ -80,6 +83,7 @@ struct ath9k_hw_mci {
80 u8 bt_ver_major; 83 u8 bt_ver_major;
81 u8 bt_ver_minor; 84 u8 bt_ver_minor;
82 u8 bt_state; 85 u8 bt_state;
86 u8 stomp_ftp;
83}; 87};
84 88
85struct ath_btcoex_hw { 89struct ath_btcoex_hw {
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index fde700c4e490..5c3192ffc196 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -205,10 +205,10 @@ static ssize_t write_file_disable_ani(struct file *file,
205 common->disable_ani = !!disable_ani; 205 common->disable_ani = !!disable_ani;
206 206
207 if (disable_ani) { 207 if (disable_ani) {
208 sc->sc_flags &= ~SC_OP_ANI_RUN; 208 clear_bit(SC_OP_ANI_RUN, &sc->sc_flags);
209 del_timer_sync(&common->ani.timer); 209 del_timer_sync(&common->ani.timer);
210 } else { 210 } else {
211 sc->sc_flags |= SC_OP_ANI_RUN; 211 set_bit(SC_OP_ANI_RUN, &sc->sc_flags);
212 ath_start_ani(common); 212 ath_start_ani(common);
213 } 213 }
214 214
@@ -348,8 +348,6 @@ void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status)
348 sc->debug.stats.istats.txok++; 348 sc->debug.stats.istats.txok++;
349 if (status & ATH9K_INT_TXURN) 349 if (status & ATH9K_INT_TXURN)
350 sc->debug.stats.istats.txurn++; 350 sc->debug.stats.istats.txurn++;
351 if (status & ATH9K_INT_MIB)
352 sc->debug.stats.istats.mib++;
353 if (status & ATH9K_INT_RXPHY) 351 if (status & ATH9K_INT_RXPHY)
354 sc->debug.stats.istats.rxphyerr++; 352 sc->debug.stats.istats.rxphyerr++;
355 if (status & ATH9K_INT_RXKCM) 353 if (status & ATH9K_INT_RXKCM)
@@ -374,6 +372,8 @@ void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status)
374 sc->debug.stats.istats.dtim++; 372 sc->debug.stats.istats.dtim++;
375 if (status & ATH9K_INT_TSFOOR) 373 if (status & ATH9K_INT_TSFOOR)
376 sc->debug.stats.istats.tsfoor++; 374 sc->debug.stats.istats.tsfoor++;
375 if (status & ATH9K_INT_MCI)
376 sc->debug.stats.istats.mci++;
377} 377}
378 378
379static ssize_t read_file_interrupt(struct file *file, char __user *user_buf, 379static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
@@ -418,6 +418,7 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
418 PR_IS("DTIMSYNC", dtimsync); 418 PR_IS("DTIMSYNC", dtimsync);
419 PR_IS("DTIM", dtim); 419 PR_IS("DTIM", dtim);
420 PR_IS("TSFOOR", tsfoor); 420 PR_IS("TSFOOR", tsfoor);
421 PR_IS("MCI", mci);
421 PR_IS("TOTAL", total); 422 PR_IS("TOTAL", total);
422 423
423 len += snprintf(buf + len, mxlen - len, 424 len += snprintf(buf + len, mxlen - len,
@@ -1318,7 +1319,7 @@ static int open_file_bb_mac_samps(struct inode *inode, struct file *file)
1318 u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask; 1319 u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
1319 u8 nread; 1320 u8 nread;
1320 1321
1321 if (sc->sc_flags & SC_OP_INVALID) 1322 if (test_bit(SC_OP_INVALID, &sc->sc_flags))
1322 return -EAGAIN; 1323 return -EAGAIN;
1323 1324
1324 buf = vmalloc(size); 1325 buf = vmalloc(size);
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index c34da09d9103..d0f851cea43a 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -86,6 +86,7 @@ struct ath_interrupt_stats {
86 u32 dtim; 86 u32 dtim;
87 u32 bb_watchdog; 87 u32 bb_watchdog;
88 u32 tsfoor; 88 u32 tsfoor;
89 u32 mci;
89 90
90 /* Sync-cause stats */ 91 /* Sync-cause stats */
91 u32 sync_cause_all; 92 u32 sync_cause_all;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 4322ac80c203..7d075105a85d 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -135,7 +135,7 @@ static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
135 if (!dump_base_hdr) { 135 if (!dump_base_hdr) {
136 len += snprintf(buf + len, size - len, 136 len += snprintf(buf + len, size - len,
137 "%20s :\n", "2GHz modal Header"); 137 "%20s :\n", "2GHz modal Header");
138 len += ath9k_dump_4k_modal_eeprom(buf, len, size, 138 len = ath9k_dump_4k_modal_eeprom(buf, len, size,
139 &eep->modalHeader); 139 &eep->modalHeader);
140 goto out; 140 goto out;
141 } 141 }
@@ -188,8 +188,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
188{ 188{
189#define EEPROM_4K_SIZE (sizeof(struct ar5416_eeprom_4k) / sizeof(u16)) 189#define EEPROM_4K_SIZE (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
190 struct ath_common *common = ath9k_hw_common(ah); 190 struct ath_common *common = ath9k_hw_common(ah);
191 struct ar5416_eeprom_4k *eep = 191 struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
192 (struct ar5416_eeprom_4k *) &ah->eeprom.map4k;
193 u16 *eepdata, temp, magic, magic2; 192 u16 *eepdata, temp, magic, magic2;
194 u32 sum = 0, el; 193 u32 sum = 0, el;
195 bool need_swap = false; 194 bool need_swap = false;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index aa614767adff..cd742fb944c2 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -132,7 +132,7 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
132 if (!dump_base_hdr) { 132 if (!dump_base_hdr) {
133 len += snprintf(buf + len, size - len, 133 len += snprintf(buf + len, size - len,
134 "%20s :\n", "2GHz modal Header"); 134 "%20s :\n", "2GHz modal Header");
135 len += ar9287_dump_modal_eeprom(buf, len, size, 135 len = ar9287_dump_modal_eeprom(buf, len, size,
136 &eep->modalHeader); 136 &eep->modalHeader);
137 goto out; 137 goto out;
138 } 138 }
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index b5fba8b18b8b..a8ac30a00720 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -211,11 +211,11 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
211 if (!dump_base_hdr) { 211 if (!dump_base_hdr) {
212 len += snprintf(buf + len, size - len, 212 len += snprintf(buf + len, size - len,
213 "%20s :\n", "2GHz modal Header"); 213 "%20s :\n", "2GHz modal Header");
214 len += ath9k_def_dump_modal_eeprom(buf, len, size, 214 len = ath9k_def_dump_modal_eeprom(buf, len, size,
215 &eep->modalHeader[0]); 215 &eep->modalHeader[0]);
216 len += snprintf(buf + len, size - len, 216 len += snprintf(buf + len, size - len,
217 "%20s :\n", "5GHz modal Header"); 217 "%20s :\n", "5GHz modal Header");
218 len += ath9k_def_dump_modal_eeprom(buf, len, size, 218 len = ath9k_def_dump_modal_eeprom(buf, len, size,
219 &eep->modalHeader[1]); 219 &eep->modalHeader[1]);
220 goto out; 220 goto out;
221 } 221 }
@@ -264,8 +264,7 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
264 264
265static int ath9k_hw_def_check_eeprom(struct ath_hw *ah) 265static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
266{ 266{
267 struct ar5416_eeprom_def *eep = 267 struct ar5416_eeprom_def *eep = &ah->eeprom.def;
268 (struct ar5416_eeprom_def *) &ah->eeprom.def;
269 struct ath_common *common = ath9k_hw_common(ah); 268 struct ath_common *common = ath9k_hw_common(ah);
270 u16 *eepdata, temp, magic, magic2; 269 u16 *eepdata, temp, magic, magic2;
271 u32 sum = 0, el; 270 u32 sum = 0, el;
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 281a9af0f1b6..bacdb8fb4ef4 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -132,17 +132,18 @@ static void ath_detect_bt_priority(struct ath_softc *sc)
132 132
133 if (time_after(jiffies, btcoex->bt_priority_time + 133 if (time_after(jiffies, btcoex->bt_priority_time +
134 msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) { 134 msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
135 sc->sc_flags &= ~(SC_OP_BT_PRIORITY_DETECTED | SC_OP_BT_SCAN); 135 clear_bit(BT_OP_PRIORITY_DETECTED, &btcoex->op_flags);
136 clear_bit(BT_OP_SCAN, &btcoex->op_flags);
136 /* Detect if colocated bt started scanning */ 137 /* Detect if colocated bt started scanning */
137 if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) { 138 if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) {
138 ath_dbg(ath9k_hw_common(sc->sc_ah), BTCOEX, 139 ath_dbg(ath9k_hw_common(sc->sc_ah), BTCOEX,
139 "BT scan detected\n"); 140 "BT scan detected\n");
140 sc->sc_flags |= (SC_OP_BT_SCAN | 141 set_bit(BT_OP_PRIORITY_DETECTED, &btcoex->op_flags);
141 SC_OP_BT_PRIORITY_DETECTED); 142 set_bit(BT_OP_SCAN, &btcoex->op_flags);
142 } else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) { 143 } else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
143 ath_dbg(ath9k_hw_common(sc->sc_ah), BTCOEX, 144 ath_dbg(ath9k_hw_common(sc->sc_ah), BTCOEX,
144 "BT priority traffic detected\n"); 145 "BT priority traffic detected\n");
145 sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED; 146 set_bit(BT_OP_PRIORITY_DETECTED, &btcoex->op_flags);
146 } 147 }
147 148
148 btcoex->bt_priority_cnt = 0; 149 btcoex->bt_priority_cnt = 0;
@@ -190,13 +191,34 @@ static void ath_btcoex_period_timer(unsigned long data)
190 struct ath_softc *sc = (struct ath_softc *) data; 191 struct ath_softc *sc = (struct ath_softc *) data;
191 struct ath_hw *ah = sc->sc_ah; 192 struct ath_hw *ah = sc->sc_ah;
192 struct ath_btcoex *btcoex = &sc->btcoex; 193 struct ath_btcoex *btcoex = &sc->btcoex;
194 struct ath_mci_profile *mci = &btcoex->mci;
193 u32 timer_period; 195 u32 timer_period;
194 bool is_btscan; 196 bool is_btscan;
197 unsigned long flags;
198
199 spin_lock_irqsave(&sc->sc_pm_lock, flags);
200 if (sc->sc_ah->power_mode == ATH9K_PM_NETWORK_SLEEP) {
201 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
202 goto skip_hw_wakeup;
203 }
204 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
195 205
196 ath9k_ps_wakeup(sc); 206 ath9k_ps_wakeup(sc);
197 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI)) 207 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
198 ath_detect_bt_priority(sc); 208 ath_detect_bt_priority(sc);
199 is_btscan = sc->sc_flags & SC_OP_BT_SCAN; 209 is_btscan = test_bit(BT_OP_SCAN, &btcoex->op_flags);
210
211 btcoex->bt_wait_time += btcoex->btcoex_period;
212 if (btcoex->bt_wait_time > ATH_BTCOEX_RX_WAIT_TIME) {
213 if (ar9003_mci_state(ah, MCI_STATE_NEED_FTP_STOMP) &&
214 (mci->num_pan || mci->num_other_acl))
215 ah->btcoex_hw.mci.stomp_ftp =
216 (sc->rx.num_pkts < ATH_BTCOEX_STOMP_FTP_THRESH);
217 else
218 ah->btcoex_hw.mci.stomp_ftp = false;
219 btcoex->bt_wait_time = 0;
220 sc->rx.num_pkts = 0;
221 }
200 222
201 spin_lock_bh(&btcoex->btcoex_lock); 223 spin_lock_bh(&btcoex->btcoex_lock);
202 224
@@ -218,9 +240,9 @@ static void ath_btcoex_period_timer(unsigned long data)
218 } 240 }
219 241
220 ath9k_ps_restore(sc); 242 ath9k_ps_restore(sc);
221 timer_period = btcoex->btcoex_period / 1000; 243skip_hw_wakeup:
222 mod_timer(&btcoex->period_timer, jiffies + 244 timer_period = btcoex->btcoex_period;
223 msecs_to_jiffies(timer_period)); 245 mod_timer(&btcoex->period_timer, jiffies + msecs_to_jiffies(timer_period));
224} 246}
225 247
226/* 248/*
@@ -233,14 +255,14 @@ static void ath_btcoex_no_stomp_timer(void *arg)
233 struct ath_hw *ah = sc->sc_ah; 255 struct ath_hw *ah = sc->sc_ah;
234 struct ath_btcoex *btcoex = &sc->btcoex; 256 struct ath_btcoex *btcoex = &sc->btcoex;
235 struct ath_common *common = ath9k_hw_common(ah); 257 struct ath_common *common = ath9k_hw_common(ah);
236 bool is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
237 258
238 ath_dbg(common, BTCOEX, "no stomp timer running\n"); 259 ath_dbg(common, BTCOEX, "no stomp timer running\n");
239 260
240 ath9k_ps_wakeup(sc); 261 ath9k_ps_wakeup(sc);
241 spin_lock_bh(&btcoex->btcoex_lock); 262 spin_lock_bh(&btcoex->btcoex_lock);
242 263
243 if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW || is_btscan) 264 if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW ||
265 test_bit(BT_OP_SCAN, &btcoex->op_flags))
244 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE); 266 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
245 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL) 267 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
246 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW); 268 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW);
@@ -254,10 +276,10 @@ static int ath_init_btcoex_timer(struct ath_softc *sc)
254{ 276{
255 struct ath_btcoex *btcoex = &sc->btcoex; 277 struct ath_btcoex *btcoex = &sc->btcoex;
256 278
257 btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000; 279 btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD;
258 btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) * 280 btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) * 1000 *
259 btcoex->btcoex_period / 100; 281 btcoex->btcoex_period / 100;
260 btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) * 282 btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) * 1000 *
261 btcoex->btcoex_period / 100; 283 btcoex->btcoex_period / 100;
262 284
263 setup_timer(&btcoex->period_timer, ath_btcoex_period_timer, 285 setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
@@ -292,7 +314,8 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc)
292 314
293 btcoex->bt_priority_cnt = 0; 315 btcoex->bt_priority_cnt = 0;
294 btcoex->bt_priority_time = jiffies; 316 btcoex->bt_priority_time = jiffies;
295 sc->sc_flags &= ~(SC_OP_BT_PRIORITY_DETECTED | SC_OP_BT_SCAN); 317 clear_bit(BT_OP_PRIORITY_DETECTED, &btcoex->op_flags);
318 clear_bit(BT_OP_SCAN, &btcoex->op_flags);
296 319
297 mod_timer(&btcoex->period_timer, jiffies); 320 mod_timer(&btcoex->period_timer, jiffies);
298} 321}
@@ -314,14 +337,22 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc)
314 btcoex->hw_timer_enabled = false; 337 btcoex->hw_timer_enabled = false;
315} 338}
316 339
340void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc)
341{
342 struct ath_btcoex *btcoex = &sc->btcoex;
343
344 ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
345}
346
317u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen) 347u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen)
318{ 348{
349 struct ath_btcoex *btcoex = &sc->btcoex;
319 struct ath_mci_profile *mci = &sc->btcoex.mci; 350 struct ath_mci_profile *mci = &sc->btcoex.mci;
320 u16 aggr_limit = 0; 351 u16 aggr_limit = 0;
321 352
322 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) && mci->aggr_limit) 353 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) && mci->aggr_limit)
323 aggr_limit = (max_4ms_framelen * mci->aggr_limit) >> 4; 354 aggr_limit = (max_4ms_framelen * mci->aggr_limit) >> 4;
324 else if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED) 355 else if (test_bit(BT_OP_PRIORITY_DETECTED, &btcoex->op_flags))
325 aggr_limit = min((max_4ms_framelen * 3) / 8, 356 aggr_limit = min((max_4ms_framelen * 3) / 8,
326 (u32)ATH_AMPDU_LIMIT_MAX); 357 (u32)ATH_AMPDU_LIMIT_MAX);
327 358
@@ -362,9 +393,9 @@ void ath9k_stop_btcoex(struct ath_softc *sc)
362 393
363 if (ah->btcoex_hw.enabled && 394 if (ah->btcoex_hw.enabled &&
364 ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) { 395 ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) {
365 ath9k_hw_btcoex_disable(ah);
366 if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE) 396 if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
367 ath9k_btcoex_timer_pause(sc); 397 ath9k_btcoex_timer_pause(sc);
398 ath9k_hw_btcoex_disable(ah);
368 if (AR_SREV_9462(ah)) 399 if (AR_SREV_9462(ah))
369 ath_mci_flush_profile(&sc->btcoex.mci); 400 ath_mci_flush_profile(&sc->btcoex.mci);
370 } 401 }
@@ -372,11 +403,13 @@ void ath9k_stop_btcoex(struct ath_softc *sc)
372 403
373void ath9k_deinit_btcoex(struct ath_softc *sc) 404void ath9k_deinit_btcoex(struct ath_softc *sc)
374{ 405{
406 struct ath_hw *ah = sc->sc_ah;
407
375 if ((sc->btcoex.no_stomp_timer) && 408 if ((sc->btcoex.no_stomp_timer) &&
376 ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_3WIRE) 409 ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_3WIRE)
377 ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer); 410 ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
378 411
379 if (AR_SREV_9462(sc->sc_ah)) 412 if (ath9k_hw_mci_is_enabled(ah))
380 ath_mci_cleanup(sc); 413 ath_mci_cleanup(sc);
381} 414}
382 415
@@ -402,7 +435,7 @@ int ath9k_init_btcoex(struct ath_softc *sc)
402 txq = sc->tx.txq_map[WME_AC_BE]; 435 txq = sc->tx.txq_map[WME_AC_BE];
403 ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum); 436 ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
404 sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW; 437 sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
405 if (AR_SREV_9462(ah)) { 438 if (ath9k_hw_mci_is_enabled(ah)) {
406 sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE; 439 sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
407 INIT_LIST_HEAD(&sc->btcoex.mci.info); 440 INIT_LIST_HEAD(&sc->btcoex.mci.info);
408 441
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 135795257d95..936e920fb88e 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -453,7 +453,6 @@ struct ath9k_htc_priv {
453 u8 num_sta_assoc_vif; 453 u8 num_sta_assoc_vif;
454 u8 num_ap_vif; 454 u8 num_ap_vif;
455 455
456 u16 op_flags;
457 u16 curtxpow; 456 u16 curtxpow;
458 u16 txpowlimit; 457 u16 txpowlimit;
459 u16 nvifs; 458 u16 nvifs;
@@ -461,6 +460,7 @@ struct ath9k_htc_priv {
461 bool rearm_ani; 460 bool rearm_ani;
462 bool reconfig_beacon; 461 bool reconfig_beacon;
463 unsigned int rxfilter; 462 unsigned int rxfilter;
463 unsigned long op_flags;
464 464
465 struct ath9k_hw_cal_data caldata; 465 struct ath9k_hw_cal_data caldata;
466 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; 466 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
@@ -572,8 +572,6 @@ bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
572 572
573void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv); 573void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv);
574void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw); 574void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw);
575void ath9k_htc_radio_enable(struct ieee80211_hw *hw);
576void ath9k_htc_radio_disable(struct ieee80211_hw *hw);
577 575
578#ifdef CONFIG_MAC80211_LEDS 576#ifdef CONFIG_MAC80211_LEDS
579void ath9k_init_leds(struct ath9k_htc_priv *priv); 577void ath9k_init_leds(struct ath9k_htc_priv *priv);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index 2eadffb7971c..77d541feb910 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -207,9 +207,9 @@ static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv,
207 else 207 else
208 priv->ah->config.sw_beacon_response_time = MIN_SWBA_RESPONSE; 208 priv->ah->config.sw_beacon_response_time = MIN_SWBA_RESPONSE;
209 209
210 if (priv->op_flags & OP_TSF_RESET) { 210 if (test_bit(OP_TSF_RESET, &priv->op_flags)) {
211 ath9k_hw_reset_tsf(priv->ah); 211 ath9k_hw_reset_tsf(priv->ah);
212 priv->op_flags &= ~OP_TSF_RESET; 212 clear_bit(OP_TSF_RESET, &priv->op_flags);
213 } else { 213 } else {
214 /* 214 /*
215 * Pull nexttbtt forward to reflect the current TSF. 215 * Pull nexttbtt forward to reflect the current TSF.
@@ -221,7 +221,7 @@ static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv,
221 } while (nexttbtt < tsftu); 221 } while (nexttbtt < tsftu);
222 } 222 }
223 223
224 if (priv->op_flags & OP_ENABLE_BEACON) 224 if (test_bit(OP_ENABLE_BEACON, &priv->op_flags))
225 imask |= ATH9K_INT_SWBA; 225 imask |= ATH9K_INT_SWBA;
226 226
227 ath_dbg(common, CONFIG, 227 ath_dbg(common, CONFIG,
@@ -269,7 +269,7 @@ static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
269 else 269 else
270 priv->ah->config.sw_beacon_response_time = MIN_SWBA_RESPONSE; 270 priv->ah->config.sw_beacon_response_time = MIN_SWBA_RESPONSE;
271 271
272 if (priv->op_flags & OP_ENABLE_BEACON) 272 if (test_bit(OP_ENABLE_BEACON, &priv->op_flags))
273 imask |= ATH9K_INT_SWBA; 273 imask |= ATH9K_INT_SWBA;
274 274
275 ath_dbg(common, CONFIG, 275 ath_dbg(common, CONFIG,
@@ -365,7 +365,7 @@ static void ath9k_htc_send_beacon(struct ath9k_htc_priv *priv,
365 vif = priv->cur_beacon_conf.bslot[slot]; 365 vif = priv->cur_beacon_conf.bslot[slot];
366 avp = (struct ath9k_htc_vif *)vif->drv_priv; 366 avp = (struct ath9k_htc_vif *)vif->drv_priv;
367 367
368 if (unlikely(priv->op_flags & OP_SCANNING)) { 368 if (unlikely(test_bit(OP_SCANNING, &priv->op_flags))) {
369 spin_unlock_bh(&priv->beacon_lock); 369 spin_unlock_bh(&priv->beacon_lock);
370 return; 370 return;
371 } 371 }
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
index 1c10e2e5c237..07df279c8d46 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -37,17 +37,18 @@ static void ath_detect_bt_priority(struct ath9k_htc_priv *priv)
37 37
38 if (time_after(jiffies, btcoex->bt_priority_time + 38 if (time_after(jiffies, btcoex->bt_priority_time +
39 msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) { 39 msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
40 priv->op_flags &= ~(OP_BT_PRIORITY_DETECTED | OP_BT_SCAN); 40 clear_bit(OP_BT_PRIORITY_DETECTED, &priv->op_flags);
41 clear_bit(OP_BT_SCAN, &priv->op_flags);
41 /* Detect if colocated bt started scanning */ 42 /* Detect if colocated bt started scanning */
42 if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) { 43 if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) {
43 ath_dbg(ath9k_hw_common(ah), BTCOEX, 44 ath_dbg(ath9k_hw_common(ah), BTCOEX,
44 "BT scan detected\n"); 45 "BT scan detected\n");
45 priv->op_flags |= (OP_BT_SCAN | 46 set_bit(OP_BT_PRIORITY_DETECTED, &priv->op_flags);
46 OP_BT_PRIORITY_DETECTED); 47 set_bit(OP_BT_SCAN, &priv->op_flags);
47 } else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) { 48 } else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
48 ath_dbg(ath9k_hw_common(ah), BTCOEX, 49 ath_dbg(ath9k_hw_common(ah), BTCOEX,
49 "BT priority traffic detected\n"); 50 "BT priority traffic detected\n");
50 priv->op_flags |= OP_BT_PRIORITY_DETECTED; 51 set_bit(OP_BT_PRIORITY_DETECTED, &priv->op_flags);
51 } 52 }
52 53
53 btcoex->bt_priority_cnt = 0; 54 btcoex->bt_priority_cnt = 0;
@@ -67,26 +68,23 @@ static void ath_btcoex_period_work(struct work_struct *work)
67 struct ath_btcoex *btcoex = &priv->btcoex; 68 struct ath_btcoex *btcoex = &priv->btcoex;
68 struct ath_common *common = ath9k_hw_common(priv->ah); 69 struct ath_common *common = ath9k_hw_common(priv->ah);
69 u32 timer_period; 70 u32 timer_period;
70 bool is_btscan;
71 int ret; 71 int ret;
72 72
73 ath_detect_bt_priority(priv); 73 ath_detect_bt_priority(priv);
74 74
75 is_btscan = !!(priv->op_flags & OP_BT_SCAN);
76
77 ret = ath9k_htc_update_cap_target(priv, 75 ret = ath9k_htc_update_cap_target(priv,
78 !!(priv->op_flags & OP_BT_PRIORITY_DETECTED)); 76 test_bit(OP_BT_PRIORITY_DETECTED, &priv->op_flags));
79 if (ret) { 77 if (ret) {
80 ath_err(common, "Unable to set BTCOEX parameters\n"); 78 ath_err(common, "Unable to set BTCOEX parameters\n");
81 return; 79 return;
82 } 80 }
83 81
84 ath9k_hw_btcoex_bt_stomp(priv->ah, is_btscan ? ATH_BTCOEX_STOMP_ALL : 82 ath9k_hw_btcoex_bt_stomp(priv->ah, test_bit(OP_BT_SCAN, &priv->op_flags) ?
85 btcoex->bt_stomp_type); 83 ATH_BTCOEX_STOMP_ALL : btcoex->bt_stomp_type);
86 84
87 ath9k_hw_btcoex_enable(priv->ah); 85 ath9k_hw_btcoex_enable(priv->ah);
88 timer_period = is_btscan ? btcoex->btscan_no_stomp : 86 timer_period = test_bit(OP_BT_SCAN, &priv->op_flags) ?
89 btcoex->btcoex_no_stomp; 87 btcoex->btscan_no_stomp : btcoex->btcoex_no_stomp;
90 ieee80211_queue_delayed_work(priv->hw, &priv->duty_cycle_work, 88 ieee80211_queue_delayed_work(priv->hw, &priv->duty_cycle_work,
91 msecs_to_jiffies(timer_period)); 89 msecs_to_jiffies(timer_period));
92 ieee80211_queue_delayed_work(priv->hw, &priv->coex_period_work, 90 ieee80211_queue_delayed_work(priv->hw, &priv->coex_period_work,
@@ -104,14 +102,15 @@ static void ath_btcoex_duty_cycle_work(struct work_struct *work)
104 struct ath_hw *ah = priv->ah; 102 struct ath_hw *ah = priv->ah;
105 struct ath_btcoex *btcoex = &priv->btcoex; 103 struct ath_btcoex *btcoex = &priv->btcoex;
106 struct ath_common *common = ath9k_hw_common(ah); 104 struct ath_common *common = ath9k_hw_common(ah);
107 bool is_btscan = priv->op_flags & OP_BT_SCAN;
108 105
109 ath_dbg(common, BTCOEX, "time slice work for bt and wlan\n"); 106 ath_dbg(common, BTCOEX, "time slice work for bt and wlan\n");
110 107
111 if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW || is_btscan) 108 if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW ||
109 test_bit(OP_BT_SCAN, &priv->op_flags))
112 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE); 110 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
113 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL) 111 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
114 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW); 112 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW);
113
115 ath9k_hw_btcoex_enable(priv->ah); 114 ath9k_hw_btcoex_enable(priv->ah);
116} 115}
117 116
@@ -141,7 +140,8 @@ static void ath_htc_resume_btcoex_work(struct ath9k_htc_priv *priv)
141 140
142 btcoex->bt_priority_cnt = 0; 141 btcoex->bt_priority_cnt = 0;
143 btcoex->bt_priority_time = jiffies; 142 btcoex->bt_priority_time = jiffies;
144 priv->op_flags &= ~(OP_BT_PRIORITY_DETECTED | OP_BT_SCAN); 143 clear_bit(OP_BT_PRIORITY_DETECTED, &priv->op_flags);
144 clear_bit(OP_BT_SCAN, &priv->op_flags);
145 ieee80211_queue_delayed_work(priv->hw, &priv->coex_period_work, 0); 145 ieee80211_queue_delayed_work(priv->hw, &priv->coex_period_work, 0);
146} 146}
147 147
@@ -310,95 +310,3 @@ void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv)
310 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT) 310 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
311 wiphy_rfkill_start_polling(priv->hw->wiphy); 311 wiphy_rfkill_start_polling(priv->hw->wiphy);
312} 312}
313
314void ath9k_htc_radio_enable(struct ieee80211_hw *hw)
315{
316 struct ath9k_htc_priv *priv = hw->priv;
317 struct ath_hw *ah = priv->ah;
318 struct ath_common *common = ath9k_hw_common(ah);
319 int ret;
320 u8 cmd_rsp;
321
322 if (!ah->curchan)
323 ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
324
325 /* Reset the HW */
326 ret = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
327 if (ret) {
328 ath_err(common,
329 "Unable to reset hardware; reset status %d (freq %u MHz)\n",
330 ret, ah->curchan->channel);
331 }
332
333 ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
334 &priv->curtxpow);
335
336 /* Start RX */
337 WMI_CMD(WMI_START_RECV_CMDID);
338 ath9k_host_rx_init(priv);
339
340 /* Start TX */
341 htc_start(priv->htc);
342 spin_lock_bh(&priv->tx.tx_lock);
343 priv->tx.flags &= ~ATH9K_HTC_OP_TX_QUEUES_STOP;
344 spin_unlock_bh(&priv->tx.tx_lock);
345 ieee80211_wake_queues(hw);
346
347 WMI_CMD(WMI_ENABLE_INTR_CMDID);
348
349 /* Enable LED */
350 ath9k_hw_cfg_output(ah, ah->led_pin,
351 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
352 ath9k_hw_set_gpio(ah, ah->led_pin, 0);
353}
354
355void ath9k_htc_radio_disable(struct ieee80211_hw *hw)
356{
357 struct ath9k_htc_priv *priv = hw->priv;
358 struct ath_hw *ah = priv->ah;
359 struct ath_common *common = ath9k_hw_common(ah);
360 int ret;
361 u8 cmd_rsp;
362
363 ath9k_htc_ps_wakeup(priv);
364
365 /* Disable LED */
366 ath9k_hw_set_gpio(ah, ah->led_pin, 1);
367 ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
368
369 WMI_CMD(WMI_DISABLE_INTR_CMDID);
370
371 /* Stop TX */
372 ieee80211_stop_queues(hw);
373 ath9k_htc_tx_drain(priv);
374 WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
375
376 /* Stop RX */
377 WMI_CMD(WMI_STOP_RECV_CMDID);
378
379 /* Clear the WMI event queue */
380 ath9k_wmi_event_drain(priv);
381
382 /*
383 * The MIB counters have to be disabled here,
384 * since the target doesn't do it.
385 */
386 ath9k_hw_disable_mib_counters(ah);
387
388 if (!ah->curchan)
389 ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
390
391 /* Reset the HW */
392 ret = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
393 if (ret) {
394 ath_err(common,
395 "Unable to reset hardware; reset status %d (freq %u MHz)\n",
396 ret, ah->curchan->channel);
397 }
398
399 /* Disable the PHY */
400 ath9k_hw_phy_disable(ah);
401
402 ath9k_htc_ps_restore(priv);
403 ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
404}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 25213d521bc2..a035a380d669 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -611,7 +611,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
611 struct ath_common *common; 611 struct ath_common *common;
612 int i, ret = 0, csz = 0; 612 int i, ret = 0, csz = 0;
613 613
614 priv->op_flags |= OP_INVALID; 614 set_bit(OP_INVALID, &priv->op_flags);
615 615
616 ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL); 616 ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
617 if (!ah) 617 if (!ah)
@@ -718,7 +718,7 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
718 718
719 hw->queues = 4; 719 hw->queues = 4;
720 hw->channel_change_time = 5000; 720 hw->channel_change_time = 5000;
721 hw->max_listen_interval = 10; 721 hw->max_listen_interval = 1;
722 722
723 hw->vif_data_size = sizeof(struct ath9k_htc_vif); 723 hw->vif_data_size = sizeof(struct ath9k_htc_vif);
724 hw->sta_data_size = sizeof(struct ath9k_htc_sta); 724 hw->sta_data_size = sizeof(struct ath9k_htc_sta);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index abbd6effd60d..374c32ed905a 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -75,14 +75,19 @@ unlock:
75 75
76void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv) 76void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv)
77{ 77{
78 bool reset;
79
78 mutex_lock(&priv->htc_pm_lock); 80 mutex_lock(&priv->htc_pm_lock);
79 if (--priv->ps_usecount != 0) 81 if (--priv->ps_usecount != 0)
80 goto unlock; 82 goto unlock;
81 83
82 if (priv->ps_idle) 84 if (priv->ps_idle) {
85 ath9k_hw_setrxabort(priv->ah, true);
86 ath9k_hw_stopdmarecv(priv->ah, &reset);
83 ath9k_hw_setpower(priv->ah, ATH9K_PM_FULL_SLEEP); 87 ath9k_hw_setpower(priv->ah, ATH9K_PM_FULL_SLEEP);
84 else if (priv->ps_enabled) 88 } else if (priv->ps_enabled) {
85 ath9k_hw_setpower(priv->ah, ATH9K_PM_NETWORK_SLEEP); 89 ath9k_hw_setpower(priv->ah, ATH9K_PM_NETWORK_SLEEP);
90 }
86 91
87unlock: 92unlock:
88 mutex_unlock(&priv->htc_pm_lock); 93 mutex_unlock(&priv->htc_pm_lock);
@@ -250,7 +255,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
250 u8 cmd_rsp; 255 u8 cmd_rsp;
251 int ret; 256 int ret;
252 257
253 if (priv->op_flags & OP_INVALID) 258 if (test_bit(OP_INVALID, &priv->op_flags))
254 return -EIO; 259 return -EIO;
255 260
256 fastcc = !!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL); 261 fastcc = !!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL);
@@ -304,7 +309,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
304 309
305 htc_start(priv->htc); 310 htc_start(priv->htc);
306 311
307 if (!(priv->op_flags & OP_SCANNING) && 312 if (!test_bit(OP_SCANNING, &priv->op_flags) &&
308 !(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) 313 !(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
309 ath9k_htc_vif_reconfig(priv); 314 ath9k_htc_vif_reconfig(priv);
310 315
@@ -750,7 +755,7 @@ void ath9k_htc_start_ani(struct ath9k_htc_priv *priv)
750 common->ani.shortcal_timer = timestamp; 755 common->ani.shortcal_timer = timestamp;
751 common->ani.checkani_timer = timestamp; 756 common->ani.checkani_timer = timestamp;
752 757
753 priv->op_flags |= OP_ANI_RUNNING; 758 set_bit(OP_ANI_RUNNING, &priv->op_flags);
754 759
755 ieee80211_queue_delayed_work(common->hw, &priv->ani_work, 760 ieee80211_queue_delayed_work(common->hw, &priv->ani_work,
756 msecs_to_jiffies(ATH_ANI_POLLINTERVAL)); 761 msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
@@ -759,7 +764,7 @@ void ath9k_htc_start_ani(struct ath9k_htc_priv *priv)
759void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv) 764void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv)
760{ 765{
761 cancel_delayed_work_sync(&priv->ani_work); 766 cancel_delayed_work_sync(&priv->ani_work);
762 priv->op_flags &= ~OP_ANI_RUNNING; 767 clear_bit(OP_ANI_RUNNING, &priv->op_flags);
763} 768}
764 769
765void ath9k_htc_ani_work(struct work_struct *work) 770void ath9k_htc_ani_work(struct work_struct *work)
@@ -944,7 +949,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
944 ath_dbg(common, CONFIG, 949 ath_dbg(common, CONFIG,
945 "Failed to update capability in target\n"); 950 "Failed to update capability in target\n");
946 951
947 priv->op_flags &= ~OP_INVALID; 952 clear_bit(OP_INVALID, &priv->op_flags);
948 htc_start(priv->htc); 953 htc_start(priv->htc);
949 954
950 spin_lock_bh(&priv->tx.tx_lock); 955 spin_lock_bh(&priv->tx.tx_lock);
@@ -973,7 +978,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
973 978
974 mutex_lock(&priv->mutex); 979 mutex_lock(&priv->mutex);
975 980
976 if (priv->op_flags & OP_INVALID) { 981 if (test_bit(OP_INVALID, &priv->op_flags)) {
977 ath_dbg(common, ANY, "Device not present\n"); 982 ath_dbg(common, ANY, "Device not present\n");
978 mutex_unlock(&priv->mutex); 983 mutex_unlock(&priv->mutex);
979 return; 984 return;
@@ -1015,7 +1020,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
1015 ath9k_htc_ps_restore(priv); 1020 ath9k_htc_ps_restore(priv);
1016 ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP); 1021 ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
1017 1022
1018 priv->op_flags |= OP_INVALID; 1023 set_bit(OP_INVALID, &priv->op_flags);
1019 1024
1020 ath_dbg(common, CONFIG, "Driver halt\n"); 1025 ath_dbg(common, CONFIG, "Driver halt\n");
1021 mutex_unlock(&priv->mutex); 1026 mutex_unlock(&priv->mutex);
@@ -1105,7 +1110,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
1105 ath9k_htc_set_opmode(priv); 1110 ath9k_htc_set_opmode(priv);
1106 1111
1107 if ((priv->ah->opmode == NL80211_IFTYPE_AP) && 1112 if ((priv->ah->opmode == NL80211_IFTYPE_AP) &&
1108 !(priv->op_flags & OP_ANI_RUNNING)) { 1113 !test_bit(OP_ANI_RUNNING, &priv->op_flags)) {
1109 ath9k_hw_set_tsfadjust(priv->ah, 1); 1114 ath9k_hw_set_tsfadjust(priv->ah, 1);
1110 ath9k_htc_start_ani(priv); 1115 ath9k_htc_start_ani(priv);
1111 } 1116 }
@@ -1178,24 +1183,20 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
1178 struct ath9k_htc_priv *priv = hw->priv; 1183 struct ath9k_htc_priv *priv = hw->priv;
1179 struct ath_common *common = ath9k_hw_common(priv->ah); 1184 struct ath_common *common = ath9k_hw_common(priv->ah);
1180 struct ieee80211_conf *conf = &hw->conf; 1185 struct ieee80211_conf *conf = &hw->conf;
1186 bool chip_reset = false;
1187 int ret = 0;
1181 1188
1182 mutex_lock(&priv->mutex); 1189 mutex_lock(&priv->mutex);
1190 ath9k_htc_ps_wakeup(priv);
1183 1191
1184 if (changed & IEEE80211_CONF_CHANGE_IDLE) { 1192 if (changed & IEEE80211_CONF_CHANGE_IDLE) {
1185 bool enable_radio = false;
1186 bool idle = !!(conf->flags & IEEE80211_CONF_IDLE);
1187
1188 mutex_lock(&priv->htc_pm_lock); 1193 mutex_lock(&priv->htc_pm_lock);
1189 if (!idle && priv->ps_idle)
1190 enable_radio = true;
1191 priv->ps_idle = idle;
1192 mutex_unlock(&priv->htc_pm_lock);
1193 1194
1194 if (enable_radio) { 1195 priv->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
1195 ath_dbg(common, CONFIG, "not-idle: enabling radio\n"); 1196 if (priv->ps_idle)
1196 ath9k_htc_setpower(priv, ATH9K_PM_AWAKE); 1197 chip_reset = true;
1197 ath9k_htc_radio_enable(hw); 1198
1198 } 1199 mutex_unlock(&priv->htc_pm_lock);
1199 } 1200 }
1200 1201
1201 /* 1202 /*
@@ -1210,7 +1211,7 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
1210 ath9k_htc_remove_monitor_interface(priv); 1211 ath9k_htc_remove_monitor_interface(priv);
1211 } 1212 }
1212 1213
1213 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 1214 if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || chip_reset) {
1214 struct ieee80211_channel *curchan = hw->conf.channel; 1215 struct ieee80211_channel *curchan = hw->conf.channel;
1215 int pos = curchan->hw_value; 1216 int pos = curchan->hw_value;
1216 1217
@@ -1223,8 +1224,8 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
1223 1224
1224 if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) { 1225 if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) {
1225 ath_err(common, "Unable to set channel\n"); 1226 ath_err(common, "Unable to set channel\n");
1226 mutex_unlock(&priv->mutex); 1227 ret = -EINVAL;
1227 return -EINVAL; 1228 goto out;
1228 } 1229 }
1229 1230
1230 } 1231 }
@@ -1246,21 +1247,10 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
1246 priv->txpowlimit, &priv->curtxpow); 1247 priv->txpowlimit, &priv->curtxpow);
1247 } 1248 }
1248 1249
1249 if (changed & IEEE80211_CONF_CHANGE_IDLE) {
1250 mutex_lock(&priv->htc_pm_lock);
1251 if (!priv->ps_idle) {
1252 mutex_unlock(&priv->htc_pm_lock);
1253 goto out;
1254 }
1255 mutex_unlock(&priv->htc_pm_lock);
1256
1257 ath_dbg(common, CONFIG, "idle: disabling radio\n");
1258 ath9k_htc_radio_disable(hw);
1259 }
1260
1261out: 1250out:
1251 ath9k_htc_ps_restore(priv);
1262 mutex_unlock(&priv->mutex); 1252 mutex_unlock(&priv->mutex);
1263 return 0; 1253 return ret;
1264} 1254}
1265 1255
1266#define SUPPORTED_FILTERS \ 1256#define SUPPORTED_FILTERS \
@@ -1285,7 +1275,7 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
1285 changed_flags &= SUPPORTED_FILTERS; 1275 changed_flags &= SUPPORTED_FILTERS;
1286 *total_flags &= SUPPORTED_FILTERS; 1276 *total_flags &= SUPPORTED_FILTERS;
1287 1277
1288 if (priv->op_flags & OP_INVALID) { 1278 if (test_bit(OP_INVALID, &priv->op_flags)) {
1289 ath_dbg(ath9k_hw_common(priv->ah), ANY, 1279 ath_dbg(ath9k_hw_common(priv->ah), ANY,
1290 "Unable to configure filter on invalid state\n"); 1280 "Unable to configure filter on invalid state\n");
1291 mutex_unlock(&priv->mutex); 1281 mutex_unlock(&priv->mutex);
@@ -1516,7 +1506,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
1516 ath_dbg(common, CONFIG, "Beacon enabled for BSS: %pM\n", 1506 ath_dbg(common, CONFIG, "Beacon enabled for BSS: %pM\n",
1517 bss_conf->bssid); 1507 bss_conf->bssid);
1518 ath9k_htc_set_tsfadjust(priv, vif); 1508 ath9k_htc_set_tsfadjust(priv, vif);
1519 priv->op_flags |= OP_ENABLE_BEACON; 1509 set_bit(OP_ENABLE_BEACON, &priv->op_flags);
1520 ath9k_htc_beacon_config(priv, vif); 1510 ath9k_htc_beacon_config(priv, vif);
1521 } 1511 }
1522 1512
@@ -1529,7 +1519,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
1529 ath_dbg(common, CONFIG, 1519 ath_dbg(common, CONFIG,
1530 "Beacon disabled for BSS: %pM\n", 1520 "Beacon disabled for BSS: %pM\n",
1531 bss_conf->bssid); 1521 bss_conf->bssid);
1532 priv->op_flags &= ~OP_ENABLE_BEACON; 1522 clear_bit(OP_ENABLE_BEACON, &priv->op_flags);
1533 ath9k_htc_beacon_config(priv, vif); 1523 ath9k_htc_beacon_config(priv, vif);
1534 } 1524 }
1535 } 1525 }
@@ -1542,7 +1532,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
1542 (priv->nvifs == 1) && 1532 (priv->nvifs == 1) &&
1543 (priv->num_ap_vif == 1) && 1533 (priv->num_ap_vif == 1) &&
1544 (vif->type == NL80211_IFTYPE_AP)) { 1534 (vif->type == NL80211_IFTYPE_AP)) {
1545 priv->op_flags |= OP_TSF_RESET; 1535 set_bit(OP_TSF_RESET, &priv->op_flags);
1546 } 1536 }
1547 ath_dbg(common, CONFIG, 1537 ath_dbg(common, CONFIG,
1548 "Beacon interval changed for BSS: %pM\n", 1538 "Beacon interval changed for BSS: %pM\n",
@@ -1654,7 +1644,7 @@ static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
1654 1644
1655 mutex_lock(&priv->mutex); 1645 mutex_lock(&priv->mutex);
1656 spin_lock_bh(&priv->beacon_lock); 1646 spin_lock_bh(&priv->beacon_lock);
1657 priv->op_flags |= OP_SCANNING; 1647 set_bit(OP_SCANNING, &priv->op_flags);
1658 spin_unlock_bh(&priv->beacon_lock); 1648 spin_unlock_bh(&priv->beacon_lock);
1659 cancel_work_sync(&priv->ps_work); 1649 cancel_work_sync(&priv->ps_work);
1660 ath9k_htc_stop_ani(priv); 1650 ath9k_htc_stop_ani(priv);
@@ -1667,7 +1657,7 @@ static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw)
1667 1657
1668 mutex_lock(&priv->mutex); 1658 mutex_lock(&priv->mutex);
1669 spin_lock_bh(&priv->beacon_lock); 1659 spin_lock_bh(&priv->beacon_lock);
1670 priv->op_flags &= ~OP_SCANNING; 1660 clear_bit(OP_SCANNING, &priv->op_flags);
1671 spin_unlock_bh(&priv->beacon_lock); 1661 spin_unlock_bh(&priv->beacon_lock);
1672 ath9k_htc_ps_wakeup(priv); 1662 ath9k_htc_ps_wakeup(priv);
1673 ath9k_htc_vif_reconfig(priv); 1663 ath9k_htc_vif_reconfig(priv);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 3e40a6461512..47e61d0da33b 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -916,7 +916,7 @@ void ath9k_host_rx_init(struct ath9k_htc_priv *priv)
916{ 916{
917 ath9k_hw_rxena(priv->ah); 917 ath9k_hw_rxena(priv->ah);
918 ath9k_htc_opmode_init(priv); 918 ath9k_htc_opmode_init(priv);
919 ath9k_hw_startpcureceive(priv->ah, (priv->op_flags & OP_SCANNING)); 919 ath9k_hw_startpcureceive(priv->ah, test_bit(OP_SCANNING, &priv->op_flags));
920 priv->rx.last_rssi = ATH_RSSI_DUMMY_MARKER; 920 priv->rx.last_rssi = ATH_RSSI_DUMMY_MARKER;
921} 921}
922 922
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 995ca8e1302e..c1659d079513 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -342,6 +342,9 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
342 val = REG_READ(ah, AR_SREV); 342 val = REG_READ(ah, AR_SREV);
343 ah->hw_version.macRev = MS(val, AR_SREV_REVISION2); 343 ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
344 return; 344 return;
345 case AR9300_DEVID_QCA955X:
346 ah->hw_version.macVersion = AR_SREV_VERSION_9550;
347 return;
345 } 348 }
346 349
347 val = REG_READ(ah, AR_SREV) & AR_SREV_ID; 350 val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
@@ -390,14 +393,6 @@ static void ath9k_hw_disablepcie(struct ath_hw *ah)
390 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000); 393 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
391} 394}
392 395
393static void ath9k_hw_aspm_init(struct ath_hw *ah)
394{
395 struct ath_common *common = ath9k_hw_common(ah);
396
397 if (common->bus_ops->aspm_init)
398 common->bus_ops->aspm_init(common);
399}
400
401/* This should work for all families including legacy */ 396/* This should work for all families including legacy */
402static bool ath9k_hw_chip_test(struct ath_hw *ah) 397static bool ath9k_hw_chip_test(struct ath_hw *ah)
403{ 398{
@@ -654,6 +649,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
654 case AR_SREV_VERSION_9485: 649 case AR_SREV_VERSION_9485:
655 case AR_SREV_VERSION_9340: 650 case AR_SREV_VERSION_9340:
656 case AR_SREV_VERSION_9462: 651 case AR_SREV_VERSION_9462:
652 case AR_SREV_VERSION_9550:
657 break; 653 break;
658 default: 654 default:
659 ath_err(common, 655 ath_err(common,
@@ -663,7 +659,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
663 } 659 }
664 660
665 if (AR_SREV_9271(ah) || AR_SREV_9100(ah) || AR_SREV_9340(ah) || 661 if (AR_SREV_9271(ah) || AR_SREV_9100(ah) || AR_SREV_9340(ah) ||
666 AR_SREV_9330(ah)) 662 AR_SREV_9330(ah) || AR_SREV_9550(ah))
667 ah->is_pciexpress = false; 663 ah->is_pciexpress = false;
668 664
669 ah->hw_version.phyRev = REG_READ(ah, AR_PHY_CHIP_ID); 665 ah->hw_version.phyRev = REG_READ(ah, AR_PHY_CHIP_ID);
@@ -693,9 +689,6 @@ static int __ath9k_hw_init(struct ath_hw *ah)
693 if (r) 689 if (r)
694 return r; 690 return r;
695 691
696 if (ah->is_pciexpress)
697 ath9k_hw_aspm_init(ah);
698
699 r = ath9k_hw_init_macaddr(ah); 692 r = ath9k_hw_init_macaddr(ah);
700 if (r) { 693 if (r) {
701 ath_err(common, "Failed to initialize MAC address\n"); 694 ath_err(common, "Failed to initialize MAC address\n");
@@ -738,6 +731,7 @@ int ath9k_hw_init(struct ath_hw *ah)
738 case AR9300_DEVID_AR9485_PCIE: 731 case AR9300_DEVID_AR9485_PCIE:
739 case AR9300_DEVID_AR9330: 732 case AR9300_DEVID_AR9330:
740 case AR9300_DEVID_AR9340: 733 case AR9300_DEVID_AR9340:
734 case AR9300_DEVID_QCA955X:
741 case AR9300_DEVID_AR9580: 735 case AR9300_DEVID_AR9580:
742 case AR9300_DEVID_AR9462: 736 case AR9300_DEVID_AR9462:
743 break; 737 break;
@@ -876,7 +870,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
876 /* program BB PLL phase_shift */ 870 /* program BB PLL phase_shift */
877 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3, 871 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3,
878 AR_CH0_BB_DPLL3_PHASE_SHIFT, 0x1); 872 AR_CH0_BB_DPLL3_PHASE_SHIFT, 0x1);
879 } else if (AR_SREV_9340(ah)) { 873 } else if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) {
880 u32 regval, pll2_divint, pll2_divfrac, refdiv; 874 u32 regval, pll2_divint, pll2_divfrac, refdiv;
881 875
882 REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x1142c); 876 REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x1142c);
@@ -890,9 +884,15 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
890 pll2_divfrac = 0x1eb85; 884 pll2_divfrac = 0x1eb85;
891 refdiv = 3; 885 refdiv = 3;
892 } else { 886 } else {
893 pll2_divint = 88; 887 if (AR_SREV_9340(ah)) {
894 pll2_divfrac = 0; 888 pll2_divint = 88;
895 refdiv = 5; 889 pll2_divfrac = 0;
890 refdiv = 5;
891 } else {
892 pll2_divint = 0x11;
893 pll2_divfrac = 0x26666;
894 refdiv = 1;
895 }
896 } 896 }
897 897
898 regval = REG_READ(ah, AR_PHY_PLL_MODE); 898 regval = REG_READ(ah, AR_PHY_PLL_MODE);
@@ -905,8 +905,12 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
905 udelay(100); 905 udelay(100);
906 906
907 regval = REG_READ(ah, AR_PHY_PLL_MODE); 907 regval = REG_READ(ah, AR_PHY_PLL_MODE);
908 regval = (regval & 0x80071fff) | (0x1 << 30) | (0x1 << 13) | 908 if (AR_SREV_9340(ah))
909 (0x4 << 26) | (0x18 << 19); 909 regval = (regval & 0x80071fff) | (0x1 << 30) |
910 (0x1 << 13) | (0x4 << 26) | (0x18 << 19);
911 else
912 regval = (regval & 0x80071fff) | (0x3 << 30) |
913 (0x1 << 13) | (0x4 << 26) | (0x60 << 19);
910 REG_WRITE(ah, AR_PHY_PLL_MODE, regval); 914 REG_WRITE(ah, AR_PHY_PLL_MODE, regval);
911 REG_WRITE(ah, AR_PHY_PLL_MODE, 915 REG_WRITE(ah, AR_PHY_PLL_MODE,
912 REG_READ(ah, AR_PHY_PLL_MODE) & 0xfffeffff); 916 REG_READ(ah, AR_PHY_PLL_MODE) & 0xfffeffff);
@@ -917,7 +921,8 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
917 921
918 REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll); 922 REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
919 923
920 if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah)) 924 if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) ||
925 AR_SREV_9550(ah))
921 udelay(1000); 926 udelay(1000);
922 927
923 /* Switch the core clock for ar9271 to 117Mhz */ 928 /* Switch the core clock for ar9271 to 117Mhz */
@@ -930,7 +935,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
930 935
931 REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK); 936 REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);
932 937
933 if (AR_SREV_9340(ah)) { 938 if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) {
934 if (ah->is_clk_25mhz) { 939 if (ah->is_clk_25mhz) {
935 REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1); 940 REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1);
936 REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7); 941 REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7);
@@ -954,7 +959,7 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
954 AR_IMR_RXORN | 959 AR_IMR_RXORN |
955 AR_IMR_BCNMISC; 960 AR_IMR_BCNMISC;
956 961
957 if (AR_SREV_9340(ah)) 962 if (AR_SREV_9340(ah) || AR_SREV_9550(ah))
958 sync_default &= ~AR_INTR_SYNC_HOST1_FATAL; 963 sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
959 964
960 if (AR_SREV_9300_20_OR_LATER(ah)) { 965 if (AR_SREV_9300_20_OR_LATER(ah)) {
@@ -1371,6 +1376,9 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
1371 } 1376 }
1372 } 1377 }
1373 1378
1379 if (ath9k_hw_mci_is_enabled(ah))
1380 ar9003_mci_check_gpm_offset(ah);
1381
1374 REG_WRITE(ah, AR_RTC_RC, rst_flags); 1382 REG_WRITE(ah, AR_RTC_RC, rst_flags);
1375 1383
1376 REGWRITE_BUFFER_FLUSH(ah); 1384 REGWRITE_BUFFER_FLUSH(ah);
@@ -1455,9 +1463,6 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
1455 break; 1463 break;
1456 } 1464 }
1457 1465
1458 if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
1459 REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
1460
1461 return ret; 1466 return ret;
1462} 1467}
1463 1468
@@ -1733,8 +1738,8 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
1733 ath9k_hw_loadnf(ah, ah->curchan); 1738 ath9k_hw_loadnf(ah, ah->curchan);
1734 ath9k_hw_start_nfcal(ah, true); 1739 ath9k_hw_start_nfcal(ah, true);
1735 1740
1736 if ((ah->caps.hw_caps & ATH9K_HW_CAP_MCI) && ar9003_mci_is_ready(ah)) 1741 if (ath9k_hw_mci_is_enabled(ah))
1737 ar9003_mci_2g5g_switch(ah, true); 1742 ar9003_mci_2g5g_switch(ah, false);
1738 1743
1739 if (AR_SREV_9271(ah)) 1744 if (AR_SREV_9271(ah))
1740 ar9002_hw_load_ani_reg(ah, chan); 1745 ar9002_hw_load_ani_reg(ah, chan);
@@ -1754,10 +1759,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1754 u64 tsf = 0; 1759 u64 tsf = 0;
1755 int i, r; 1760 int i, r;
1756 bool start_mci_reset = false; 1761 bool start_mci_reset = false;
1757 bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
1758 bool save_fullsleep = ah->chip_fullsleep; 1762 bool save_fullsleep = ah->chip_fullsleep;
1759 1763
1760 if (mci) { 1764 if (ath9k_hw_mci_is_enabled(ah)) {
1761 start_mci_reset = ar9003_mci_start_reset(ah, chan); 1765 start_mci_reset = ar9003_mci_start_reset(ah, chan);
1762 if (start_mci_reset) 1766 if (start_mci_reset)
1763 return 0; 1767 return 0;
@@ -1786,7 +1790,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1786 return r; 1790 return r;
1787 } 1791 }
1788 1792
1789 if (mci) 1793 if (ath9k_hw_mci_is_enabled(ah))
1790 ar9003_mci_stop_bt(ah, save_fullsleep); 1794 ar9003_mci_stop_bt(ah, save_fullsleep);
1791 1795
1792 saveDefAntenna = REG_READ(ah, AR_DEF_ANTENNA); 1796 saveDefAntenna = REG_READ(ah, AR_DEF_ANTENNA);
@@ -1844,7 +1848,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1844 if (r) 1848 if (r)
1845 return r; 1849 return r;
1846 1850
1847 if (mci) 1851 if (ath9k_hw_mci_is_enabled(ah))
1848 ar9003_mci_reset(ah, false, IS_CHAN_2GHZ(chan), save_fullsleep); 1852 ar9003_mci_reset(ah, false, IS_CHAN_2GHZ(chan), save_fullsleep);
1849 1853
1850 /* 1854 /*
@@ -1939,7 +1943,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1939 1943
1940 ath9k_hw_set_dma(ah); 1944 ath9k_hw_set_dma(ah);
1941 1945
1942 REG_WRITE(ah, AR_OBS, 8); 1946 if (!ath9k_hw_mci_is_enabled(ah))
1947 REG_WRITE(ah, AR_OBS, 8);
1943 1948
1944 if (ah->config.rx_intr_mitigation) { 1949 if (ah->config.rx_intr_mitigation) {
1945 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500); 1950 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500);
@@ -1960,10 +1965,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1960 if (!ath9k_hw_init_cal(ah, chan)) 1965 if (!ath9k_hw_init_cal(ah, chan))
1961 return -EIO; 1966 return -EIO;
1962 1967
1963 ath9k_hw_loadnf(ah, chan); 1968 if (ath9k_hw_mci_is_enabled(ah) && ar9003_mci_end_reset(ah, chan, caldata))
1964 ath9k_hw_start_nfcal(ah, true);
1965
1966 if (mci && ar9003_mci_end_reset(ah, chan, caldata))
1967 return -EIO; 1969 return -EIO;
1968 1970
1969 ENABLE_REGWRITE_BUFFER(ah); 1971 ENABLE_REGWRITE_BUFFER(ah);
@@ -1998,7 +2000,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1998 REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD); 2000 REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
1999 } 2001 }
2000#ifdef __BIG_ENDIAN 2002#ifdef __BIG_ENDIAN
2001 else if (AR_SREV_9330(ah) || AR_SREV_9340(ah)) 2003 else if (AR_SREV_9330(ah) || AR_SREV_9340(ah) ||
2004 AR_SREV_9550(ah))
2002 REG_RMW(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB, 0); 2005 REG_RMW(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB, 0);
2003 else 2006 else
2004 REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD); 2007 REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
@@ -2008,9 +2011,12 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2008 if (ath9k_hw_btcoex_is_enabled(ah)) 2011 if (ath9k_hw_btcoex_is_enabled(ah))
2009 ath9k_hw_btcoex_enable(ah); 2012 ath9k_hw_btcoex_enable(ah);
2010 2013
2011 if (mci) 2014 if (ath9k_hw_mci_is_enabled(ah))
2012 ar9003_mci_check_bt(ah); 2015 ar9003_mci_check_bt(ah);
2013 2016
2017 ath9k_hw_loadnf(ah, chan);
2018 ath9k_hw_start_nfcal(ah, true);
2019
2014 if (AR_SREV_9300_20_OR_LATER(ah)) { 2020 if (AR_SREV_9300_20_OR_LATER(ah)) {
2015 ar9003_hw_bb_watchdog_config(ah); 2021 ar9003_hw_bb_watchdog_config(ah);
2016 2022
@@ -2031,39 +2037,35 @@ EXPORT_SYMBOL(ath9k_hw_reset);
2031 * Notify Power Mgt is disabled in self-generated frames. 2037 * Notify Power Mgt is disabled in self-generated frames.
2032 * If requested, force chip to sleep. 2038 * If requested, force chip to sleep.
2033 */ 2039 */
2034static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip) 2040static void ath9k_set_power_sleep(struct ath_hw *ah)
2035{ 2041{
2036 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV); 2042 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
2037 if (setChip) {
2038 if (AR_SREV_9462(ah)) {
2039 REG_WRITE(ah, AR_TIMER_MODE,
2040 REG_READ(ah, AR_TIMER_MODE) & 0xFFFFFF00);
2041 REG_WRITE(ah, AR_NDP2_TIMER_MODE, REG_READ(ah,
2042 AR_NDP2_TIMER_MODE) & 0xFFFFFF00);
2043 REG_WRITE(ah, AR_SLP32_INC,
2044 REG_READ(ah, AR_SLP32_INC) & 0xFFF00000);
2045 /* xxx Required for WLAN only case ? */
2046 REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, 0);
2047 udelay(100);
2048 }
2049 2043
2050 /* 2044 if (AR_SREV_9462(ah)) {
2051 * Clear the RTC force wake bit to allow the 2045 REG_CLR_BIT(ah, AR_TIMER_MODE, 0xff);
2052 * mac to go to sleep. 2046 REG_CLR_BIT(ah, AR_NDP2_TIMER_MODE, 0xff);
2053 */ 2047 REG_CLR_BIT(ah, AR_SLP32_INC, 0xfffff);
2054 REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN); 2048 /* xxx Required for WLAN only case ? */
2049 REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, 0);
2050 udelay(100);
2051 }
2055 2052
2056 if (AR_SREV_9462(ah)) 2053 /*
2057 udelay(100); 2054 * Clear the RTC force wake bit to allow the
2055 * mac to go to sleep.
2056 */
2057 REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN);
2058 2058
2059 if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah)) 2059 if (ath9k_hw_mci_is_enabled(ah))
2060 REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF); 2060 udelay(100);
2061 2061
2062 /* Shutdown chip. Active low */ 2062 if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
2063 if (!AR_SREV_5416(ah) && !AR_SREV_9271(ah)) { 2063 REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
2064 REG_CLR_BIT(ah, AR_RTC_RESET, AR_RTC_RESET_EN); 2064
2065 udelay(2); 2065 /* Shutdown chip. Active low */
2066 } 2066 if (!AR_SREV_5416(ah) && !AR_SREV_9271(ah)) {
2067 REG_CLR_BIT(ah, AR_RTC_RESET, AR_RTC_RESET_EN);
2068 udelay(2);
2067 } 2069 }
2068 2070
2069 /* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */ 2071 /* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */
@@ -2076,44 +2078,38 @@ static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
2076 * frames. If request, set power mode of chip to 2078 * frames. If request, set power mode of chip to
2077 * auto/normal. Duration in units of 128us (1/8 TU). 2079 * auto/normal. Duration in units of 128us (1/8 TU).
2078 */ 2080 */
2079static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip) 2081static void ath9k_set_power_network_sleep(struct ath_hw *ah)
2080{ 2082{
2081 u32 val; 2083 struct ath9k_hw_capabilities *pCap = &ah->caps;
2082 2084
2083 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV); 2085 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
2084 if (setChip) {
2085 struct ath9k_hw_capabilities *pCap = &ah->caps;
2086 2086
2087 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) { 2087 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
2088 /* Set WakeOnInterrupt bit; clear ForceWake bit */ 2088 /* Set WakeOnInterrupt bit; clear ForceWake bit */
2089 REG_WRITE(ah, AR_RTC_FORCE_WAKE, 2089 REG_WRITE(ah, AR_RTC_FORCE_WAKE,
2090 AR_RTC_FORCE_WAKE_ON_INT); 2090 AR_RTC_FORCE_WAKE_ON_INT);
2091 } else { 2091 } else {
2092 2092
2093 /* When chip goes into network sleep, it could be waken 2093 /* When chip goes into network sleep, it could be waken
2094 * up by MCI_INT interrupt caused by BT's HW messages 2094 * up by MCI_INT interrupt caused by BT's HW messages
2095 * (LNA_xxx, CONT_xxx) which chould be in a very fast 2095 * (LNA_xxx, CONT_xxx) which chould be in a very fast
2096 * rate (~100us). This will cause chip to leave and 2096 * rate (~100us). This will cause chip to leave and
2097 * re-enter network sleep mode frequently, which in 2097 * re-enter network sleep mode frequently, which in
2098 * consequence will have WLAN MCI HW to generate lots of 2098 * consequence will have WLAN MCI HW to generate lots of
2099 * SYS_WAKING and SYS_SLEEPING messages which will make 2099 * SYS_WAKING and SYS_SLEEPING messages which will make
2100 * BT CPU to busy to process. 2100 * BT CPU to busy to process.
2101 */ 2101 */
2102 if (AR_SREV_9462(ah)) { 2102 if (ath9k_hw_mci_is_enabled(ah))
2103 val = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_EN) & 2103 REG_CLR_BIT(ah, AR_MCI_INTERRUPT_RX_MSG_EN,
2104 ~AR_MCI_INTERRUPT_RX_HW_MSG_MASK; 2104 AR_MCI_INTERRUPT_RX_HW_MSG_MASK);
2105 REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, val); 2105 /*
2106 } 2106 * Clear the RTC force wake bit to allow the
2107 /* 2107 * mac to go to sleep.
2108 * Clear the RTC force wake bit to allow the 2108 */
2109 * mac to go to sleep. 2109 REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN);
2110 */ 2110
2111 REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, 2111 if (ath9k_hw_mci_is_enabled(ah))
2112 AR_RTC_FORCE_WAKE_EN); 2112 udelay(30);
2113
2114 if (AR_SREV_9462(ah))
2115 udelay(30);
2116 }
2117 } 2113 }
2118 2114
2119 /* Clear Bit 14 of AR_WA after putting chip into Net Sleep mode. */ 2115 /* Clear Bit 14 of AR_WA after putting chip into Net Sleep mode. */
@@ -2121,7 +2117,7 @@ static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip)
2121 REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE); 2117 REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
2122} 2118}
2123 2119
2124static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip) 2120static bool ath9k_hw_set_power_awake(struct ath_hw *ah)
2125{ 2121{
2126 u32 val; 2122 u32 val;
2127 int i; 2123 int i;
@@ -2132,37 +2128,38 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
2132 udelay(10); 2128 udelay(10);
2133 } 2129 }
2134 2130
2135 if (setChip) { 2131 if ((REG_READ(ah, AR_RTC_STATUS) &
2136 if ((REG_READ(ah, AR_RTC_STATUS) & 2132 AR_RTC_STATUS_M) == AR_RTC_STATUS_SHUTDOWN) {
2137 AR_RTC_STATUS_M) == AR_RTC_STATUS_SHUTDOWN) { 2133 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
2138 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) { 2134 return false;
2139 return false;
2140 }
2141 if (!AR_SREV_9300_20_OR_LATER(ah))
2142 ath9k_hw_init_pll(ah, NULL);
2143 } 2135 }
2144 if (AR_SREV_9100(ah)) 2136 if (!AR_SREV_9300_20_OR_LATER(ah))
2145 REG_SET_BIT(ah, AR_RTC_RESET, 2137 ath9k_hw_init_pll(ah, NULL);
2146 AR_RTC_RESET_EN); 2138 }
2139 if (AR_SREV_9100(ah))
2140 REG_SET_BIT(ah, AR_RTC_RESET,
2141 AR_RTC_RESET_EN);
2142
2143 REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
2144 AR_RTC_FORCE_WAKE_EN);
2145 udelay(50);
2146
2147 if (ath9k_hw_mci_is_enabled(ah))
2148 ar9003_mci_set_power_awake(ah);
2147 2149
2150 for (i = POWER_UP_TIME / 50; i > 0; i--) {
2151 val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M;
2152 if (val == AR_RTC_STATUS_ON)
2153 break;
2154 udelay(50);
2148 REG_SET_BIT(ah, AR_RTC_FORCE_WAKE, 2155 REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
2149 AR_RTC_FORCE_WAKE_EN); 2156 AR_RTC_FORCE_WAKE_EN);
2150 udelay(50); 2157 }
2151 2158 if (i == 0) {
2152 for (i = POWER_UP_TIME / 50; i > 0; i--) { 2159 ath_err(ath9k_hw_common(ah),
2153 val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M; 2160 "Failed to wakeup in %uus\n",
2154 if (val == AR_RTC_STATUS_ON) 2161 POWER_UP_TIME / 20);
2155 break; 2162 return false;
2156 udelay(50);
2157 REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
2158 AR_RTC_FORCE_WAKE_EN);
2159 }
2160 if (i == 0) {
2161 ath_err(ath9k_hw_common(ah),
2162 "Failed to wakeup in %uus\n",
2163 POWER_UP_TIME / 20);
2164 return false;
2165 }
2166 } 2163 }
2167 2164
2168 REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV); 2165 REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
@@ -2173,7 +2170,7 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
2173bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode) 2170bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
2174{ 2171{
2175 struct ath_common *common = ath9k_hw_common(ah); 2172 struct ath_common *common = ath9k_hw_common(ah);
2176 int status = true, setChip = true; 2173 int status = true;
2177 static const char *modes[] = { 2174 static const char *modes[] = {
2178 "AWAKE", 2175 "AWAKE",
2179 "FULL-SLEEP", 2176 "FULL-SLEEP",
@@ -2189,25 +2186,17 @@ bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
2189 2186
2190 switch (mode) { 2187 switch (mode) {
2191 case ATH9K_PM_AWAKE: 2188 case ATH9K_PM_AWAKE:
2192 status = ath9k_hw_set_power_awake(ah, setChip); 2189 status = ath9k_hw_set_power_awake(ah);
2193
2194 if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
2195 REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
2196
2197 break; 2190 break;
2198 case ATH9K_PM_FULL_SLEEP: 2191 case ATH9K_PM_FULL_SLEEP:
2199 if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) 2192 if (ath9k_hw_mci_is_enabled(ah))
2200 ar9003_mci_set_full_sleep(ah); 2193 ar9003_mci_set_full_sleep(ah);
2201 2194
2202 ath9k_set_power_sleep(ah, setChip); 2195 ath9k_set_power_sleep(ah);
2203 ah->chip_fullsleep = true; 2196 ah->chip_fullsleep = true;
2204 break; 2197 break;
2205 case ATH9K_PM_NETWORK_SLEEP: 2198 case ATH9K_PM_NETWORK_SLEEP:
2206 2199 ath9k_set_power_network_sleep(ah);
2207 if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
2208 REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
2209
2210 ath9k_set_power_network_sleep(ah, setChip);
2211 break; 2200 break;
2212 default: 2201 default:
2213 ath_err(common, "Unknown power mode %u\n", mode); 2202 ath_err(common, "Unknown power mode %u\n", mode);
@@ -2777,6 +2766,9 @@ EXPORT_SYMBOL(ath9k_hw_setrxfilter);
2777 2766
2778bool ath9k_hw_phy_disable(struct ath_hw *ah) 2767bool ath9k_hw_phy_disable(struct ath_hw *ah)
2779{ 2768{
2769 if (ath9k_hw_mci_is_enabled(ah))
2770 ar9003_mci_bt_gain_ctrl(ah);
2771
2780 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM)) 2772 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM))
2781 return false; 2773 return false;
2782 2774
@@ -3162,6 +3154,7 @@ static struct {
3162 { AR_SREV_VERSION_9340, "9340" }, 3154 { AR_SREV_VERSION_9340, "9340" },
3163 { AR_SREV_VERSION_9485, "9485" }, 3155 { AR_SREV_VERSION_9485, "9485" },
3164 { AR_SREV_VERSION_9462, "9462" }, 3156 { AR_SREV_VERSION_9462, "9462" },
3157 { AR_SREV_VERSION_9550, "9550" },
3165}; 3158};
3166 3159
3167/* For devices with external radios */ 3160/* For devices with external radios */
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index b620c557c2a6..26da1732978d 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -48,6 +48,7 @@
48#define AR9300_DEVID_AR9580 0x0033 48#define AR9300_DEVID_AR9580 0x0033
49#define AR9300_DEVID_AR9462 0x0034 49#define AR9300_DEVID_AR9462 0x0034
50#define AR9300_DEVID_AR9330 0x0035 50#define AR9300_DEVID_AR9330 0x0035
51#define AR9300_DEVID_QCA955X 0x0038
51 52
52#define AR5416_AR9100_DEVID 0x000b 53#define AR5416_AR9100_DEVID 0x000b
53 54
@@ -818,13 +819,13 @@ struct ath_hw {
818 struct ar5416IniArray iniModesFastClock; 819 struct ar5416IniArray iniModesFastClock;
819 struct ar5416IniArray iniAdditional; 820 struct ar5416IniArray iniAdditional;
820 struct ar5416IniArray iniModesRxGain; 821 struct ar5416IniArray iniModesRxGain;
822 struct ar5416IniArray ini_modes_rx_gain_bounds;
821 struct ar5416IniArray iniModesTxGain; 823 struct ar5416IniArray iniModesTxGain;
822 struct ar5416IniArray iniCckfirNormal; 824 struct ar5416IniArray iniCckfirNormal;
823 struct ar5416IniArray iniCckfirJapan2484; 825 struct ar5416IniArray iniCckfirJapan2484;
824 struct ar5416IniArray ini_japan2484; 826 struct ar5416IniArray ini_japan2484;
825 struct ar5416IniArray iniModes_9271_ANI_reg; 827 struct ar5416IniArray iniModes_9271_ANI_reg;
826 struct ar5416IniArray ini_radio_post_sys2ant; 828 struct ar5416IniArray ini_radio_post_sys2ant;
827 struct ar5416IniArray ini_BTCOEX_MAX_TXPWR;
828 829
829 struct ar5416IniArray iniMac[ATH_INI_NUM_SPLIT]; 830 struct ar5416IniArray iniMac[ATH_INI_NUM_SPLIT];
830 struct ar5416IniArray iniBB[ATH_INI_NUM_SPLIT]; 831 struct ar5416IniArray iniBB[ATH_INI_NUM_SPLIT];
@@ -1020,16 +1021,8 @@ void ar9002_hw_attach_ops(struct ath_hw *ah);
1020void ar9003_hw_attach_ops(struct ath_hw *ah); 1021void ar9003_hw_attach_ops(struct ath_hw *ah);
1021 1022
1022void ar9002_hw_load_ani_reg(struct ath_hw *ah, struct ath9k_channel *chan); 1023void ar9002_hw_load_ani_reg(struct ath_hw *ah, struct ath9k_channel *chan);
1023/* 1024
1024 * ANI work can be shared between all families but a next
1025 * generation implementation of ANI will be used only for AR9003 only
1026 * for now as the other families still need to be tested with the same
1027 * next generation ANI. Feel free to start testing it though for the
1028 * older families (AR5008, AR9001, AR9002) by using modparam_force_new_ani.
1029 */
1030extern int modparam_force_new_ani;
1031void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning); 1025void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning);
1032void ath9k_hw_proc_mib_event(struct ath_hw *ah);
1033void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan); 1026void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan);
1034 1027
1035#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT 1028#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
@@ -1037,6 +1030,12 @@ static inline bool ath9k_hw_btcoex_is_enabled(struct ath_hw *ah)
1037{ 1030{
1038 return ah->btcoex_hw.enabled; 1031 return ah->btcoex_hw.enabled;
1039} 1032}
1033static inline bool ath9k_hw_mci_is_enabled(struct ath_hw *ah)
1034{
1035 return ah->common.btcoex_enabled &&
1036 (ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
1037
1038}
1040void ath9k_hw_btcoex_enable(struct ath_hw *ah); 1039void ath9k_hw_btcoex_enable(struct ath_hw *ah);
1041static inline enum ath_btcoex_scheme 1040static inline enum ath_btcoex_scheme
1042ath9k_hw_get_btcoex_scheme(struct ath_hw *ah) 1041ath9k_hw_get_btcoex_scheme(struct ath_hw *ah)
@@ -1048,6 +1047,10 @@ static inline bool ath9k_hw_btcoex_is_enabled(struct ath_hw *ah)
1048{ 1047{
1049 return false; 1048 return false;
1050} 1049}
1050static inline bool ath9k_hw_mci_is_enabled(struct ath_hw *ah)
1051{
1052 return false;
1053}
1051static inline void ath9k_hw_btcoex_enable(struct ath_hw *ah) 1054static inline void ath9k_hw_btcoex_enable(struct ath_hw *ah)
1052{ 1055{
1053} 1056}
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index dee9e092449a..9dfce1a69c73 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -489,6 +489,7 @@ static void ath9k_init_misc(struct ath_softc *sc)
489 489
490 setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc); 490 setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
491 491
492 sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
492 sc->config.txpowlimit = ATH_TXPOWER_MAX; 493 sc->config.txpowlimit = ATH_TXPOWER_MAX;
493 memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN); 494 memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
494 sc->beacon.slottime = ATH9K_SLOT_TIME_9; 495 sc->beacon.slottime = ATH9K_SLOT_TIME_9;
@@ -560,6 +561,12 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
560 tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet, 561 tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
561 (unsigned long)sc); 562 (unsigned long)sc);
562 563
564 INIT_WORK(&sc->hw_reset_work, ath_reset_work);
565 INIT_WORK(&sc->hw_check_work, ath_hw_check);
566 INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
567 INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
568 setup_timer(&sc->rx_poll_timer, ath_rx_poll, (unsigned long)sc);
569
563 /* 570 /*
564 * Cache line size is used to size and align various 571 * Cache line size is used to size and align various
565 * structures used to communicate with the hardware. 572 * structures used to communicate with the hardware.
@@ -590,6 +597,9 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
590 ath9k_cmn_init_crypto(sc->sc_ah); 597 ath9k_cmn_init_crypto(sc->sc_ah);
591 ath9k_init_misc(sc); 598 ath9k_init_misc(sc);
592 599
600 if (common->bus_ops->aspm_init)
601 common->bus_ops->aspm_init(common);
602
593 return 0; 603 return 0;
594 604
595err_btcoex: 605err_btcoex:
@@ -782,11 +792,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
782 ARRAY_SIZE(ath9k_tpt_blink)); 792 ARRAY_SIZE(ath9k_tpt_blink));
783#endif 793#endif
784 794
785 INIT_WORK(&sc->hw_reset_work, ath_reset_work);
786 INIT_WORK(&sc->hw_check_work, ath_hw_check);
787 INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
788 INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
789
790 /* Register with mac80211 */ 795 /* Register with mac80211 */
791 error = ieee80211_register_hw(hw); 796 error = ieee80211_register_hw(hw);
792 if (error) 797 if (error)
@@ -805,9 +810,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
805 goto error_world; 810 goto error_world;
806 } 811 }
807 812
808 setup_timer(&sc->rx_poll_timer, ath_rx_poll, (unsigned long)sc);
809 sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
810
811 ath_init_leds(sc); 813 ath_init_leds(sc);
812 ath_start_rfkill_poll(sc); 814 ath_start_rfkill_poll(sc);
813 815
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
new file mode 100644
index 000000000000..91650fe50461
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -0,0 +1,510 @@
1/*
2 * Copyright (c) 2012 Qualcomm Atheros, Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "ath9k.h"
18
19/*
20 * TX polling - checks if the TX engine is stuck somewhere
21 * and issues a chip reset if so.
22 */
23void ath_tx_complete_poll_work(struct work_struct *work)
24{
25 struct ath_softc *sc = container_of(work, struct ath_softc,
26 tx_complete_work.work);
27 struct ath_txq *txq;
28 int i;
29 bool needreset = false;
30#ifdef CONFIG_ATH9K_DEBUGFS
31 sc->tx_complete_poll_work_seen++;
32#endif
33
34 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
35 if (ATH_TXQ_SETUP(sc, i)) {
36 txq = &sc->tx.txq[i];
37 ath_txq_lock(sc, txq);
38 if (txq->axq_depth) {
39 if (txq->axq_tx_inprogress) {
40 needreset = true;
41 ath_txq_unlock(sc, txq);
42 break;
43 } else {
44 txq->axq_tx_inprogress = true;
45 }
46 }
47 ath_txq_unlock_complete(sc, txq);
48 }
49
50 if (needreset) {
51 ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
52 "tx hung, resetting the chip\n");
53 RESET_STAT_INC(sc, RESET_TYPE_TX_HANG);
54 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
55 return;
56 }
57
58 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
59 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
60}
61
62/*
63 * Checks if the BB/MAC is hung.
64 */
65void ath_hw_check(struct work_struct *work)
66{
67 struct ath_softc *sc = container_of(work, struct ath_softc, hw_check_work);
68 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
69 unsigned long flags;
70 int busy;
71 u8 is_alive, nbeacon = 1;
72
73 ath9k_ps_wakeup(sc);
74 is_alive = ath9k_hw_check_alive(sc->sc_ah);
75
76 if (is_alive && !AR_SREV_9300(sc->sc_ah))
77 goto out;
78 else if (!is_alive && AR_SREV_9300(sc->sc_ah)) {
79 ath_dbg(common, RESET,
80 "DCU stuck is detected. Schedule chip reset\n");
81 RESET_STAT_INC(sc, RESET_TYPE_MAC_HANG);
82 goto sched_reset;
83 }
84
85 spin_lock_irqsave(&common->cc_lock, flags);
86 busy = ath_update_survey_stats(sc);
87 spin_unlock_irqrestore(&common->cc_lock, flags);
88
89 ath_dbg(common, RESET, "Possible baseband hang, busy=%d (try %d)\n",
90 busy, sc->hw_busy_count + 1);
91 if (busy >= 99) {
92 if (++sc->hw_busy_count >= 3) {
93 RESET_STAT_INC(sc, RESET_TYPE_BB_HANG);
94 goto sched_reset;
95 }
96 } else if (busy >= 0) {
97 sc->hw_busy_count = 0;
98 nbeacon = 3;
99 }
100
101 ath_start_rx_poll(sc, nbeacon);
102 goto out;
103
104sched_reset:
105 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
106out:
107 ath9k_ps_restore(sc);
108}
109
110/*
111 * PLL-WAR for AR9485/AR9340
112 */
113static bool ath_hw_pll_rx_hang_check(struct ath_softc *sc, u32 pll_sqsum)
114{
115 static int count;
116 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
117
118 if (pll_sqsum >= 0x40000) {
119 count++;
120 if (count == 3) {
121 ath_dbg(common, RESET, "PLL WAR, resetting the chip\n");
122 RESET_STAT_INC(sc, RESET_TYPE_PLL_HANG);
123 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
124 count = 0;
125 return true;
126 }
127 } else {
128 count = 0;
129 }
130
131 return false;
132}
133
134void ath_hw_pll_work(struct work_struct *work)
135{
136 u32 pll_sqsum;
137 struct ath_softc *sc = container_of(work, struct ath_softc,
138 hw_pll_work.work);
139 /*
140 * ensure that the PLL WAR is executed only
141 * after the STA is associated (or) if the
142 * beaconing had started in interfaces that
143 * uses beacons.
144 */
145 if (!test_bit(SC_OP_BEACONS, &sc->sc_flags))
146 return;
147
148 ath9k_ps_wakeup(sc);
149 pll_sqsum = ar9003_get_pll_sqsum_dvc(sc->sc_ah);
150 ath9k_ps_restore(sc);
151 if (ath_hw_pll_rx_hang_check(sc, pll_sqsum))
152 return;
153
154 ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work,
155 msecs_to_jiffies(ATH_PLL_WORK_INTERVAL));
156}
157
158/*
159 * RX Polling - monitors baseband hangs.
160 */
161void ath_start_rx_poll(struct ath_softc *sc, u8 nbeacon)
162{
163 if (!AR_SREV_9300(sc->sc_ah))
164 return;
165
166 if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags))
167 return;
168
169 mod_timer(&sc->rx_poll_timer, jiffies + msecs_to_jiffies
170 (nbeacon * sc->cur_beacon_conf.beacon_interval));
171}
172
173void ath_rx_poll(unsigned long data)
174{
175 struct ath_softc *sc = (struct ath_softc *)data;
176
177 ieee80211_queue_work(sc->hw, &sc->hw_check_work);
178}
179
180/*
181 * PA Pre-distortion.
182 */
183static void ath_paprd_activate(struct ath_softc *sc)
184{
185 struct ath_hw *ah = sc->sc_ah;
186 struct ath9k_hw_cal_data *caldata = ah->caldata;
187 int chain;
188
189 if (!caldata || !caldata->paprd_done)
190 return;
191
192 ath9k_ps_wakeup(sc);
193 ar9003_paprd_enable(ah, false);
194 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
195 if (!(ah->txchainmask & BIT(chain)))
196 continue;
197
198 ar9003_paprd_populate_single_table(ah, caldata, chain);
199 }
200
201 ar9003_paprd_enable(ah, true);
202 ath9k_ps_restore(sc);
203}
204
205static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int chain)
206{
207 struct ieee80211_hw *hw = sc->hw;
208 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
209 struct ath_hw *ah = sc->sc_ah;
210 struct ath_common *common = ath9k_hw_common(ah);
211 struct ath_tx_control txctl;
212 int time_left;
213
214 memset(&txctl, 0, sizeof(txctl));
215 txctl.txq = sc->tx.txq_map[WME_AC_BE];
216
217 memset(tx_info, 0, sizeof(*tx_info));
218 tx_info->band = hw->conf.channel->band;
219 tx_info->flags |= IEEE80211_TX_CTL_NO_ACK;
220 tx_info->control.rates[0].idx = 0;
221 tx_info->control.rates[0].count = 1;
222 tx_info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
223 tx_info->control.rates[1].idx = -1;
224
225 init_completion(&sc->paprd_complete);
226 txctl.paprd = BIT(chain);
227
228 if (ath_tx_start(hw, skb, &txctl) != 0) {
229 ath_dbg(common, CALIBRATE, "PAPRD TX failed\n");
230 dev_kfree_skb_any(skb);
231 return false;
232 }
233
234 time_left = wait_for_completion_timeout(&sc->paprd_complete,
235 msecs_to_jiffies(ATH_PAPRD_TIMEOUT));
236
237 if (!time_left)
238 ath_dbg(common, CALIBRATE,
239 "Timeout waiting for paprd training on TX chain %d\n",
240 chain);
241
242 return !!time_left;
243}
244
245void ath_paprd_calibrate(struct work_struct *work)
246{
247 struct ath_softc *sc = container_of(work, struct ath_softc, paprd_work);
248 struct ieee80211_hw *hw = sc->hw;
249 struct ath_hw *ah = sc->sc_ah;
250 struct ieee80211_hdr *hdr;
251 struct sk_buff *skb = NULL;
252 struct ath9k_hw_cal_data *caldata = ah->caldata;
253 struct ath_common *common = ath9k_hw_common(ah);
254 int ftype;
255 int chain_ok = 0;
256 int chain;
257 int len = 1800;
258
259 if (!caldata)
260 return;
261
262 ath9k_ps_wakeup(sc);
263
264 if (ar9003_paprd_init_table(ah) < 0)
265 goto fail_paprd;
266
267 skb = alloc_skb(len, GFP_KERNEL);
268 if (!skb)
269 goto fail_paprd;
270
271 skb_put(skb, len);
272 memset(skb->data, 0, len);
273 hdr = (struct ieee80211_hdr *)skb->data;
274 ftype = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC;
275 hdr->frame_control = cpu_to_le16(ftype);
276 hdr->duration_id = cpu_to_le16(10);
277 memcpy(hdr->addr1, hw->wiphy->perm_addr, ETH_ALEN);
278 memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
279 memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
280
281 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
282 if (!(ah->txchainmask & BIT(chain)))
283 continue;
284
285 chain_ok = 0;
286
287 ath_dbg(common, CALIBRATE,
288 "Sending PAPRD frame for thermal measurement on chain %d\n",
289 chain);
290 if (!ath_paprd_send_frame(sc, skb, chain))
291 goto fail_paprd;
292
293 ar9003_paprd_setup_gain_table(ah, chain);
294
295 ath_dbg(common, CALIBRATE,
296 "Sending PAPRD training frame on chain %d\n", chain);
297 if (!ath_paprd_send_frame(sc, skb, chain))
298 goto fail_paprd;
299
300 if (!ar9003_paprd_is_done(ah)) {
301 ath_dbg(common, CALIBRATE,
302 "PAPRD not yet done on chain %d\n", chain);
303 break;
304 }
305
306 if (ar9003_paprd_create_curve(ah, caldata, chain)) {
307 ath_dbg(common, CALIBRATE,
308 "PAPRD create curve failed on chain %d\n",
309 chain);
310 break;
311 }
312
313 chain_ok = 1;
314 }
315 kfree_skb(skb);
316
317 if (chain_ok) {
318 caldata->paprd_done = true;
319 ath_paprd_activate(sc);
320 }
321
322fail_paprd:
323 ath9k_ps_restore(sc);
324}
325
326/*
327 * ANI performs periodic noise floor calibration
328 * that is used to adjust and optimize the chip performance. This
329 * takes environmental changes (location, temperature) into account.
330 * When the task is complete, it reschedules itself depending on the
331 * appropriate interval that was calculated.
332 */
333void ath_ani_calibrate(unsigned long data)
334{
335 struct ath_softc *sc = (struct ath_softc *)data;
336 struct ath_hw *ah = sc->sc_ah;
337 struct ath_common *common = ath9k_hw_common(ah);
338 bool longcal = false;
339 bool shortcal = false;
340 bool aniflag = false;
341 unsigned int timestamp = jiffies_to_msecs(jiffies);
342 u32 cal_interval, short_cal_interval, long_cal_interval;
343 unsigned long flags;
344
345 if (ah->caldata && ah->caldata->nfcal_interference)
346 long_cal_interval = ATH_LONG_CALINTERVAL_INT;
347 else
348 long_cal_interval = ATH_LONG_CALINTERVAL;
349
350 short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
351 ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;
352
353 /* Only calibrate if awake */
354 if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE)
355 goto set_timer;
356
357 ath9k_ps_wakeup(sc);
358
359 /* Long calibration runs independently of short calibration. */
360 if ((timestamp - common->ani.longcal_timer) >= long_cal_interval) {
361 longcal = true;
362 common->ani.longcal_timer = timestamp;
363 }
364
365 /* Short calibration applies only while caldone is false */
366 if (!common->ani.caldone) {
367 if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) {
368 shortcal = true;
369 common->ani.shortcal_timer = timestamp;
370 common->ani.resetcal_timer = timestamp;
371 }
372 } else {
373 if ((timestamp - common->ani.resetcal_timer) >=
374 ATH_RESTART_CALINTERVAL) {
375 common->ani.caldone = ath9k_hw_reset_calvalid(ah);
376 if (common->ani.caldone)
377 common->ani.resetcal_timer = timestamp;
378 }
379 }
380
381 /* Verify whether we must check ANI */
382 if (sc->sc_ah->config.enable_ani
383 && (timestamp - common->ani.checkani_timer) >=
384 ah->config.ani_poll_interval) {
385 aniflag = true;
386 common->ani.checkani_timer = timestamp;
387 }
388
389 /* Call ANI routine if necessary */
390 if (aniflag) {
391 spin_lock_irqsave(&common->cc_lock, flags);
392 ath9k_hw_ani_monitor(ah, ah->curchan);
393 ath_update_survey_stats(sc);
394 spin_unlock_irqrestore(&common->cc_lock, flags);
395 }
396
397 /* Perform calibration if necessary */
398 if (longcal || shortcal) {
399 common->ani.caldone =
400 ath9k_hw_calibrate(ah, ah->curchan,
401 ah->rxchainmask, longcal);
402 }
403
404 ath_dbg(common, ANI,
405 "Calibration @%lu finished: %s %s %s, caldone: %s\n",
406 jiffies,
407 longcal ? "long" : "", shortcal ? "short" : "",
408 aniflag ? "ani" : "", common->ani.caldone ? "true" : "false");
409
410 ath9k_debug_samp_bb_mac(sc);
411 ath9k_ps_restore(sc);
412
413set_timer:
414 /*
415 * Set timer interval based on previous results.
416 * The interval must be the shortest necessary to satisfy ANI,
417 * short calibration and long calibration.
418 */
419 cal_interval = ATH_LONG_CALINTERVAL;
420 if (sc->sc_ah->config.enable_ani)
421 cal_interval = min(cal_interval,
422 (u32)ah->config.ani_poll_interval);
423 if (!common->ani.caldone)
424 cal_interval = min(cal_interval, (u32)short_cal_interval);
425
426 mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
427 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_PAPRD) && ah->caldata) {
428 if (!ah->caldata->paprd_done)
429 ieee80211_queue_work(sc->hw, &sc->paprd_work);
430 else if (!ah->paprd_table_write_done)
431 ath_paprd_activate(sc);
432 }
433}
434
435void ath_start_ani(struct ath_common *common)
436{
437 struct ath_hw *ah = common->ah;
438 unsigned long timestamp = jiffies_to_msecs(jiffies);
439 struct ath_softc *sc = (struct ath_softc *) common->priv;
440
441 if (!test_bit(SC_OP_ANI_RUN, &sc->sc_flags))
442 return;
443
444 if (sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
445 return;
446
447 common->ani.longcal_timer = timestamp;
448 common->ani.shortcal_timer = timestamp;
449 common->ani.checkani_timer = timestamp;
450
451 mod_timer(&common->ani.timer,
452 jiffies + msecs_to_jiffies((u32)ah->config.ani_poll_interval));
453}
454
455void ath_update_survey_nf(struct ath_softc *sc, int channel)
456{
457 struct ath_hw *ah = sc->sc_ah;
458 struct ath9k_channel *chan = &ah->channels[channel];
459 struct survey_info *survey = &sc->survey[channel];
460
461 if (chan->noisefloor) {
462 survey->filled |= SURVEY_INFO_NOISE_DBM;
463 survey->noise = ath9k_hw_getchan_noise(ah, chan);
464 }
465}
466
467/*
468 * Updates the survey statistics and returns the busy time since last
469 * update in %, if the measurement duration was long enough for the
470 * result to be useful, -1 otherwise.
471 */
472int ath_update_survey_stats(struct ath_softc *sc)
473{
474 struct ath_hw *ah = sc->sc_ah;
475 struct ath_common *common = ath9k_hw_common(ah);
476 int pos = ah->curchan - &ah->channels[0];
477 struct survey_info *survey = &sc->survey[pos];
478 struct ath_cycle_counters *cc = &common->cc_survey;
479 unsigned int div = common->clockrate * 1000;
480 int ret = 0;
481
482 if (!ah->curchan)
483 return -1;
484
485 if (ah->power_mode == ATH9K_PM_AWAKE)
486 ath_hw_cycle_counters_update(common);
487
488 if (cc->cycles > 0) {
489 survey->filled |= SURVEY_INFO_CHANNEL_TIME |
490 SURVEY_INFO_CHANNEL_TIME_BUSY |
491 SURVEY_INFO_CHANNEL_TIME_RX |
492 SURVEY_INFO_CHANNEL_TIME_TX;
493 survey->channel_time += cc->cycles / div;
494 survey->channel_time_busy += cc->rx_busy / div;
495 survey->channel_time_rx += cc->rx_frame / div;
496 survey->channel_time_tx += cc->tx_frame / div;
497 }
498
499 if (cc->cycles < div)
500 return -1;
501
502 if (cc->cycles > 0)
503 ret = cc->rx_busy * 100 / cc->cycles;
504
505 memset(cc, 0, sizeof(*cc));
506
507 ath_update_survey_nf(sc, pos);
508
509 return ret;
510}
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 04ef775ccee1..7990cd55599c 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -810,7 +810,7 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
810 return; 810 return;
811 } 811 }
812 812
813 if (AR_SREV_9340(ah)) 813 if (AR_SREV_9340(ah) || AR_SREV_9550(ah))
814 sync_default &= ~AR_INTR_SYNC_HOST1_FATAL; 814 sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
815 815
816 async_mask = AR_INTR_MAC_IRQ; 816 async_mask = AR_INTR_MAC_IRQ;
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 21c955609e6c..0eba36dca6f8 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -646,6 +646,7 @@ enum ath9k_rx_filter {
646 ATH9K_RX_FILTER_PHYRADAR = 0x00002000, 646 ATH9K_RX_FILTER_PHYRADAR = 0x00002000,
647 ATH9K_RX_FILTER_MCAST_BCAST_ALL = 0x00008000, 647 ATH9K_RX_FILTER_MCAST_BCAST_ALL = 0x00008000,
648 ATH9K_RX_FILTER_CONTROL_WRAPPER = 0x00080000, 648 ATH9K_RX_FILTER_CONTROL_WRAPPER = 0x00080000,
649 ATH9K_RX_FILTER_4ADDRESS = 0x00100000,
649}; 650};
650 651
651#define ATH9K_RATESERIES_RTS_CTS 0x0001 652#define ATH9K_RATESERIES_RTS_CTS 0x0001
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index dac1a2709e3c..248e5b24acfa 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -19,7 +19,7 @@
19#include "ath9k.h" 19#include "ath9k.h"
20#include "btcoex.h" 20#include "btcoex.h"
21 21
22static u8 parse_mpdudensity(u8 mpdudensity) 22u8 ath9k_parse_mpdudensity(u8 mpdudensity)
23{ 23{
24 /* 24 /*
25 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing": 25 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
@@ -101,6 +101,7 @@ void ath9k_ps_wakeup(struct ath_softc *sc)
101 spin_lock(&common->cc_lock); 101 spin_lock(&common->cc_lock);
102 ath_hw_cycle_counters_update(common); 102 ath_hw_cycle_counters_update(common);
103 memset(&common->cc_survey, 0, sizeof(common->cc_survey)); 103 memset(&common->cc_survey, 0, sizeof(common->cc_survey));
104 memset(&common->cc_ani, 0, sizeof(common->cc_ani));
104 spin_unlock(&common->cc_lock); 105 spin_unlock(&common->cc_lock);
105 } 106 }
106 107
@@ -129,6 +130,8 @@ void ath9k_ps_restore(struct ath_softc *sc)
129 PS_WAIT_FOR_PSPOLL_DATA | 130 PS_WAIT_FOR_PSPOLL_DATA |
130 PS_WAIT_FOR_TX_ACK))) { 131 PS_WAIT_FOR_TX_ACK))) {
131 mode = ATH9K_PM_NETWORK_SLEEP; 132 mode = ATH9K_PM_NETWORK_SLEEP;
133 if (ath9k_hw_btcoex_is_enabled(sc->sc_ah))
134 ath9k_btcoex_stop_gen_timer(sc);
132 } else { 135 } else {
133 goto unlock; 136 goto unlock;
134 } 137 }
@@ -143,90 +146,17 @@ void ath9k_ps_restore(struct ath_softc *sc)
143 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 146 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
144} 147}
145 148
146void ath_start_ani(struct ath_common *common)
147{
148 struct ath_hw *ah = common->ah;
149 unsigned long timestamp = jiffies_to_msecs(jiffies);
150 struct ath_softc *sc = (struct ath_softc *) common->priv;
151
152 if (!(sc->sc_flags & SC_OP_ANI_RUN))
153 return;
154
155 if (sc->sc_flags & SC_OP_OFFCHANNEL)
156 return;
157
158 common->ani.longcal_timer = timestamp;
159 common->ani.shortcal_timer = timestamp;
160 common->ani.checkani_timer = timestamp;
161
162 mod_timer(&common->ani.timer,
163 jiffies +
164 msecs_to_jiffies((u32)ah->config.ani_poll_interval));
165}
166
167static void ath_update_survey_nf(struct ath_softc *sc, int channel)
168{
169 struct ath_hw *ah = sc->sc_ah;
170 struct ath9k_channel *chan = &ah->channels[channel];
171 struct survey_info *survey = &sc->survey[channel];
172
173 if (chan->noisefloor) {
174 survey->filled |= SURVEY_INFO_NOISE_DBM;
175 survey->noise = ath9k_hw_getchan_noise(ah, chan);
176 }
177}
178
179/*
180 * Updates the survey statistics and returns the busy time since last
181 * update in %, if the measurement duration was long enough for the
182 * result to be useful, -1 otherwise.
183 */
184static int ath_update_survey_stats(struct ath_softc *sc)
185{
186 struct ath_hw *ah = sc->sc_ah;
187 struct ath_common *common = ath9k_hw_common(ah);
188 int pos = ah->curchan - &ah->channels[0];
189 struct survey_info *survey = &sc->survey[pos];
190 struct ath_cycle_counters *cc = &common->cc_survey;
191 unsigned int div = common->clockrate * 1000;
192 int ret = 0;
193
194 if (!ah->curchan)
195 return -1;
196
197 if (ah->power_mode == ATH9K_PM_AWAKE)
198 ath_hw_cycle_counters_update(common);
199
200 if (cc->cycles > 0) {
201 survey->filled |= SURVEY_INFO_CHANNEL_TIME |
202 SURVEY_INFO_CHANNEL_TIME_BUSY |
203 SURVEY_INFO_CHANNEL_TIME_RX |
204 SURVEY_INFO_CHANNEL_TIME_TX;
205 survey->channel_time += cc->cycles / div;
206 survey->channel_time_busy += cc->rx_busy / div;
207 survey->channel_time_rx += cc->rx_frame / div;
208 survey->channel_time_tx += cc->tx_frame / div;
209 }
210
211 if (cc->cycles < div)
212 return -1;
213
214 if (cc->cycles > 0)
215 ret = cc->rx_busy * 100 / cc->cycles;
216
217 memset(cc, 0, sizeof(*cc));
218
219 ath_update_survey_nf(sc, pos);
220
221 return ret;
222}
223
224static void __ath_cancel_work(struct ath_softc *sc) 149static void __ath_cancel_work(struct ath_softc *sc)
225{ 150{
226 cancel_work_sync(&sc->paprd_work); 151 cancel_work_sync(&sc->paprd_work);
227 cancel_work_sync(&sc->hw_check_work); 152 cancel_work_sync(&sc->hw_check_work);
228 cancel_delayed_work_sync(&sc->tx_complete_work); 153 cancel_delayed_work_sync(&sc->tx_complete_work);
229 cancel_delayed_work_sync(&sc->hw_pll_work); 154 cancel_delayed_work_sync(&sc->hw_pll_work);
155
156#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
157 if (ath9k_hw_mci_is_enabled(sc->sc_ah))
158 cancel_work_sync(&sc->mci_work);
159#endif
230} 160}
231 161
232static void ath_cancel_work(struct ath_softc *sc) 162static void ath_cancel_work(struct ath_softc *sc)
@@ -235,6 +165,23 @@ static void ath_cancel_work(struct ath_softc *sc)
235 cancel_work_sync(&sc->hw_reset_work); 165 cancel_work_sync(&sc->hw_reset_work);
236} 166}
237 167
168static void ath_restart_work(struct ath_softc *sc)
169{
170 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
171
172 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
173
174 if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9485(sc->sc_ah) ||
175 AR_SREV_9550(sc->sc_ah))
176 ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work,
177 msecs_to_jiffies(ATH_PLL_WORK_INTERVAL));
178
179 ath_start_rx_poll(sc, 3);
180
181 if (!common->disable_ani)
182 ath_start_ani(common);
183}
184
238static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush) 185static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush)
239{ 186{
240 struct ath_hw *ah = sc->sc_ah; 187 struct ath_hw *ah = sc->sc_ah;
@@ -271,6 +218,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
271{ 218{
272 struct ath_hw *ah = sc->sc_ah; 219 struct ath_hw *ah = sc->sc_ah;
273 struct ath_common *common = ath9k_hw_common(ah); 220 struct ath_common *common = ath9k_hw_common(ah);
221 unsigned long flags;
274 222
275 if (ath_startrecv(sc) != 0) { 223 if (ath_startrecv(sc) != 0) {
276 ath_err(common, "Unable to restart recv logic\n"); 224 ath_err(common, "Unable to restart recv logic\n");
@@ -279,36 +227,30 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
279 227
280 ath9k_cmn_update_txpow(ah, sc->curtxpow, 228 ath9k_cmn_update_txpow(ah, sc->curtxpow,
281 sc->config.txpowlimit, &sc->curtxpow); 229 sc->config.txpowlimit, &sc->curtxpow);
230
231 clear_bit(SC_OP_HW_RESET, &sc->sc_flags);
282 ath9k_hw_set_interrupts(ah); 232 ath9k_hw_set_interrupts(ah);
283 ath9k_hw_enable_interrupts(ah); 233 ath9k_hw_enable_interrupts(ah);
284 234
285 if (!(sc->sc_flags & (SC_OP_OFFCHANNEL)) && start) { 235 if (!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) && start) {
286 if (sc->sc_flags & SC_OP_BEACONS) 236 if (!test_bit(SC_OP_BEACONS, &sc->sc_flags))
287 ath_set_beacon(sc); 237 goto work;
288
289 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
290 ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/2);
291 ath_start_rx_poll(sc, 3);
292 if (!common->disable_ani)
293 ath_start_ani(common);
294 }
295
296 if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx != 3) {
297 struct ath_hw_antcomb_conf div_ant_conf;
298 u8 lna_conf;
299
300 ath9k_hw_antdiv_comb_conf_get(ah, &div_ant_conf);
301 238
302 if (sc->ant_rx == 1) 239 ath_set_beacon(sc);
303 lna_conf = ATH_ANT_DIV_COMB_LNA1;
304 else
305 lna_conf = ATH_ANT_DIV_COMB_LNA2;
306 div_ant_conf.main_lna_conf = lna_conf;
307 div_ant_conf.alt_lna_conf = lna_conf;
308 240
309 ath9k_hw_antdiv_comb_conf_set(ah, &div_ant_conf); 241 if (ah->opmode == NL80211_IFTYPE_STATION &&
242 test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
243 spin_lock_irqsave(&sc->sc_pm_lock, flags);
244 sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
245 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
246 }
247 work:
248 ath_restart_work(sc);
310 } 249 }
311 250
251 if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx != 3)
252 ath_ant_comb_update(sc);
253
312 ieee80211_wake_queues(sc->hw); 254 ieee80211_wake_queues(sc->hw);
313 255
314 return true; 256 return true;
@@ -328,7 +270,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan,
328 270
329 spin_lock_bh(&sc->sc_pcu_lock); 271 spin_lock_bh(&sc->sc_pcu_lock);
330 272
331 if (!(sc->sc_flags & SC_OP_OFFCHANNEL)) { 273 if (!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) {
332 fastcc = false; 274 fastcc = false;
333 caldata = &sc->caldata; 275 caldata = &sc->caldata;
334 } 276 }
@@ -371,7 +313,7 @@ static int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
371{ 313{
372 int r; 314 int r;
373 315
374 if (sc->sc_flags & SC_OP_INVALID) 316 if (test_bit(SC_OP_INVALID, &sc->sc_flags))
375 return -EIO; 317 return -EIO;
376 318
377 r = ath_reset_internal(sc, hchan, false); 319 r = ath_reset_internal(sc, hchan, false);
@@ -379,262 +321,11 @@ static int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
379 return r; 321 return r;
380} 322}
381 323
382static void ath_paprd_activate(struct ath_softc *sc)
383{
384 struct ath_hw *ah = sc->sc_ah;
385 struct ath9k_hw_cal_data *caldata = ah->caldata;
386 int chain;
387
388 if (!caldata || !caldata->paprd_done)
389 return;
390
391 ath9k_ps_wakeup(sc);
392 ar9003_paprd_enable(ah, false);
393 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
394 if (!(ah->txchainmask & BIT(chain)))
395 continue;
396
397 ar9003_paprd_populate_single_table(ah, caldata, chain);
398 }
399
400 ar9003_paprd_enable(ah, true);
401 ath9k_ps_restore(sc);
402}
403
404static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int chain)
405{
406 struct ieee80211_hw *hw = sc->hw;
407 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
408 struct ath_hw *ah = sc->sc_ah;
409 struct ath_common *common = ath9k_hw_common(ah);
410 struct ath_tx_control txctl;
411 int time_left;
412
413 memset(&txctl, 0, sizeof(txctl));
414 txctl.txq = sc->tx.txq_map[WME_AC_BE];
415
416 memset(tx_info, 0, sizeof(*tx_info));
417 tx_info->band = hw->conf.channel->band;
418 tx_info->flags |= IEEE80211_TX_CTL_NO_ACK;
419 tx_info->control.rates[0].idx = 0;
420 tx_info->control.rates[0].count = 1;
421 tx_info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
422 tx_info->control.rates[1].idx = -1;
423
424 init_completion(&sc->paprd_complete);
425 txctl.paprd = BIT(chain);
426
427 if (ath_tx_start(hw, skb, &txctl) != 0) {
428 ath_dbg(common, CALIBRATE, "PAPRD TX failed\n");
429 dev_kfree_skb_any(skb);
430 return false;
431 }
432
433 time_left = wait_for_completion_timeout(&sc->paprd_complete,
434 msecs_to_jiffies(ATH_PAPRD_TIMEOUT));
435
436 if (!time_left)
437 ath_dbg(common, CALIBRATE,
438 "Timeout waiting for paprd training on TX chain %d\n",
439 chain);
440
441 return !!time_left;
442}
443
444void ath_paprd_calibrate(struct work_struct *work)
445{
446 struct ath_softc *sc = container_of(work, struct ath_softc, paprd_work);
447 struct ieee80211_hw *hw = sc->hw;
448 struct ath_hw *ah = sc->sc_ah;
449 struct ieee80211_hdr *hdr;
450 struct sk_buff *skb = NULL;
451 struct ath9k_hw_cal_data *caldata = ah->caldata;
452 struct ath_common *common = ath9k_hw_common(ah);
453 int ftype;
454 int chain_ok = 0;
455 int chain;
456 int len = 1800;
457
458 if (!caldata)
459 return;
460
461 ath9k_ps_wakeup(sc);
462
463 if (ar9003_paprd_init_table(ah) < 0)
464 goto fail_paprd;
465
466 skb = alloc_skb(len, GFP_KERNEL);
467 if (!skb)
468 goto fail_paprd;
469
470 skb_put(skb, len);
471 memset(skb->data, 0, len);
472 hdr = (struct ieee80211_hdr *)skb->data;
473 ftype = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC;
474 hdr->frame_control = cpu_to_le16(ftype);
475 hdr->duration_id = cpu_to_le16(10);
476 memcpy(hdr->addr1, hw->wiphy->perm_addr, ETH_ALEN);
477 memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
478 memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
479
480 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
481 if (!(ah->txchainmask & BIT(chain)))
482 continue;
483
484 chain_ok = 0;
485
486 ath_dbg(common, CALIBRATE,
487 "Sending PAPRD frame for thermal measurement on chain %d\n",
488 chain);
489 if (!ath_paprd_send_frame(sc, skb, chain))
490 goto fail_paprd;
491
492 ar9003_paprd_setup_gain_table(ah, chain);
493
494 ath_dbg(common, CALIBRATE,
495 "Sending PAPRD training frame on chain %d\n", chain);
496 if (!ath_paprd_send_frame(sc, skb, chain))
497 goto fail_paprd;
498
499 if (!ar9003_paprd_is_done(ah)) {
500 ath_dbg(common, CALIBRATE,
501 "PAPRD not yet done on chain %d\n", chain);
502 break;
503 }
504
505 if (ar9003_paprd_create_curve(ah, caldata, chain)) {
506 ath_dbg(common, CALIBRATE,
507 "PAPRD create curve failed on chain %d\n",
508 chain);
509 break;
510 }
511
512 chain_ok = 1;
513 }
514 kfree_skb(skb);
515
516 if (chain_ok) {
517 caldata->paprd_done = true;
518 ath_paprd_activate(sc);
519 }
520
521fail_paprd:
522 ath9k_ps_restore(sc);
523}
524
525/*
526 * This routine performs the periodic noise floor calibration function
527 * that is used to adjust and optimize the chip performance. This
528 * takes environmental changes (location, temperature) into account.
529 * When the task is complete, it reschedules itself depending on the
530 * appropriate interval that was calculated.
531 */
532void ath_ani_calibrate(unsigned long data)
533{
534 struct ath_softc *sc = (struct ath_softc *)data;
535 struct ath_hw *ah = sc->sc_ah;
536 struct ath_common *common = ath9k_hw_common(ah);
537 bool longcal = false;
538 bool shortcal = false;
539 bool aniflag = false;
540 unsigned int timestamp = jiffies_to_msecs(jiffies);
541 u32 cal_interval, short_cal_interval, long_cal_interval;
542 unsigned long flags;
543
544 if (ah->caldata && ah->caldata->nfcal_interference)
545 long_cal_interval = ATH_LONG_CALINTERVAL_INT;
546 else
547 long_cal_interval = ATH_LONG_CALINTERVAL;
548
549 short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
550 ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;
551
552 /* Only calibrate if awake */
553 if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE)
554 goto set_timer;
555
556 ath9k_ps_wakeup(sc);
557
558 /* Long calibration runs independently of short calibration. */
559 if ((timestamp - common->ani.longcal_timer) >= long_cal_interval) {
560 longcal = true;
561 common->ani.longcal_timer = timestamp;
562 }
563
564 /* Short calibration applies only while caldone is false */
565 if (!common->ani.caldone) {
566 if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) {
567 shortcal = true;
568 common->ani.shortcal_timer = timestamp;
569 common->ani.resetcal_timer = timestamp;
570 }
571 } else {
572 if ((timestamp - common->ani.resetcal_timer) >=
573 ATH_RESTART_CALINTERVAL) {
574 common->ani.caldone = ath9k_hw_reset_calvalid(ah);
575 if (common->ani.caldone)
576 common->ani.resetcal_timer = timestamp;
577 }
578 }
579
580 /* Verify whether we must check ANI */
581 if (sc->sc_ah->config.enable_ani
582 && (timestamp - common->ani.checkani_timer) >=
583 ah->config.ani_poll_interval) {
584 aniflag = true;
585 common->ani.checkani_timer = timestamp;
586 }
587
588 /* Call ANI routine if necessary */
589 if (aniflag) {
590 spin_lock_irqsave(&common->cc_lock, flags);
591 ath9k_hw_ani_monitor(ah, ah->curchan);
592 ath_update_survey_stats(sc);
593 spin_unlock_irqrestore(&common->cc_lock, flags);
594 }
595
596 /* Perform calibration if necessary */
597 if (longcal || shortcal) {
598 common->ani.caldone =
599 ath9k_hw_calibrate(ah, ah->curchan,
600 ah->rxchainmask, longcal);
601 }
602
603 ath_dbg(common, ANI,
604 "Calibration @%lu finished: %s %s %s, caldone: %s\n",
605 jiffies,
606 longcal ? "long" : "", shortcal ? "short" : "",
607 aniflag ? "ani" : "", common->ani.caldone ? "true" : "false");
608
609 ath9k_ps_restore(sc);
610
611set_timer:
612 /*
613 * Set timer interval based on previous results.
614 * The interval must be the shortest necessary to satisfy ANI,
615 * short calibration and long calibration.
616 */
617 ath9k_debug_samp_bb_mac(sc);
618 cal_interval = ATH_LONG_CALINTERVAL;
619 if (sc->sc_ah->config.enable_ani)
620 cal_interval = min(cal_interval,
621 (u32)ah->config.ani_poll_interval);
622 if (!common->ani.caldone)
623 cal_interval = min(cal_interval, (u32)short_cal_interval);
624
625 mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
626 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_PAPRD) && ah->caldata) {
627 if (!ah->caldata->paprd_done)
628 ieee80211_queue_work(sc->hw, &sc->paprd_work);
629 else if (!ah->paprd_table_write_done)
630 ath_paprd_activate(sc);
631 }
632}
633
634static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta, 324static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
635 struct ieee80211_vif *vif) 325 struct ieee80211_vif *vif)
636{ 326{
637 struct ath_node *an; 327 struct ath_node *an;
328 u8 density;
638 an = (struct ath_node *)sta->drv_priv; 329 an = (struct ath_node *)sta->drv_priv;
639 330
640#ifdef CONFIG_ATH9K_DEBUGFS 331#ifdef CONFIG_ATH9K_DEBUGFS
@@ -649,7 +340,8 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
649 ath_tx_node_init(sc, an); 340 ath_tx_node_init(sc, an);
650 an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + 341 an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
651 sta->ht_cap.ampdu_factor); 342 sta->ht_cap.ampdu_factor);
652 an->mpdudensity = parse_mpdudensity(sta->ht_cap.ampdu_density); 343 density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
344 an->mpdudensity = density;
653 } 345 }
654} 346}
655 347
@@ -668,13 +360,12 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
668 ath_tx_node_cleanup(sc, an); 360 ath_tx_node_cleanup(sc, an);
669} 361}
670 362
671
672void ath9k_tasklet(unsigned long data) 363void ath9k_tasklet(unsigned long data)
673{ 364{
674 struct ath_softc *sc = (struct ath_softc *)data; 365 struct ath_softc *sc = (struct ath_softc *)data;
675 struct ath_hw *ah = sc->sc_ah; 366 struct ath_hw *ah = sc->sc_ah;
676 struct ath_common *common = ath9k_hw_common(ah); 367 struct ath_common *common = ath9k_hw_common(ah);
677 368 unsigned long flags;
678 u32 status = sc->intrstatus; 369 u32 status = sc->intrstatus;
679 u32 rxmask; 370 u32 rxmask;
680 371
@@ -693,10 +384,12 @@ void ath9k_tasklet(unsigned long data)
693 384
694 RESET_STAT_INC(sc, type); 385 RESET_STAT_INC(sc, type);
695#endif 386#endif
387 set_bit(SC_OP_HW_RESET, &sc->sc_flags);
696 ieee80211_queue_work(sc->hw, &sc->hw_reset_work); 388 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
697 goto out; 389 goto out;
698 } 390 }
699 391
392 spin_lock_irqsave(&sc->sc_pm_lock, flags);
700 if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) { 393 if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) {
701 /* 394 /*
702 * TSF sync does not look correct; remain awake to sync with 395 * TSF sync does not look correct; remain awake to sync with
@@ -705,6 +398,7 @@ void ath9k_tasklet(unsigned long data)
705 ath_dbg(common, PS, "TSFOOR - Sync with next Beacon\n"); 398 ath_dbg(common, PS, "TSFOOR - Sync with next Beacon\n");
706 sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC; 399 sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC;
707 } 400 }
401 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
708 402
709 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 403 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
710 rxmask = (ATH9K_INT_RXHP | ATH9K_INT_RXLP | ATH9K_INT_RXEOL | 404 rxmask = (ATH9K_INT_RXHP | ATH9K_INT_RXLP | ATH9K_INT_RXEOL |
@@ -766,15 +460,17 @@ irqreturn_t ath_isr(int irq, void *dev)
766 * touch anything. Note this can happen early 460 * touch anything. Note this can happen early
767 * on if the IRQ is shared. 461 * on if the IRQ is shared.
768 */ 462 */
769 if (sc->sc_flags & SC_OP_INVALID) 463 if (test_bit(SC_OP_INVALID, &sc->sc_flags))
770 return IRQ_NONE; 464 return IRQ_NONE;
771 465
772
773 /* shared irq, not for us */ 466 /* shared irq, not for us */
774 467
775 if (!ath9k_hw_intrpend(ah)) 468 if (!ath9k_hw_intrpend(ah))
776 return IRQ_NONE; 469 return IRQ_NONE;
777 470
471 if(test_bit(SC_OP_HW_RESET, &sc->sc_flags))
472 return IRQ_HANDLED;
473
778 /* 474 /*
779 * Figure out the reason(s) for the interrupt. Note 475 * Figure out the reason(s) for the interrupt. Note
780 * that the hal returns a pseudo-ISR that may include 476 * that the hal returns a pseudo-ISR that may include
@@ -827,24 +523,6 @@ irqreturn_t ath_isr(int irq, void *dev)
827 ath9k_hw_set_interrupts(ah); 523 ath9k_hw_set_interrupts(ah);
828 } 524 }
829 525
830 if (status & ATH9K_INT_MIB) {
831 /*
832 * Disable interrupts until we service the MIB
833 * interrupt; otherwise it will continue to
834 * fire.
835 */
836 ath9k_hw_disable_interrupts(ah);
837 /*
838 * Let the hal handle the event. We assume
839 * it will clear whatever condition caused
840 * the interrupt.
841 */
842 spin_lock(&common->cc_lock);
843 ath9k_hw_proc_mib_event(ah);
844 spin_unlock(&common->cc_lock);
845 ath9k_hw_enable_interrupts(ah);
846 }
847
848 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) 526 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
849 if (status & ATH9K_INT_TIM_TIMER) { 527 if (status & ATH9K_INT_TIM_TIMER) {
850 if (ATH_DBG_WARN_ON_ONCE(sc->ps_idle)) 528 if (ATH_DBG_WARN_ON_ONCE(sc->ps_idle))
@@ -852,8 +530,10 @@ irqreturn_t ath_isr(int irq, void *dev)
852 /* Clear RxAbort bit so that we can 530 /* Clear RxAbort bit so that we can
853 * receive frames */ 531 * receive frames */
854 ath9k_setpower(sc, ATH9K_PM_AWAKE); 532 ath9k_setpower(sc, ATH9K_PM_AWAKE);
533 spin_lock(&sc->sc_pm_lock);
855 ath9k_hw_setrxabort(sc->sc_ah, 0); 534 ath9k_hw_setrxabort(sc->sc_ah, 0);
856 sc->ps_flags |= PS_WAIT_FOR_BEACON; 535 sc->ps_flags |= PS_WAIT_FOR_BEACON;
536 spin_unlock(&sc->sc_pm_lock);
857 } 537 }
858 538
859chip_reset: 539chip_reset:
@@ -902,96 +582,6 @@ void ath_reset_work(struct work_struct *work)
902 ath_reset(sc, true); 582 ath_reset(sc, true);
903} 583}
904 584
905void ath_hw_check(struct work_struct *work)
906{
907 struct ath_softc *sc = container_of(work, struct ath_softc, hw_check_work);
908 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
909 unsigned long flags;
910 int busy;
911 u8 is_alive, nbeacon = 1;
912
913 ath9k_ps_wakeup(sc);
914 is_alive = ath9k_hw_check_alive(sc->sc_ah);
915
916 if (is_alive && !AR_SREV_9300(sc->sc_ah))
917 goto out;
918 else if (!is_alive && AR_SREV_9300(sc->sc_ah)) {
919 ath_dbg(common, RESET,
920 "DCU stuck is detected. Schedule chip reset\n");
921 RESET_STAT_INC(sc, RESET_TYPE_MAC_HANG);
922 goto sched_reset;
923 }
924
925 spin_lock_irqsave(&common->cc_lock, flags);
926 busy = ath_update_survey_stats(sc);
927 spin_unlock_irqrestore(&common->cc_lock, flags);
928
929 ath_dbg(common, RESET, "Possible baseband hang, busy=%d (try %d)\n",
930 busy, sc->hw_busy_count + 1);
931 if (busy >= 99) {
932 if (++sc->hw_busy_count >= 3) {
933 RESET_STAT_INC(sc, RESET_TYPE_BB_HANG);
934 goto sched_reset;
935 }
936 } else if (busy >= 0) {
937 sc->hw_busy_count = 0;
938 nbeacon = 3;
939 }
940
941 ath_start_rx_poll(sc, nbeacon);
942 goto out;
943
944sched_reset:
945 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
946out:
947 ath9k_ps_restore(sc);
948}
949
950static void ath_hw_pll_rx_hang_check(struct ath_softc *sc, u32 pll_sqsum)
951{
952 static int count;
953 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
954
955 if (pll_sqsum >= 0x40000) {
956 count++;
957 if (count == 3) {
958 /* Rx is hung for more than 500ms. Reset it */
959 ath_dbg(common, RESET, "Possible RX hang, resetting\n");
960 RESET_STAT_INC(sc, RESET_TYPE_PLL_HANG);
961 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
962 count = 0;
963 }
964 } else
965 count = 0;
966}
967
968void ath_hw_pll_work(struct work_struct *work)
969{
970 struct ath_softc *sc = container_of(work, struct ath_softc,
971 hw_pll_work.work);
972 u32 pll_sqsum;
973
974 /*
975 * ensure that the PLL WAR is executed only
976 * after the STA is associated (or) if the
977 * beaconing had started in interfaces that
978 * uses beacons.
979 */
980 if (!(sc->sc_flags & SC_OP_BEACONS))
981 return;
982
983 if (AR_SREV_9485(sc->sc_ah)) {
984
985 ath9k_ps_wakeup(sc);
986 pll_sqsum = ar9003_get_pll_sqsum_dvc(sc->sc_ah);
987 ath9k_ps_restore(sc);
988
989 ath_hw_pll_rx_hang_check(sc, pll_sqsum);
990
991 ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/5);
992 }
993}
994
995/**********************/ 585/**********************/
996/* mac80211 callbacks */ 586/* mac80211 callbacks */
997/**********************/ 587/**********************/
@@ -1054,10 +644,9 @@ static int ath9k_start(struct ieee80211_hw *hw)
1054 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) 644 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT)
1055 ah->imask |= ATH9K_INT_CST; 645 ah->imask |= ATH9K_INT_CST;
1056 646
1057 if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) 647 ath_mci_enable(sc);
1058 ah->imask |= ATH9K_INT_MCI;
1059 648
1060 sc->sc_flags &= ~SC_OP_INVALID; 649 clear_bit(SC_OP_INVALID, &sc->sc_flags);
1061 sc->sc_ah->is_monitoring = false; 650 sc->sc_ah->is_monitoring = false;
1062 651
1063 if (!ath_complete_reset(sc, false)) { 652 if (!ath_complete_reset(sc, false)) {
@@ -1080,8 +669,6 @@ static int ath9k_start(struct ieee80211_hw *hw)
1080 669
1081 spin_unlock_bh(&sc->sc_pcu_lock); 670 spin_unlock_bh(&sc->sc_pcu_lock);
1082 671
1083 ath9k_start_btcoex(sc);
1084
1085 if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en) 672 if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en)
1086 common->bus_ops->extn_synch_en(common); 673 common->bus_ops->extn_synch_en(common);
1087 674
@@ -1099,6 +686,7 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1099 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 686 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1100 struct ath_tx_control txctl; 687 struct ath_tx_control txctl;
1101 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 688 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
689 unsigned long flags;
1102 690
1103 if (sc->ps_enabled) { 691 if (sc->ps_enabled) {
1104 /* 692 /*
@@ -1121,6 +709,7 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1121 * completed and if needed, also for RX of buffered frames. 709 * completed and if needed, also for RX of buffered frames.
1122 */ 710 */
1123 ath9k_ps_wakeup(sc); 711 ath9k_ps_wakeup(sc);
712 spin_lock_irqsave(&sc->sc_pm_lock, flags);
1124 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) 713 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
1125 ath9k_hw_setrxabort(sc->sc_ah, 0); 714 ath9k_hw_setrxabort(sc->sc_ah, 0);
1126 if (ieee80211_is_pspoll(hdr->frame_control)) { 715 if (ieee80211_is_pspoll(hdr->frame_control)) {
@@ -1136,6 +725,7 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1136 * the ps_flags bit is cleared. We are just dropping 725 * the ps_flags bit is cleared. We are just dropping
1137 * the ps_usecount here. 726 * the ps_usecount here.
1138 */ 727 */
728 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
1139 ath9k_ps_restore(sc); 729 ath9k_ps_restore(sc);
1140 } 730 }
1141 731
@@ -1176,7 +766,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
1176 ath_cancel_work(sc); 766 ath_cancel_work(sc);
1177 del_timer_sync(&sc->rx_poll_timer); 767 del_timer_sync(&sc->rx_poll_timer);
1178 768
1179 if (sc->sc_flags & SC_OP_INVALID) { 769 if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
1180 ath_dbg(common, ANY, "Device not present\n"); 770 ath_dbg(common, ANY, "Device not present\n");
1181 mutex_unlock(&sc->mutex); 771 mutex_unlock(&sc->mutex);
1182 return; 772 return;
@@ -1185,8 +775,6 @@ static void ath9k_stop(struct ieee80211_hw *hw)
1185 /* Ensure HW is awake when we try to shut it down. */ 775 /* Ensure HW is awake when we try to shut it down. */
1186 ath9k_ps_wakeup(sc); 776 ath9k_ps_wakeup(sc);
1187 777
1188 ath9k_stop_btcoex(sc);
1189
1190 spin_lock_bh(&sc->sc_pcu_lock); 778 spin_lock_bh(&sc->sc_pcu_lock);
1191 779
1192 /* prevent tasklets to enable interrupts once we disable them */ 780 /* prevent tasklets to enable interrupts once we disable them */
@@ -1233,7 +821,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
1233 821
1234 ath9k_ps_restore(sc); 822 ath9k_ps_restore(sc);
1235 823
1236 sc->sc_flags |= SC_OP_INVALID; 824 set_bit(SC_OP_INVALID, &sc->sc_flags);
1237 sc->ps_idle = prev_idle; 825 sc->ps_idle = prev_idle;
1238 826
1239 mutex_unlock(&sc->mutex); 827 mutex_unlock(&sc->mutex);
@@ -1337,11 +925,11 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
1337 /* Set op-mode & TSF */ 925 /* Set op-mode & TSF */
1338 if (iter_data.naps > 0) { 926 if (iter_data.naps > 0) {
1339 ath9k_hw_set_tsfadjust(ah, 1); 927 ath9k_hw_set_tsfadjust(ah, 1);
1340 sc->sc_flags |= SC_OP_TSF_RESET; 928 set_bit(SC_OP_TSF_RESET, &sc->sc_flags);
1341 ah->opmode = NL80211_IFTYPE_AP; 929 ah->opmode = NL80211_IFTYPE_AP;
1342 } else { 930 } else {
1343 ath9k_hw_set_tsfadjust(ah, 0); 931 ath9k_hw_set_tsfadjust(ah, 0);
1344 sc->sc_flags &= ~SC_OP_TSF_RESET; 932 clear_bit(SC_OP_TSF_RESET, &sc->sc_flags);
1345 933
1346 if (iter_data.nmeshes) 934 if (iter_data.nmeshes)
1347 ah->opmode = NL80211_IFTYPE_MESH_POINT; 935 ah->opmode = NL80211_IFTYPE_MESH_POINT;
@@ -1356,14 +944,10 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
1356 /* 944 /*
1357 * Enable MIB interrupts when there are hardware phy counters. 945 * Enable MIB interrupts when there are hardware phy counters.
1358 */ 946 */
1359 if ((iter_data.nstations + iter_data.nadhocs + iter_data.nmeshes) > 0) { 947 if ((iter_data.nstations + iter_data.nadhocs + iter_data.nmeshes) > 0)
1360 if (ah->config.enable_ani)
1361 ah->imask |= ATH9K_INT_MIB;
1362 ah->imask |= ATH9K_INT_TSFOOR; 948 ah->imask |= ATH9K_INT_TSFOOR;
1363 } else { 949 else
1364 ah->imask &= ~ATH9K_INT_MIB;
1365 ah->imask &= ~ATH9K_INT_TSFOOR; 950 ah->imask &= ~ATH9K_INT_TSFOOR;
1366 }
1367 951
1368 ath9k_hw_set_interrupts(ah); 952 ath9k_hw_set_interrupts(ah);
1369 953
@@ -1372,12 +956,12 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
1372 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER; 956 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
1373 957
1374 if (!common->disable_ani) { 958 if (!common->disable_ani) {
1375 sc->sc_flags |= SC_OP_ANI_RUN; 959 set_bit(SC_OP_ANI_RUN, &sc->sc_flags);
1376 ath_start_ani(common); 960 ath_start_ani(common);
1377 } 961 }
1378 962
1379 } else { 963 } else {
1380 sc->sc_flags &= ~SC_OP_ANI_RUN; 964 clear_bit(SC_OP_ANI_RUN, &sc->sc_flags);
1381 del_timer_sync(&common->ani.timer); 965 del_timer_sync(&common->ani.timer);
1382 } 966 }
1383} 967}
@@ -1398,25 +982,6 @@ static void ath9k_do_vif_add_setup(struct ieee80211_hw *hw,
1398 } 982 }
1399} 983}
1400 984
1401void ath_start_rx_poll(struct ath_softc *sc, u8 nbeacon)
1402{
1403 if (!AR_SREV_9300(sc->sc_ah))
1404 return;
1405
1406 if (!(sc->sc_flags & SC_OP_PRIM_STA_VIF))
1407 return;
1408
1409 mod_timer(&sc->rx_poll_timer, jiffies + msecs_to_jiffies
1410 (nbeacon * sc->cur_beacon_conf.beacon_interval));
1411}
1412
1413void ath_rx_poll(unsigned long data)
1414{
1415 struct ath_softc *sc = (struct ath_softc *)data;
1416
1417 ieee80211_queue_work(sc->hw, &sc->hw_check_work);
1418}
1419
1420static int ath9k_add_interface(struct ieee80211_hw *hw, 985static int ath9k_add_interface(struct ieee80211_hw *hw,
1421 struct ieee80211_vif *vif) 986 struct ieee80211_vif *vif)
1422{ 987{
@@ -1573,14 +1138,17 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1573 1138
1574 if (changed & IEEE80211_CONF_CHANGE_IDLE) { 1139 if (changed & IEEE80211_CONF_CHANGE_IDLE) {
1575 sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE); 1140 sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
1576 if (sc->ps_idle) 1141 if (sc->ps_idle) {
1577 ath_cancel_work(sc); 1142 ath_cancel_work(sc);
1578 else 1143 ath9k_stop_btcoex(sc);
1144 } else {
1145 ath9k_start_btcoex(sc);
1579 /* 1146 /*
1580 * The chip needs a reset to properly wake up from 1147 * The chip needs a reset to properly wake up from
1581 * full sleep 1148 * full sleep
1582 */ 1149 */
1583 reset_channel = ah->chip_fullsleep; 1150 reset_channel = ah->chip_fullsleep;
1151 }
1584 } 1152 }
1585 1153
1586 /* 1154 /*
@@ -1618,11 +1186,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1618 if (ah->curchan) 1186 if (ah->curchan)
1619 old_pos = ah->curchan - &ah->channels[0]; 1187 old_pos = ah->curchan - &ah->channels[0];
1620 1188
1621 if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
1622 sc->sc_flags |= SC_OP_OFFCHANNEL;
1623 else
1624 sc->sc_flags &= ~SC_OP_OFFCHANNEL;
1625
1626 ath_dbg(common, CONFIG, "Set channel: %d MHz type: %d\n", 1189 ath_dbg(common, CONFIG, "Set channel: %d MHz type: %d\n",
1627 curchan->center_freq, conf->channel_type); 1190 curchan->center_freq, conf->channel_type);
1628 1191
@@ -1664,6 +1227,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1664 if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) { 1227 if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) {
1665 ath_err(common, "Unable to set channel\n"); 1228 ath_err(common, "Unable to set channel\n");
1666 mutex_unlock(&sc->mutex); 1229 mutex_unlock(&sc->mutex);
1230 ath9k_ps_restore(sc);
1667 return -EINVAL; 1231 return -EINVAL;
1668 } 1232 }
1669 1233
@@ -1902,16 +1466,16 @@ static void ath9k_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
1902 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1466 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1903 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; 1467 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
1904 struct ath_vif *avp = (void *)vif->drv_priv; 1468 struct ath_vif *avp = (void *)vif->drv_priv;
1905 1469 unsigned long flags;
1906 /* 1470 /*
1907 * Skip iteration if primary station vif's bss info 1471 * Skip iteration if primary station vif's bss info
1908 * was not changed 1472 * was not changed
1909 */ 1473 */
1910 if (sc->sc_flags & SC_OP_PRIM_STA_VIF) 1474 if (test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags))
1911 return; 1475 return;
1912 1476
1913 if (bss_conf->assoc) { 1477 if (bss_conf->assoc) {
1914 sc->sc_flags |= SC_OP_PRIM_STA_VIF; 1478 set_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags);
1915 avp->primary_sta_vif = true; 1479 avp->primary_sta_vif = true;
1916 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN); 1480 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
1917 common->curaid = bss_conf->aid; 1481 common->curaid = bss_conf->aid;
@@ -1924,7 +1488,10 @@ static void ath9k_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
1924 * on the receipt of the first Beacon frame (i.e., 1488 * on the receipt of the first Beacon frame (i.e.,
1925 * after time sync with the AP). 1489 * after time sync with the AP).
1926 */ 1490 */
1491 spin_lock_irqsave(&sc->sc_pm_lock, flags);
1927 sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON; 1492 sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
1493 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
1494
1928 /* Reset rssi stats */ 1495 /* Reset rssi stats */
1929 sc->last_rssi = ATH_RSSI_DUMMY_MARKER; 1496 sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
1930 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER; 1497 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
@@ -1932,7 +1499,7 @@ static void ath9k_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
1932 ath_start_rx_poll(sc, 3); 1499 ath_start_rx_poll(sc, 3);
1933 1500
1934 if (!common->disable_ani) { 1501 if (!common->disable_ani) {
1935 sc->sc_flags |= SC_OP_ANI_RUN; 1502 set_bit(SC_OP_ANI_RUN, &sc->sc_flags);
1936 ath_start_ani(common); 1503 ath_start_ani(common);
1937 } 1504 }
1938 1505
@@ -1952,7 +1519,8 @@ static void ath9k_config_bss(struct ath_softc *sc, struct ieee80211_vif *vif)
1952 if (avp->primary_sta_vif && !bss_conf->assoc) { 1519 if (avp->primary_sta_vif && !bss_conf->assoc) {
1953 ath_dbg(common, CONFIG, "Bss Info DISASSOC %d, bssid %pM\n", 1520 ath_dbg(common, CONFIG, "Bss Info DISASSOC %d, bssid %pM\n",
1954 common->curaid, common->curbssid); 1521 common->curaid, common->curbssid);
1955 sc->sc_flags &= ~(SC_OP_PRIM_STA_VIF | SC_OP_BEACONS); 1522 clear_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags);
1523 clear_bit(SC_OP_BEACONS, &sc->sc_flags);
1956 avp->primary_sta_vif = false; 1524 avp->primary_sta_vif = false;
1957 memset(common->curbssid, 0, ETH_ALEN); 1525 memset(common->curbssid, 0, ETH_ALEN);
1958 common->curaid = 0; 1526 common->curaid = 0;
@@ -1965,10 +1533,9 @@ static void ath9k_config_bss(struct ath_softc *sc, struct ieee80211_vif *vif)
1965 * None of station vifs are associated. 1533 * None of station vifs are associated.
1966 * Clear bssid & aid 1534 * Clear bssid & aid
1967 */ 1535 */
1968 if (!(sc->sc_flags & SC_OP_PRIM_STA_VIF)) { 1536 if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
1969 ath9k_hw_write_associd(sc->sc_ah); 1537 ath9k_hw_write_associd(sc->sc_ah);
1970 /* Stop ANI */ 1538 clear_bit(SC_OP_ANI_RUN, &sc->sc_flags);
1971 sc->sc_flags &= ~SC_OP_ANI_RUN;
1972 del_timer_sync(&common->ani.timer); 1539 del_timer_sync(&common->ani.timer);
1973 del_timer_sync(&sc->rx_poll_timer); 1540 del_timer_sync(&sc->rx_poll_timer);
1974 memset(&sc->caldata, 0, sizeof(sc->caldata)); 1541 memset(&sc->caldata, 0, sizeof(sc->caldata));
@@ -2006,12 +1573,12 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
2006 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER; 1573 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
2007 1574
2008 if (!common->disable_ani) { 1575 if (!common->disable_ani) {
2009 sc->sc_flags |= SC_OP_ANI_RUN; 1576 set_bit(SC_OP_ANI_RUN, &sc->sc_flags);
2010 ath_start_ani(common); 1577 ath_start_ani(common);
2011 } 1578 }
2012 1579
2013 } else { 1580 } else {
2014 sc->sc_flags &= ~SC_OP_ANI_RUN; 1581 clear_bit(SC_OP_ANI_RUN, &sc->sc_flags);
2015 del_timer_sync(&common->ani.timer); 1582 del_timer_sync(&common->ani.timer);
2016 del_timer_sync(&sc->rx_poll_timer); 1583 del_timer_sync(&sc->rx_poll_timer);
2017 } 1584 }
@@ -2023,7 +1590,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
2023 */ 1590 */
2024 if ((changed & BSS_CHANGED_BEACON_INT) && 1591 if ((changed & BSS_CHANGED_BEACON_INT) &&
2025 (vif->type == NL80211_IFTYPE_AP)) 1592 (vif->type == NL80211_IFTYPE_AP))
2026 sc->sc_flags |= SC_OP_TSF_RESET; 1593 set_bit(SC_OP_TSF_RESET, &sc->sc_flags);
2027 1594
2028 /* Configure beaconing (AP, IBSS, MESH) */ 1595 /* Configure beaconing (AP, IBSS, MESH) */
2029 if (ath9k_uses_beacons(vif->type) && 1596 if (ath9k_uses_beacons(vif->type) &&
@@ -2215,7 +1782,7 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
2215 return; 1782 return;
2216 } 1783 }
2217 1784
2218 if (sc->sc_flags & SC_OP_INVALID) { 1785 if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
2219 ath_dbg(common, ANY, "Device not present\n"); 1786 ath_dbg(common, ANY, "Device not present\n");
2220 mutex_unlock(&sc->mutex); 1787 mutex_unlock(&sc->mutex);
2221 return; 1788 return;
@@ -2380,6 +1947,134 @@ static int ath9k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
2380 return 0; 1947 return 0;
2381} 1948}
2382 1949
1950#ifdef CONFIG_ATH9K_DEBUGFS
1951
1952/* Ethtool support for get-stats */
1953
1954#define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO"
1955static const char ath9k_gstrings_stats[][ETH_GSTRING_LEN] = {
1956 "tx_pkts_nic",
1957 "tx_bytes_nic",
1958 "rx_pkts_nic",
1959 "rx_bytes_nic",
1960 AMKSTR(d_tx_pkts),
1961 AMKSTR(d_tx_bytes),
1962 AMKSTR(d_tx_mpdus_queued),
1963 AMKSTR(d_tx_mpdus_completed),
1964 AMKSTR(d_tx_mpdu_xretries),
1965 AMKSTR(d_tx_aggregates),
1966 AMKSTR(d_tx_ampdus_queued_hw),
1967 AMKSTR(d_tx_ampdus_queued_sw),
1968 AMKSTR(d_tx_ampdus_completed),
1969 AMKSTR(d_tx_ampdu_retries),
1970 AMKSTR(d_tx_ampdu_xretries),
1971 AMKSTR(d_tx_fifo_underrun),
1972 AMKSTR(d_tx_op_exceeded),
1973 AMKSTR(d_tx_timer_expiry),
1974 AMKSTR(d_tx_desc_cfg_err),
1975 AMKSTR(d_tx_data_underrun),
1976 AMKSTR(d_tx_delim_underrun),
1977
1978 "d_rx_decrypt_crc_err",
1979 "d_rx_phy_err",
1980 "d_rx_mic_err",
1981 "d_rx_pre_delim_crc_err",
1982 "d_rx_post_delim_crc_err",
1983 "d_rx_decrypt_busy_err",
1984
1985 "d_rx_phyerr_radar",
1986 "d_rx_phyerr_ofdm_timing",
1987 "d_rx_phyerr_cck_timing",
1988
1989};
1990#define ATH9K_SSTATS_LEN ARRAY_SIZE(ath9k_gstrings_stats)
1991
1992static void ath9k_get_et_strings(struct ieee80211_hw *hw,
1993 struct ieee80211_vif *vif,
1994 u32 sset, u8 *data)
1995{
1996 if (sset == ETH_SS_STATS)
1997 memcpy(data, *ath9k_gstrings_stats,
1998 sizeof(ath9k_gstrings_stats));
1999}
2000
2001static int ath9k_get_et_sset_count(struct ieee80211_hw *hw,
2002 struct ieee80211_vif *vif, int sset)
2003{
2004 if (sset == ETH_SS_STATS)
2005 return ATH9K_SSTATS_LEN;
2006 return 0;
2007}
2008
2009#define PR_QNUM(_n) (sc->tx.txq_map[_n]->axq_qnum)
2010#define AWDATA(elem) \
2011 do { \
2012 data[i++] = sc->debug.stats.txstats[PR_QNUM(WME_AC_BE)].elem; \
2013 data[i++] = sc->debug.stats.txstats[PR_QNUM(WME_AC_BK)].elem; \
2014 data[i++] = sc->debug.stats.txstats[PR_QNUM(WME_AC_VI)].elem; \
2015 data[i++] = sc->debug.stats.txstats[PR_QNUM(WME_AC_VO)].elem; \
2016 } while (0)
2017
2018#define AWDATA_RX(elem) \
2019 do { \
2020 data[i++] = sc->debug.stats.rxstats.elem; \
2021 } while (0)
2022
2023static void ath9k_get_et_stats(struct ieee80211_hw *hw,
2024 struct ieee80211_vif *vif,
2025 struct ethtool_stats *stats, u64 *data)
2026{
2027 struct ath_softc *sc = hw->priv;
2028 int i = 0;
2029
2030 data[i++] = (sc->debug.stats.txstats[PR_QNUM(WME_AC_BE)].tx_pkts_all +
2031 sc->debug.stats.txstats[PR_QNUM(WME_AC_BK)].tx_pkts_all +
2032 sc->debug.stats.txstats[PR_QNUM(WME_AC_VI)].tx_pkts_all +
2033 sc->debug.stats.txstats[PR_QNUM(WME_AC_VO)].tx_pkts_all);
2034 data[i++] = (sc->debug.stats.txstats[PR_QNUM(WME_AC_BE)].tx_bytes_all +
2035 sc->debug.stats.txstats[PR_QNUM(WME_AC_BK)].tx_bytes_all +
2036 sc->debug.stats.txstats[PR_QNUM(WME_AC_VI)].tx_bytes_all +
2037 sc->debug.stats.txstats[PR_QNUM(WME_AC_VO)].tx_bytes_all);
2038 AWDATA_RX(rx_pkts_all);
2039 AWDATA_RX(rx_bytes_all);
2040
2041 AWDATA(tx_pkts_all);
2042 AWDATA(tx_bytes_all);
2043 AWDATA(queued);
2044 AWDATA(completed);
2045 AWDATA(xretries);
2046 AWDATA(a_aggr);
2047 AWDATA(a_queued_hw);
2048 AWDATA(a_queued_sw);
2049 AWDATA(a_completed);
2050 AWDATA(a_retries);
2051 AWDATA(a_xretries);
2052 AWDATA(fifo_underrun);
2053 AWDATA(xtxop);
2054 AWDATA(timer_exp);
2055 AWDATA(desc_cfg_err);
2056 AWDATA(data_underrun);
2057 AWDATA(delim_underrun);
2058
2059 AWDATA_RX(decrypt_crc_err);
2060 AWDATA_RX(phy_err);
2061 AWDATA_RX(mic_err);
2062 AWDATA_RX(pre_delim_crc_err);
2063 AWDATA_RX(post_delim_crc_err);
2064 AWDATA_RX(decrypt_busy_err);
2065
2066 AWDATA_RX(phy_err_stats[ATH9K_PHYERR_RADAR]);
2067 AWDATA_RX(phy_err_stats[ATH9K_PHYERR_OFDM_TIMING]);
2068 AWDATA_RX(phy_err_stats[ATH9K_PHYERR_CCK_TIMING]);
2069
2070 WARN_ON(i != ATH9K_SSTATS_LEN);
2071}
2072
2073/* End of ethtool get-stats functions */
2074
2075#endif
2076
2077
2383struct ieee80211_ops ath9k_ops = { 2078struct ieee80211_ops ath9k_ops = {
2384 .tx = ath9k_tx, 2079 .tx = ath9k_tx,
2385 .start = ath9k_start, 2080 .start = ath9k_start,
@@ -2408,4 +2103,10 @@ struct ieee80211_ops ath9k_ops = {
2408 .get_stats = ath9k_get_stats, 2103 .get_stats = ath9k_get_stats,
2409 .set_antenna = ath9k_set_antenna, 2104 .set_antenna = ath9k_set_antenna,
2410 .get_antenna = ath9k_get_antenna, 2105 .get_antenna = ath9k_get_antenna,
2106
2107#ifdef CONFIG_ATH9K_DEBUGFS
2108 .get_et_sset_count = ath9k_get_et_sset_count,
2109 .get_et_stats = ath9k_get_et_stats,
2110 .get_et_strings = ath9k_get_et_strings,
2111#endif
2411}; 2112};
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index 29fe52d69973..87acff7fdaae 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -20,7 +20,7 @@
20#include "ath9k.h" 20#include "ath9k.h"
21#include "mci.h" 21#include "mci.h"
22 22
23static const u8 ath_mci_duty_cycle[] = { 0, 50, 60, 70, 80, 85, 90, 95, 98 }; 23static const u8 ath_mci_duty_cycle[] = { 55, 50, 60, 70, 80, 85, 90, 95, 98 };
24 24
25static struct ath_mci_profile_info* 25static struct ath_mci_profile_info*
26ath_mci_find_profile(struct ath_mci_profile *mci, 26ath_mci_find_profile(struct ath_mci_profile *mci,
@@ -28,11 +28,14 @@ ath_mci_find_profile(struct ath_mci_profile *mci,
28{ 28{
29 struct ath_mci_profile_info *entry; 29 struct ath_mci_profile_info *entry;
30 30
31 if (list_empty(&mci->info))
32 return NULL;
33
31 list_for_each_entry(entry, &mci->info, list) { 34 list_for_each_entry(entry, &mci->info, list) {
32 if (entry->conn_handle == info->conn_handle) 35 if (entry->conn_handle == info->conn_handle)
33 break; 36 return entry;
34 } 37 }
35 return entry; 38 return NULL;
36} 39}
37 40
38static bool ath_mci_add_profile(struct ath_common *common, 41static bool ath_mci_add_profile(struct ath_common *common,
@@ -49,31 +52,21 @@ static bool ath_mci_add_profile(struct ath_common *common,
49 (info->type != MCI_GPM_COEX_PROFILE_VOICE)) 52 (info->type != MCI_GPM_COEX_PROFILE_VOICE))
50 return false; 53 return false;
51 54
52 entry = ath_mci_find_profile(mci, info); 55 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
56 if (!entry)
57 return false;
53 58
54 if (entry) { 59 memcpy(entry, info, 10);
55 memcpy(entry, info, 10); 60 INC_PROF(mci, info);
56 } else { 61 list_add_tail(&entry->list, &mci->info);
57 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
58 if (!entry)
59 return false;
60
61 memcpy(entry, info, 10);
62 INC_PROF(mci, info);
63 list_add_tail(&info->list, &mci->info);
64 }
65 62
66 return true; 63 return true;
67} 64}
68 65
69static void ath_mci_del_profile(struct ath_common *common, 66static void ath_mci_del_profile(struct ath_common *common,
70 struct ath_mci_profile *mci, 67 struct ath_mci_profile *mci,
71 struct ath_mci_profile_info *info) 68 struct ath_mci_profile_info *entry)
72{ 69{
73 struct ath_mci_profile_info *entry;
74
75 entry = ath_mci_find_profile(mci, info);
76
77 if (!entry) 70 if (!entry)
78 return; 71 return;
79 72
@@ -86,12 +79,16 @@ void ath_mci_flush_profile(struct ath_mci_profile *mci)
86{ 79{
87 struct ath_mci_profile_info *info, *tinfo; 80 struct ath_mci_profile_info *info, *tinfo;
88 81
82 mci->aggr_limit = 0;
83
84 if (list_empty(&mci->info))
85 return;
86
89 list_for_each_entry_safe(info, tinfo, &mci->info, list) { 87 list_for_each_entry_safe(info, tinfo, &mci->info, list) {
90 list_del(&info->list); 88 list_del(&info->list);
91 DEC_PROF(mci, info); 89 DEC_PROF(mci, info);
92 kfree(info); 90 kfree(info);
93 } 91 }
94 mci->aggr_limit = 0;
95} 92}
96 93
97static void ath_mci_adjust_aggr_limit(struct ath_btcoex *btcoex) 94static void ath_mci_adjust_aggr_limit(struct ath_btcoex *btcoex)
@@ -116,42 +113,60 @@ static void ath_mci_update_scheme(struct ath_softc *sc)
116 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 113 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
117 struct ath_btcoex *btcoex = &sc->btcoex; 114 struct ath_btcoex *btcoex = &sc->btcoex;
118 struct ath_mci_profile *mci = &btcoex->mci; 115 struct ath_mci_profile *mci = &btcoex->mci;
116 struct ath9k_hw_mci *mci_hw = &sc->sc_ah->btcoex_hw.mci;
119 struct ath_mci_profile_info *info; 117 struct ath_mci_profile_info *info;
120 u32 num_profile = NUM_PROF(mci); 118 u32 num_profile = NUM_PROF(mci);
121 119
120 if (mci_hw->config & ATH_MCI_CONFIG_DISABLE_TUNING)
121 goto skip_tuning;
122
123 btcoex->duty_cycle = ath_mci_duty_cycle[num_profile];
124
122 if (num_profile == 1) { 125 if (num_profile == 1) {
123 info = list_first_entry(&mci->info, 126 info = list_first_entry(&mci->info,
124 struct ath_mci_profile_info, 127 struct ath_mci_profile_info,
125 list); 128 list);
126 if (mci->num_sco && info->T == 12) { 129 if (mci->num_sco) {
127 mci->aggr_limit = 8; 130 if (info->T == 12)
131 mci->aggr_limit = 8;
132 else if (info->T == 6) {
133 mci->aggr_limit = 6;
134 btcoex->duty_cycle = 30;
135 }
128 ath_dbg(common, MCI, 136 ath_dbg(common, MCI,
129 "Single SCO, aggregation limit 2 ms\n"); 137 "Single SCO, aggregation limit %d 1/4 ms\n",
130 } else if ((info->type == MCI_GPM_COEX_PROFILE_BNEP) && 138 mci->aggr_limit);
131 !info->master) { 139 } else if (mci->num_pan || mci->num_other_acl) {
132 btcoex->btcoex_period = 60; 140 /*
141 * For single PAN/FTP profile, allocate 35% for BT
142 * to improve WLAN throughput.
143 */
144 btcoex->duty_cycle = 35;
145 btcoex->btcoex_period = 53;
133 ath_dbg(common, MCI, 146 ath_dbg(common, MCI,
134 "Single slave PAN/FTP, bt period 60 ms\n"); 147 "Single PAN/FTP bt period %d ms dutycycle %d\n",
135 } else if ((info->type == MCI_GPM_COEX_PROFILE_HID) && 148 btcoex->duty_cycle, btcoex->btcoex_period);
136 (info->T > 0 && info->T < 50) && 149 } else if (mci->num_hid) {
137 (info->A > 1 || info->W > 1)) {
138 btcoex->duty_cycle = 30; 150 btcoex->duty_cycle = 30;
139 mci->aggr_limit = 8; 151 mci->aggr_limit = 6;
140 ath_dbg(common, MCI, 152 ath_dbg(common, MCI,
141 "Multiple attempt/timeout single HID " 153 "Multiple attempt/timeout single HID "
142 "aggregation limit 2 ms dutycycle 30%%\n"); 154 "aggregation limit 1.5 ms dutycycle 30%%\n");
143 } 155 }
144 } else if ((num_profile == 2) && (mci->num_hid == 2)) { 156 } else if (num_profile == 2) {
145 btcoex->duty_cycle = 30; 157 if (mci->num_hid == 2)
146 mci->aggr_limit = 8; 158 btcoex->duty_cycle = 30;
147 ath_dbg(common, MCI,
148 "Two HIDs aggregation limit 2 ms dutycycle 30%%\n");
149 } else if (num_profile > 3) {
150 mci->aggr_limit = 6; 159 mci->aggr_limit = 6;
151 ath_dbg(common, MCI, 160 ath_dbg(common, MCI,
152 "Three or more profiles aggregation limit 1.5 ms\n"); 161 "Two BT profiles aggr limit 1.5 ms dutycycle %d%%\n",
162 btcoex->duty_cycle);
163 } else if (num_profile >= 3) {
164 mci->aggr_limit = 4;
165 ath_dbg(common, MCI,
166 "Three or more profiles aggregation limit 1 ms\n");
153 } 167 }
154 168
169skip_tuning:
155 if (IS_CHAN_2GHZ(sc->sc_ah->curchan)) { 170 if (IS_CHAN_2GHZ(sc->sc_ah->curchan)) {
156 if (IS_CHAN_HT(sc->sc_ah->curchan)) 171 if (IS_CHAN_HT(sc->sc_ah->curchan))
157 ath_mci_adjust_aggr_limit(btcoex); 172 ath_mci_adjust_aggr_limit(btcoex);
@@ -159,18 +174,17 @@ static void ath_mci_update_scheme(struct ath_softc *sc)
159 btcoex->btcoex_period >>= 1; 174 btcoex->btcoex_period >>= 1;
160 } 175 }
161 176
162 ath9k_hw_btcoex_disable(sc->sc_ah);
163 ath9k_btcoex_timer_pause(sc); 177 ath9k_btcoex_timer_pause(sc);
178 ath9k_hw_btcoex_disable(sc->sc_ah);
164 179
165 if (IS_CHAN_5GHZ(sc->sc_ah->curchan)) 180 if (IS_CHAN_5GHZ(sc->sc_ah->curchan))
166 return; 181 return;
167 182
168 btcoex->duty_cycle += (mci->num_bdr ? ATH_MCI_MAX_DUTY_CYCLE : 0); 183 btcoex->duty_cycle += (mci->num_bdr ? ATH_MCI_BDR_DUTY_CYCLE : 0);
169 if (btcoex->duty_cycle > ATH_MCI_MAX_DUTY_CYCLE) 184 if (btcoex->duty_cycle > ATH_MCI_MAX_DUTY_CYCLE)
170 btcoex->duty_cycle = ATH_MCI_MAX_DUTY_CYCLE; 185 btcoex->duty_cycle = ATH_MCI_MAX_DUTY_CYCLE;
171 186
172 btcoex->btcoex_period *= 1000; 187 btcoex->btcoex_no_stomp = btcoex->btcoex_period * 1000 *
173 btcoex->btcoex_no_stomp = btcoex->btcoex_period *
174 (100 - btcoex->duty_cycle) / 100; 188 (100 - btcoex->duty_cycle) / 100;
175 189
176 ath9k_hw_btcoex_enable(sc->sc_ah); 190 ath9k_hw_btcoex_enable(sc->sc_ah);
@@ -181,20 +195,16 @@ static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
181{ 195{
182 struct ath_hw *ah = sc->sc_ah; 196 struct ath_hw *ah = sc->sc_ah;
183 struct ath_common *common = ath9k_hw_common(ah); 197 struct ath_common *common = ath9k_hw_common(ah);
198 struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
184 u32 payload[4] = {0, 0, 0, 0}; 199 u32 payload[4] = {0, 0, 0, 0};
185 200
186 switch (opcode) { 201 switch (opcode) {
187 case MCI_GPM_BT_CAL_REQ: 202 case MCI_GPM_BT_CAL_REQ:
188 if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) { 203 if (mci_hw->bt_state == MCI_BT_AWAKE) {
189 ar9003_mci_state(ah, MCI_STATE_SET_BT_CAL_START, NULL); 204 ar9003_mci_state(ah, MCI_STATE_SET_BT_CAL_START);
190 ieee80211_queue_work(sc->hw, &sc->hw_reset_work); 205 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
191 } else {
192 ath_dbg(common, MCI, "MCI State mismatch: %d\n",
193 ar9003_mci_state(ah, MCI_STATE_BT, NULL));
194 } 206 }
195 break; 207 ath_dbg(common, MCI, "MCI State : %d\n", mci_hw->bt_state);
196 case MCI_GPM_BT_CAL_DONE:
197 ar9003_mci_state(ah, MCI_STATE_BT, NULL);
198 break; 208 break;
199 case MCI_GPM_BT_CAL_GRANT: 209 case MCI_GPM_BT_CAL_GRANT:
200 MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_DONE); 210 MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_DONE);
@@ -207,32 +217,55 @@ static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
207 } 217 }
208} 218}
209 219
220static void ath9k_mci_work(struct work_struct *work)
221{
222 struct ath_softc *sc = container_of(work, struct ath_softc, mci_work);
223
224 ath_mci_update_scheme(sc);
225}
226
210static void ath_mci_process_profile(struct ath_softc *sc, 227static void ath_mci_process_profile(struct ath_softc *sc,
211 struct ath_mci_profile_info *info) 228 struct ath_mci_profile_info *info)
212{ 229{
213 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 230 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
214 struct ath_btcoex *btcoex = &sc->btcoex; 231 struct ath_btcoex *btcoex = &sc->btcoex;
215 struct ath_mci_profile *mci = &btcoex->mci; 232 struct ath_mci_profile *mci = &btcoex->mci;
233 struct ath_mci_profile_info *entry = NULL;
234
235 entry = ath_mci_find_profile(mci, info);
236 if (entry) {
237 /*
238 * Two MCI interrupts are generated while connecting to
239 * headset and A2DP profile, but only one MCI interrupt
240 * is generated with last added profile type while disconnecting
241 * both profiles.
242 * So while adding second profile type decrement
243 * the first one.
244 */
245 if (entry->type != info->type) {
246 DEC_PROF(mci, entry);
247 INC_PROF(mci, info);
248 }
249 memcpy(entry, info, 10);
250 }
216 251
217 if (info->start) { 252 if (info->start) {
218 if (!ath_mci_add_profile(common, mci, info)) 253 if (!entry && !ath_mci_add_profile(common, mci, info))
219 return; 254 return;
220 } else 255 } else
221 ath_mci_del_profile(common, mci, info); 256 ath_mci_del_profile(common, mci, entry);
222 257
223 btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD; 258 btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD;
224 mci->aggr_limit = mci->num_sco ? 6 : 0; 259 mci->aggr_limit = mci->num_sco ? 6 : 0;
225 260
226 if (NUM_PROF(mci)) { 261 btcoex->duty_cycle = ath_mci_duty_cycle[NUM_PROF(mci)];
262 if (NUM_PROF(mci))
227 btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW; 263 btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
228 btcoex->duty_cycle = ath_mci_duty_cycle[NUM_PROF(mci)]; 264 else
229 } else {
230 btcoex->bt_stomp_type = mci->num_mgmt ? ATH_BTCOEX_STOMP_ALL : 265 btcoex->bt_stomp_type = mci->num_mgmt ? ATH_BTCOEX_STOMP_ALL :
231 ATH_BTCOEX_STOMP_LOW; 266 ATH_BTCOEX_STOMP_LOW;
232 btcoex->duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
233 }
234 267
235 ath_mci_update_scheme(sc); 268 ieee80211_queue_work(sc->hw, &sc->mci_work);
236} 269}
237 270
238static void ath_mci_process_status(struct ath_softc *sc, 271static void ath_mci_process_status(struct ath_softc *sc,
@@ -247,8 +280,6 @@ static void ath_mci_process_status(struct ath_softc *sc,
247 if (status->is_link) 280 if (status->is_link)
248 return; 281 return;
249 282
250 memset(&info, 0, sizeof(struct ath_mci_profile_info));
251
252 info.conn_handle = status->conn_handle; 283 info.conn_handle = status->conn_handle;
253 if (ath_mci_find_profile(mci, &info)) 284 if (ath_mci_find_profile(mci, &info))
254 return; 285 return;
@@ -268,7 +299,7 @@ static void ath_mci_process_status(struct ath_softc *sc,
268 } while (++i < ATH_MCI_MAX_PROFILE); 299 } while (++i < ATH_MCI_MAX_PROFILE);
269 300
270 if (old_num_mgmt != mci->num_mgmt) 301 if (old_num_mgmt != mci->num_mgmt)
271 ath_mci_update_scheme(sc); 302 ieee80211_queue_work(sc->hw, &sc->mci_work);
272} 303}
273 304
274static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload) 305static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
@@ -277,25 +308,20 @@ static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
277 struct ath_mci_profile_info profile_info; 308 struct ath_mci_profile_info profile_info;
278 struct ath_mci_profile_status profile_status; 309 struct ath_mci_profile_status profile_status;
279 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 310 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
280 u32 version; 311 u8 major, minor;
281 u8 major;
282 u8 minor;
283 u32 seq_num; 312 u32 seq_num;
284 313
285 switch (opcode) { 314 switch (opcode) {
286 case MCI_GPM_COEX_VERSION_QUERY: 315 case MCI_GPM_COEX_VERSION_QUERY:
287 version = ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_COEX_VERSION, 316 ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_COEX_VERSION);
288 NULL);
289 break; 317 break;
290 case MCI_GPM_COEX_VERSION_RESPONSE: 318 case MCI_GPM_COEX_VERSION_RESPONSE:
291 major = *(rx_payload + MCI_GPM_COEX_B_MAJOR_VERSION); 319 major = *(rx_payload + MCI_GPM_COEX_B_MAJOR_VERSION);
292 minor = *(rx_payload + MCI_GPM_COEX_B_MINOR_VERSION); 320 minor = *(rx_payload + MCI_GPM_COEX_B_MINOR_VERSION);
293 version = (major << 8) + minor; 321 ar9003_mci_set_bt_version(ah, major, minor);
294 version = ar9003_mci_state(ah, MCI_STATE_SET_BT_COEX_VERSION,
295 &version);
296 break; 322 break;
297 case MCI_GPM_COEX_STATUS_QUERY: 323 case MCI_GPM_COEX_STATUS_QUERY:
298 ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_CHANNELS, NULL); 324 ar9003_mci_send_wlan_channels(ah);
299 break; 325 break;
300 case MCI_GPM_COEX_BT_PROFILE_INFO: 326 case MCI_GPM_COEX_BT_PROFILE_INFO:
301 memcpy(&profile_info, 327 memcpy(&profile_info,
@@ -322,7 +348,7 @@ static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
322 348
323 seq_num = *((u32 *)(rx_payload + 12)); 349 seq_num = *((u32 *)(rx_payload + 12));
324 ath_dbg(common, MCI, 350 ath_dbg(common, MCI,
325 "BT_Status_Update: is_link=%d, linkId=%d, state=%d, SEQ=%d\n", 351 "BT_Status_Update: is_link=%d, linkId=%d, state=%d, SEQ=%u\n",
326 profile_status.is_link, profile_status.conn_handle, 352 profile_status.is_link, profile_status.conn_handle,
327 profile_status.is_critical, seq_num); 353 profile_status.is_critical, seq_num);
328 354
@@ -362,6 +388,7 @@ int ath_mci_setup(struct ath_softc *sc)
362 mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4), 388 mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4),
363 mci->sched_buf.bf_paddr); 389 mci->sched_buf.bf_paddr);
364 390
391 INIT_WORK(&sc->mci_work, ath9k_mci_work);
365 ath_dbg(common, MCI, "MCI Initialized\n"); 392 ath_dbg(common, MCI, "MCI Initialized\n");
366 393
367 return 0; 394 return 0;
@@ -389,6 +416,7 @@ void ath_mci_intr(struct ath_softc *sc)
389 struct ath_mci_coex *mci = &sc->mci_coex; 416 struct ath_mci_coex *mci = &sc->mci_coex;
390 struct ath_hw *ah = sc->sc_ah; 417 struct ath_hw *ah = sc->sc_ah;
391 struct ath_common *common = ath9k_hw_common(ah); 418 struct ath_common *common = ath9k_hw_common(ah);
419 struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
392 u32 mci_int, mci_int_rxmsg; 420 u32 mci_int, mci_int_rxmsg;
393 u32 offset, subtype, opcode; 421 u32 offset, subtype, opcode;
394 u32 *pgpm; 422 u32 *pgpm;
@@ -397,8 +425,8 @@ void ath_mci_intr(struct ath_softc *sc)
397 425
398 ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg); 426 ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg);
399 427
400 if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) == 0) { 428 if (ar9003_mci_state(ah, MCI_STATE_ENABLE) == 0) {
401 ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET, NULL); 429 ar9003_mci_get_next_gpm_offset(ah, true, NULL);
402 return; 430 return;
403 } 431 }
404 432
@@ -417,46 +445,41 @@ void ath_mci_intr(struct ath_softc *sc)
417 NULL, 0, true, false); 445 NULL, 0, true, false);
418 446
419 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE; 447 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE;
420 ar9003_mci_state(ah, MCI_STATE_RESET_REQ_WAKE, NULL); 448 ar9003_mci_state(ah, MCI_STATE_RESET_REQ_WAKE);
421 449
422 /* 450 /*
423 * always do this for recovery and 2G/5G toggling and LNA_TRANS 451 * always do this for recovery and 2G/5G toggling and LNA_TRANS
424 */ 452 */
425 ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE, NULL); 453 ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE);
426 } 454 }
427 455
428 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING) { 456 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING) {
429 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING; 457 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING;
430 458
431 if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_SLEEP) { 459 if ((mci_hw->bt_state == MCI_BT_SLEEP) &&
432 if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL) != 460 (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP) !=
433 MCI_BT_SLEEP) 461 MCI_BT_SLEEP))
434 ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE, 462 ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE);
435 NULL);
436 }
437 } 463 }
438 464
439 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) { 465 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) {
440 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING; 466 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING;
441 467
442 if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) { 468 if ((mci_hw->bt_state == MCI_BT_AWAKE) &&
443 if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL) != 469 (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP) !=
444 MCI_BT_AWAKE) 470 MCI_BT_AWAKE))
445 ar9003_mci_state(ah, MCI_STATE_SET_BT_SLEEP, 471 mci_hw->bt_state = MCI_BT_SLEEP;
446 NULL);
447 }
448 } 472 }
449 473
450 if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) || 474 if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
451 (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) { 475 (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) {
452 ar9003_mci_state(ah, MCI_STATE_RECOVER_RX, NULL); 476 ar9003_mci_state(ah, MCI_STATE_RECOVER_RX);
453 skip_gpm = true; 477 skip_gpm = true;
454 } 478 }
455 479
456 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO) { 480 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO) {
457 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO; 481 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO;
458 offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET, 482 offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET);
459 NULL);
460 } 483 }
461 484
462 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_GPM) { 485 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_GPM) {
@@ -465,8 +488,8 @@ void ath_mci_intr(struct ath_softc *sc)
465 while (more_data == MCI_GPM_MORE) { 488 while (more_data == MCI_GPM_MORE) {
466 489
467 pgpm = mci->gpm_buf.bf_addr; 490 pgpm = mci->gpm_buf.bf_addr;
468 offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET, 491 offset = ar9003_mci_get_next_gpm_offset(ah, false,
469 &more_data); 492 &more_data);
470 493
471 if (offset == MCI_GPM_INVALID) 494 if (offset == MCI_GPM_INVALID)
472 break; 495 break;
@@ -507,23 +530,17 @@ void ath_mci_intr(struct ath_softc *sc)
507 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_INFO; 530 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_INFO;
508 531
509 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) { 532 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) {
510 int value_dbm = ar9003_mci_state(ah, 533 int value_dbm = MS(mci_hw->cont_status,
511 MCI_STATE_CONT_RSSI_POWER, NULL); 534 AR_MCI_CONT_RSSI_POWER);
512 535
513 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_INFO; 536 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_INFO;
514 537
515 if (ar9003_mci_state(ah, MCI_STATE_CONT_TXRX, NULL)) 538 ath_dbg(common, MCI,
516 ath_dbg(common, MCI, 539 "MCI CONT_INFO: (%s) pri = %d pwr = %d dBm\n",
517 "MCI CONT_INFO: (tx) pri = %d, pwr = %d dBm\n", 540 MS(mci_hw->cont_status, AR_MCI_CONT_TXRX) ?
518 ar9003_mci_state(ah, 541 "tx" : "rx",
519 MCI_STATE_CONT_PRIORITY, NULL), 542 MS(mci_hw->cont_status, AR_MCI_CONT_PRIORITY),
520 value_dbm); 543 value_dbm);
521 else
522 ath_dbg(common, MCI,
523 "MCI CONT_INFO: (rx) pri = %d,pwr = %d dBm\n",
524 ar9003_mci_state(ah,
525 MCI_STATE_CONT_PRIORITY, NULL),
526 value_dbm);
527 } 544 }
528 545
529 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_NACK) 546 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_NACK)
@@ -538,3 +555,14 @@ void ath_mci_intr(struct ath_softc *sc)
538 mci_int &= ~(AR_MCI_INTERRUPT_RX_INVALID_HDR | 555 mci_int &= ~(AR_MCI_INTERRUPT_RX_INVALID_HDR |
539 AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT); 556 AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT);
540} 557}
558
559void ath_mci_enable(struct ath_softc *sc)
560{
561 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
562
563 if (!common->btcoex_enabled)
564 return;
565
566 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
567 sc->sc_ah->imask |= ATH9K_INT_MCI;
568}
diff --git a/drivers/net/wireless/ath/ath9k/mci.h b/drivers/net/wireless/ath/ath9k/mci.h
index c841444f53c2..fc14eea034eb 100644
--- a/drivers/net/wireless/ath/ath9k/mci.h
+++ b/drivers/net/wireless/ath/ath9k/mci.h
@@ -130,4 +130,13 @@ void ath_mci_flush_profile(struct ath_mci_profile *mci);
130int ath_mci_setup(struct ath_softc *sc); 130int ath_mci_setup(struct ath_softc *sc);
131void ath_mci_cleanup(struct ath_softc *sc); 131void ath_mci_cleanup(struct ath_softc *sc);
132void ath_mci_intr(struct ath_softc *sc); 132void ath_mci_intr(struct ath_softc *sc);
133#endif 133
134#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
135void ath_mci_enable(struct ath_softc *sc);
136#else
137static inline void ath_mci_enable(struct ath_softc *sc)
138{
139}
140#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
141
142#endif /* MCI_H*/
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index a856b51255f4..aa0e83ac51f4 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -115,6 +115,9 @@ static void ath_pci_aspm_init(struct ath_common *common)
115 int pos; 115 int pos;
116 u8 aspm; 116 u8 aspm;
117 117
118 if (!ah->is_pciexpress)
119 return;
120
118 pos = pci_pcie_cap(pdev); 121 pos = pci_pcie_cap(pdev);
119 if (!pos) 122 if (!pos)
120 return; 123 return;
@@ -138,6 +141,7 @@ static void ath_pci_aspm_init(struct ath_common *common)
138 aspm &= ~(PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); 141 aspm &= ~(PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
139 pci_write_config_byte(parent, pos + PCI_EXP_LNKCTL, aspm); 142 pci_write_config_byte(parent, pos + PCI_EXP_LNKCTL, aspm);
140 143
144 ath_info(common, "Disabling ASPM since BTCOEX is enabled\n");
141 return; 145 return;
142 } 146 }
143 147
@@ -147,6 +151,7 @@ static void ath_pci_aspm_init(struct ath_common *common)
147 ah->aspm_enabled = true; 151 ah->aspm_enabled = true;
148 /* Initialize PCIe PM and SERDES registers. */ 152 /* Initialize PCIe PM and SERDES registers. */
149 ath9k_hw_configpcipowersave(ah, false); 153 ath9k_hw_configpcipowersave(ah, false);
154 ath_info(common, "ASPM enabled: 0x%x\n", aspm);
150 } 155 }
151} 156}
152 157
@@ -246,7 +251,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
246 sc->mem = mem; 251 sc->mem = mem;
247 252
248 /* Will be cleared in ath9k_start() */ 253 /* Will be cleared in ath9k_start() */
249 sc->sc_flags |= SC_OP_INVALID; 254 set_bit(SC_OP_INVALID, &sc->sc_flags);
250 255
251 ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc); 256 ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc);
252 if (ret) { 257 if (ret) {
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 92a6c0a87f89..e034add9cd5a 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -770,7 +770,7 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
770 struct ieee80211_tx_rate *rates = tx_info->control.rates; 770 struct ieee80211_tx_rate *rates = tx_info->control.rates;
771 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 771 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
772 __le16 fc = hdr->frame_control; 772 __le16 fc = hdr->frame_control;
773 u8 try_per_rate, i = 0, rix, high_rix; 773 u8 try_per_rate, i = 0, rix;
774 int is_probe = 0; 774 int is_probe = 0;
775 775
776 if (rate_control_send_low(sta, priv_sta, txrc)) 776 if (rate_control_send_low(sta, priv_sta, txrc))
@@ -791,7 +791,6 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
791 rate_table = ath_rc_priv->rate_table; 791 rate_table = ath_rc_priv->rate_table;
792 rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table, 792 rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table,
793 &is_probe, false); 793 &is_probe, false);
794 high_rix = rix;
795 794
796 /* 795 /*
797 * If we're in HT mode and both us and our peer supports LDPC. 796 * If we're in HT mode and both us and our peer supports LDPC.
@@ -839,16 +838,16 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
839 try_per_rate = 8; 838 try_per_rate = 8;
840 839
841 /* 840 /*
842 * Use a legacy rate as last retry to ensure that the frame 841 * If the last rate in the rate series is MCS and has
843 * is tried in both MCS and legacy rates. 842 * more than 80% of per thresh, then use a legacy rate
843 * as last retry to ensure that the frame is tried in both
844 * MCS and legacy rate.
844 */ 845 */
845 if ((rates[2].flags & IEEE80211_TX_RC_MCS) && 846 ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
846 (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU) || 847 if (WLAN_RC_PHY_HT(rate_table->info[rix].phy) &&
847 (ath_rc_priv->per[high_rix] > 45))) 848 (ath_rc_priv->per[rix] > 45))
848 rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table, 849 rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table,
849 &is_probe, true); 850 &is_probe, true);
850 else
851 ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
852 851
853 /* All other rates in the series have RTS enabled */ 852 /* All other rates in the series have RTS enabled */
854 ath_rc_rate_set_series(rate_table, &rates[i], txrc, 853 ath_rc_rate_set_series(rate_table, &rates[i], txrc,
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 0735aeb3b26c..11f3703a420a 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -20,43 +20,6 @@
20 20
21#define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb)) 21#define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb))
22 22
23static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
24 int mindelta, int main_rssi_avg,
25 int alt_rssi_avg, int pkt_count)
26{
27 return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
28 (alt_rssi_avg > main_rssi_avg + maxdelta)) ||
29 (alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50);
30}
31
32static inline bool ath_ant_div_comb_alt_check(u8 div_group, int alt_ratio,
33 int curr_main_set, int curr_alt_set,
34 int alt_rssi_avg, int main_rssi_avg)
35{
36 bool result = false;
37 switch (div_group) {
38 case 0:
39 if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
40 result = true;
41 break;
42 case 1:
43 case 2:
44 if ((((curr_main_set == ATH_ANT_DIV_COMB_LNA2) &&
45 (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) &&
46 (alt_rssi_avg >= (main_rssi_avg - 5))) ||
47 ((curr_main_set == ATH_ANT_DIV_COMB_LNA1) &&
48 (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) &&
49 (alt_rssi_avg >= (main_rssi_avg - 2)))) &&
50 (alt_rssi_avg >= 4))
51 result = true;
52 else
53 result = false;
54 break;
55 }
56
57 return result;
58}
59
60static inline bool ath9k_check_auto_sleep(struct ath_softc *sc) 23static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
61{ 24{
62 return sc->ps_enabled && 25 return sc->ps_enabled &&
@@ -303,7 +266,7 @@ static void ath_edma_start_recv(struct ath_softc *sc)
303 266
304 ath_opmode_init(sc); 267 ath_opmode_init(sc);
305 268
306 ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL)); 269 ath9k_hw_startpcureceive(sc->sc_ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));
307 270
308 spin_unlock_bh(&sc->rx.rxbuflock); 271 spin_unlock_bh(&sc->rx.rxbuflock);
309} 272}
@@ -322,8 +285,8 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
322 int error = 0; 285 int error = 0;
323 286
324 spin_lock_init(&sc->sc_pcu_lock); 287 spin_lock_init(&sc->sc_pcu_lock);
325 sc->sc_flags &= ~SC_OP_RXFLUSH;
326 spin_lock_init(&sc->rx.rxbuflock); 288 spin_lock_init(&sc->rx.rxbuflock);
289 clear_bit(SC_OP_RXFLUSH, &sc->sc_flags);
327 290
328 common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 + 291 common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
329 sc->sc_ah->caps.rx_status_len; 292 sc->sc_ah->caps.rx_status_len;
@@ -467,6 +430,9 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
467 rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL; 430 rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
468 } 431 }
469 432
433 if (AR_SREV_9550(sc->sc_ah))
434 rfilt |= ATH9K_RX_FILTER_4ADDRESS;
435
470 return rfilt; 436 return rfilt;
471 437
472} 438}
@@ -500,7 +466,7 @@ int ath_startrecv(struct ath_softc *sc)
500 466
501start_recv: 467start_recv:
502 ath_opmode_init(sc); 468 ath_opmode_init(sc);
503 ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL)); 469 ath9k_hw_startpcureceive(ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));
504 470
505 spin_unlock_bh(&sc->rx.rxbuflock); 471 spin_unlock_bh(&sc->rx.rxbuflock);
506 472
@@ -535,11 +501,11 @@ bool ath_stoprecv(struct ath_softc *sc)
535 501
536void ath_flushrecv(struct ath_softc *sc) 502void ath_flushrecv(struct ath_softc *sc)
537{ 503{
538 sc->sc_flags |= SC_OP_RXFLUSH; 504 set_bit(SC_OP_RXFLUSH, &sc->sc_flags);
539 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 505 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
540 ath_rx_tasklet(sc, 1, true); 506 ath_rx_tasklet(sc, 1, true);
541 ath_rx_tasklet(sc, 1, false); 507 ath_rx_tasklet(sc, 1, false);
542 sc->sc_flags &= ~SC_OP_RXFLUSH; 508 clear_bit(SC_OP_RXFLUSH, &sc->sc_flags);
543} 509}
544 510
545static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb) 511static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
@@ -624,13 +590,13 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
624 590
625 /* Process Beacon and CAB receive in PS state */ 591 /* Process Beacon and CAB receive in PS state */
626 if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc)) 592 if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
627 && mybeacon) 593 && mybeacon) {
628 ath_rx_ps_beacon(sc, skb); 594 ath_rx_ps_beacon(sc, skb);
629 else if ((sc->ps_flags & PS_WAIT_FOR_CAB) && 595 } else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
630 (ieee80211_is_data(hdr->frame_control) || 596 (ieee80211_is_data(hdr->frame_control) ||
631 ieee80211_is_action(hdr->frame_control)) && 597 ieee80211_is_action(hdr->frame_control)) &&
632 is_multicast_ether_addr(hdr->addr1) && 598 is_multicast_ether_addr(hdr->addr1) &&
633 !ieee80211_has_moredata(hdr->frame_control)) { 599 !ieee80211_has_moredata(hdr->frame_control)) {
634 /* 600 /*
635 * No more broadcast/multicast frames to be received at this 601 * No more broadcast/multicast frames to be received at this
636 * point. 602 * point.
@@ -1068,709 +1034,6 @@ static void ath9k_rx_skb_postprocess(struct ath_common *common,
1068 rxs->flag &= ~RX_FLAG_DECRYPTED; 1034 rxs->flag &= ~RX_FLAG_DECRYPTED;
1069} 1035}
1070 1036
1071static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
1072 struct ath_hw_antcomb_conf ant_conf,
1073 int main_rssi_avg)
1074{
1075 antcomb->quick_scan_cnt = 0;
1076
1077 if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
1078 antcomb->rssi_lna2 = main_rssi_avg;
1079 else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1)
1080 antcomb->rssi_lna1 = main_rssi_avg;
1081
1082 switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) {
1083 case 0x10: /* LNA2 A-B */
1084 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1085 antcomb->first_quick_scan_conf =
1086 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1087 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
1088 break;
1089 case 0x20: /* LNA1 A-B */
1090 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1091 antcomb->first_quick_scan_conf =
1092 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1093 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
1094 break;
1095 case 0x21: /* LNA1 LNA2 */
1096 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2;
1097 antcomb->first_quick_scan_conf =
1098 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1099 antcomb->second_quick_scan_conf =
1100 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1101 break;
1102 case 0x12: /* LNA2 LNA1 */
1103 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1;
1104 antcomb->first_quick_scan_conf =
1105 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1106 antcomb->second_quick_scan_conf =
1107 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1108 break;
1109 case 0x13: /* LNA2 A+B */
1110 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1111 antcomb->first_quick_scan_conf =
1112 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1113 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
1114 break;
1115 case 0x23: /* LNA1 A+B */
1116 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1117 antcomb->first_quick_scan_conf =
1118 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1119 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
1120 break;
1121 default:
1122 break;
1123 }
1124}
1125
1126static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
1127 struct ath_hw_antcomb_conf *div_ant_conf,
1128 int main_rssi_avg, int alt_rssi_avg,
1129 int alt_ratio)
1130{
1131 /* alt_good */
1132 switch (antcomb->quick_scan_cnt) {
1133 case 0:
1134 /* set alt to main, and alt to first conf */
1135 div_ant_conf->main_lna_conf = antcomb->main_conf;
1136 div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf;
1137 break;
1138 case 1:
1139 /* set alt to main, and alt to first conf */
1140 div_ant_conf->main_lna_conf = antcomb->main_conf;
1141 div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf;
1142 antcomb->rssi_first = main_rssi_avg;
1143 antcomb->rssi_second = alt_rssi_avg;
1144
1145 if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
1146 /* main is LNA1 */
1147 if (ath_is_alt_ant_ratio_better(alt_ratio,
1148 ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
1149 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
1150 main_rssi_avg, alt_rssi_avg,
1151 antcomb->total_pkt_count))
1152 antcomb->first_ratio = true;
1153 else
1154 antcomb->first_ratio = false;
1155 } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
1156 if (ath_is_alt_ant_ratio_better(alt_ratio,
1157 ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
1158 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
1159 main_rssi_avg, alt_rssi_avg,
1160 antcomb->total_pkt_count))
1161 antcomb->first_ratio = true;
1162 else
1163 antcomb->first_ratio = false;
1164 } else {
1165 if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
1166 (alt_rssi_avg > main_rssi_avg +
1167 ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
1168 (alt_rssi_avg > main_rssi_avg)) &&
1169 (antcomb->total_pkt_count > 50))
1170 antcomb->first_ratio = true;
1171 else
1172 antcomb->first_ratio = false;
1173 }
1174 break;
1175 case 2:
1176 antcomb->alt_good = false;
1177 antcomb->scan_not_start = false;
1178 antcomb->scan = false;
1179 antcomb->rssi_first = main_rssi_avg;
1180 antcomb->rssi_third = alt_rssi_avg;
1181
1182 if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
1183 antcomb->rssi_lna1 = alt_rssi_avg;
1184 else if (antcomb->second_quick_scan_conf ==
1185 ATH_ANT_DIV_COMB_LNA2)
1186 antcomb->rssi_lna2 = alt_rssi_avg;
1187 else if (antcomb->second_quick_scan_conf ==
1188 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
1189 if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
1190 antcomb->rssi_lna2 = main_rssi_avg;
1191 else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
1192 antcomb->rssi_lna1 = main_rssi_avg;
1193 }
1194
1195 if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
1196 ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
1197 div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
1198 else
1199 div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
1200
1201 if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
1202 if (ath_is_alt_ant_ratio_better(alt_ratio,
1203 ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
1204 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
1205 main_rssi_avg, alt_rssi_avg,
1206 antcomb->total_pkt_count))
1207 antcomb->second_ratio = true;
1208 else
1209 antcomb->second_ratio = false;
1210 } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
1211 if (ath_is_alt_ant_ratio_better(alt_ratio,
1212 ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
1213 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
1214 main_rssi_avg, alt_rssi_avg,
1215 antcomb->total_pkt_count))
1216 antcomb->second_ratio = true;
1217 else
1218 antcomb->second_ratio = false;
1219 } else {
1220 if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
1221 (alt_rssi_avg > main_rssi_avg +
1222 ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
1223 (alt_rssi_avg > main_rssi_avg)) &&
1224 (antcomb->total_pkt_count > 50))
1225 antcomb->second_ratio = true;
1226 else
1227 antcomb->second_ratio = false;
1228 }
1229
1230 /* set alt to the conf with maximun ratio */
1231 if (antcomb->first_ratio && antcomb->second_ratio) {
1232 if (antcomb->rssi_second > antcomb->rssi_third) {
1233 /* first alt*/
1234 if ((antcomb->first_quick_scan_conf ==
1235 ATH_ANT_DIV_COMB_LNA1) ||
1236 (antcomb->first_quick_scan_conf ==
1237 ATH_ANT_DIV_COMB_LNA2))
1238 /* Set alt LNA1 or LNA2*/
1239 if (div_ant_conf->main_lna_conf ==
1240 ATH_ANT_DIV_COMB_LNA2)
1241 div_ant_conf->alt_lna_conf =
1242 ATH_ANT_DIV_COMB_LNA1;
1243 else
1244 div_ant_conf->alt_lna_conf =
1245 ATH_ANT_DIV_COMB_LNA2;
1246 else
1247 /* Set alt to A+B or A-B */
1248 div_ant_conf->alt_lna_conf =
1249 antcomb->first_quick_scan_conf;
1250 } else if ((antcomb->second_quick_scan_conf ==
1251 ATH_ANT_DIV_COMB_LNA1) ||
1252 (antcomb->second_quick_scan_conf ==
1253 ATH_ANT_DIV_COMB_LNA2)) {
1254 /* Set alt LNA1 or LNA2 */
1255 if (div_ant_conf->main_lna_conf ==
1256 ATH_ANT_DIV_COMB_LNA2)
1257 div_ant_conf->alt_lna_conf =
1258 ATH_ANT_DIV_COMB_LNA1;
1259 else
1260 div_ant_conf->alt_lna_conf =
1261 ATH_ANT_DIV_COMB_LNA2;
1262 } else {
1263 /* Set alt to A+B or A-B */
1264 div_ant_conf->alt_lna_conf =
1265 antcomb->second_quick_scan_conf;
1266 }
1267 } else if (antcomb->first_ratio) {
1268 /* first alt */
1269 if ((antcomb->first_quick_scan_conf ==
1270 ATH_ANT_DIV_COMB_LNA1) ||
1271 (antcomb->first_quick_scan_conf ==
1272 ATH_ANT_DIV_COMB_LNA2))
1273 /* Set alt LNA1 or LNA2 */
1274 if (div_ant_conf->main_lna_conf ==
1275 ATH_ANT_DIV_COMB_LNA2)
1276 div_ant_conf->alt_lna_conf =
1277 ATH_ANT_DIV_COMB_LNA1;
1278 else
1279 div_ant_conf->alt_lna_conf =
1280 ATH_ANT_DIV_COMB_LNA2;
1281 else
1282 /* Set alt to A+B or A-B */
1283 div_ant_conf->alt_lna_conf =
1284 antcomb->first_quick_scan_conf;
1285 } else if (antcomb->second_ratio) {
1286 /* second alt */
1287 if ((antcomb->second_quick_scan_conf ==
1288 ATH_ANT_DIV_COMB_LNA1) ||
1289 (antcomb->second_quick_scan_conf ==
1290 ATH_ANT_DIV_COMB_LNA2))
1291 /* Set alt LNA1 or LNA2 */
1292 if (div_ant_conf->main_lna_conf ==
1293 ATH_ANT_DIV_COMB_LNA2)
1294 div_ant_conf->alt_lna_conf =
1295 ATH_ANT_DIV_COMB_LNA1;
1296 else
1297 div_ant_conf->alt_lna_conf =
1298 ATH_ANT_DIV_COMB_LNA2;
1299 else
1300 /* Set alt to A+B or A-B */
1301 div_ant_conf->alt_lna_conf =
1302 antcomb->second_quick_scan_conf;
1303 } else {
1304 /* main is largest */
1305 if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
1306 (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
1307 /* Set alt LNA1 or LNA2 */
1308 if (div_ant_conf->main_lna_conf ==
1309 ATH_ANT_DIV_COMB_LNA2)
1310 div_ant_conf->alt_lna_conf =
1311 ATH_ANT_DIV_COMB_LNA1;
1312 else
1313 div_ant_conf->alt_lna_conf =
1314 ATH_ANT_DIV_COMB_LNA2;
1315 else
1316 /* Set alt to A+B or A-B */
1317 div_ant_conf->alt_lna_conf = antcomb->main_conf;
1318 }
1319 break;
1320 default:
1321 break;
1322 }
1323}
1324
1325static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
1326 struct ath_ant_comb *antcomb, int alt_ratio)
1327{
1328 if (ant_conf->div_group == 0) {
1329 /* Adjust the fast_div_bias based on main and alt lna conf */
1330 switch ((ant_conf->main_lna_conf << 4) |
1331 ant_conf->alt_lna_conf) {
1332 case 0x01: /* A-B LNA2 */
1333 ant_conf->fast_div_bias = 0x3b;
1334 break;
1335 case 0x02: /* A-B LNA1 */
1336 ant_conf->fast_div_bias = 0x3d;
1337 break;
1338 case 0x03: /* A-B A+B */
1339 ant_conf->fast_div_bias = 0x1;
1340 break;
1341 case 0x10: /* LNA2 A-B */
1342 ant_conf->fast_div_bias = 0x7;
1343 break;
1344 case 0x12: /* LNA2 LNA1 */
1345 ant_conf->fast_div_bias = 0x2;
1346 break;
1347 case 0x13: /* LNA2 A+B */
1348 ant_conf->fast_div_bias = 0x7;
1349 break;
1350 case 0x20: /* LNA1 A-B */
1351 ant_conf->fast_div_bias = 0x6;
1352 break;
1353 case 0x21: /* LNA1 LNA2 */
1354 ant_conf->fast_div_bias = 0x0;
1355 break;
1356 case 0x23: /* LNA1 A+B */
1357 ant_conf->fast_div_bias = 0x6;
1358 break;
1359 case 0x30: /* A+B A-B */
1360 ant_conf->fast_div_bias = 0x1;
1361 break;
1362 case 0x31: /* A+B LNA2 */
1363 ant_conf->fast_div_bias = 0x3b;
1364 break;
1365 case 0x32: /* A+B LNA1 */
1366 ant_conf->fast_div_bias = 0x3d;
1367 break;
1368 default:
1369 break;
1370 }
1371 } else if (ant_conf->div_group == 1) {
1372 /* Adjust the fast_div_bias based on main and alt_lna_conf */
1373 switch ((ant_conf->main_lna_conf << 4) |
1374 ant_conf->alt_lna_conf) {
1375 case 0x01: /* A-B LNA2 */
1376 ant_conf->fast_div_bias = 0x1;
1377 ant_conf->main_gaintb = 0;
1378 ant_conf->alt_gaintb = 0;
1379 break;
1380 case 0x02: /* A-B LNA1 */
1381 ant_conf->fast_div_bias = 0x1;
1382 ant_conf->main_gaintb = 0;
1383 ant_conf->alt_gaintb = 0;
1384 break;
1385 case 0x03: /* A-B A+B */
1386 ant_conf->fast_div_bias = 0x1;
1387 ant_conf->main_gaintb = 0;
1388 ant_conf->alt_gaintb = 0;
1389 break;
1390 case 0x10: /* LNA2 A-B */
1391 if (!(antcomb->scan) &&
1392 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1393 ant_conf->fast_div_bias = 0x3f;
1394 else
1395 ant_conf->fast_div_bias = 0x1;
1396 ant_conf->main_gaintb = 0;
1397 ant_conf->alt_gaintb = 0;
1398 break;
1399 case 0x12: /* LNA2 LNA1 */
1400 ant_conf->fast_div_bias = 0x1;
1401 ant_conf->main_gaintb = 0;
1402 ant_conf->alt_gaintb = 0;
1403 break;
1404 case 0x13: /* LNA2 A+B */
1405 if (!(antcomb->scan) &&
1406 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1407 ant_conf->fast_div_bias = 0x3f;
1408 else
1409 ant_conf->fast_div_bias = 0x1;
1410 ant_conf->main_gaintb = 0;
1411 ant_conf->alt_gaintb = 0;
1412 break;
1413 case 0x20: /* LNA1 A-B */
1414 if (!(antcomb->scan) &&
1415 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1416 ant_conf->fast_div_bias = 0x3f;
1417 else
1418 ant_conf->fast_div_bias = 0x1;
1419 ant_conf->main_gaintb = 0;
1420 ant_conf->alt_gaintb = 0;
1421 break;
1422 case 0x21: /* LNA1 LNA2 */
1423 ant_conf->fast_div_bias = 0x1;
1424 ant_conf->main_gaintb = 0;
1425 ant_conf->alt_gaintb = 0;
1426 break;
1427 case 0x23: /* LNA1 A+B */
1428 if (!(antcomb->scan) &&
1429 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1430 ant_conf->fast_div_bias = 0x3f;
1431 else
1432 ant_conf->fast_div_bias = 0x1;
1433 ant_conf->main_gaintb = 0;
1434 ant_conf->alt_gaintb = 0;
1435 break;
1436 case 0x30: /* A+B A-B */
1437 ant_conf->fast_div_bias = 0x1;
1438 ant_conf->main_gaintb = 0;
1439 ant_conf->alt_gaintb = 0;
1440 break;
1441 case 0x31: /* A+B LNA2 */
1442 ant_conf->fast_div_bias = 0x1;
1443 ant_conf->main_gaintb = 0;
1444 ant_conf->alt_gaintb = 0;
1445 break;
1446 case 0x32: /* A+B LNA1 */
1447 ant_conf->fast_div_bias = 0x1;
1448 ant_conf->main_gaintb = 0;
1449 ant_conf->alt_gaintb = 0;
1450 break;
1451 default:
1452 break;
1453 }
1454 } else if (ant_conf->div_group == 2) {
1455 /* Adjust the fast_div_bias based on main and alt_lna_conf */
1456 switch ((ant_conf->main_lna_conf << 4) |
1457 ant_conf->alt_lna_conf) {
1458 case 0x01: /* A-B LNA2 */
1459 ant_conf->fast_div_bias = 0x1;
1460 ant_conf->main_gaintb = 0;
1461 ant_conf->alt_gaintb = 0;
1462 break;
1463 case 0x02: /* A-B LNA1 */
1464 ant_conf->fast_div_bias = 0x1;
1465 ant_conf->main_gaintb = 0;
1466 ant_conf->alt_gaintb = 0;
1467 break;
1468 case 0x03: /* A-B A+B */
1469 ant_conf->fast_div_bias = 0x1;
1470 ant_conf->main_gaintb = 0;
1471 ant_conf->alt_gaintb = 0;
1472 break;
1473 case 0x10: /* LNA2 A-B */
1474 if (!(antcomb->scan) &&
1475 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1476 ant_conf->fast_div_bias = 0x1;
1477 else
1478 ant_conf->fast_div_bias = 0x2;
1479 ant_conf->main_gaintb = 0;
1480 ant_conf->alt_gaintb = 0;
1481 break;
1482 case 0x12: /* LNA2 LNA1 */
1483 ant_conf->fast_div_bias = 0x1;
1484 ant_conf->main_gaintb = 0;
1485 ant_conf->alt_gaintb = 0;
1486 break;
1487 case 0x13: /* LNA2 A+B */
1488 if (!(antcomb->scan) &&
1489 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1490 ant_conf->fast_div_bias = 0x1;
1491 else
1492 ant_conf->fast_div_bias = 0x2;
1493 ant_conf->main_gaintb = 0;
1494 ant_conf->alt_gaintb = 0;
1495 break;
1496 case 0x20: /* LNA1 A-B */
1497 if (!(antcomb->scan) &&
1498 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1499 ant_conf->fast_div_bias = 0x1;
1500 else
1501 ant_conf->fast_div_bias = 0x2;
1502 ant_conf->main_gaintb = 0;
1503 ant_conf->alt_gaintb = 0;
1504 break;
1505 case 0x21: /* LNA1 LNA2 */
1506 ant_conf->fast_div_bias = 0x1;
1507 ant_conf->main_gaintb = 0;
1508 ant_conf->alt_gaintb = 0;
1509 break;
1510 case 0x23: /* LNA1 A+B */
1511 if (!(antcomb->scan) &&
1512 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1513 ant_conf->fast_div_bias = 0x1;
1514 else
1515 ant_conf->fast_div_bias = 0x2;
1516 ant_conf->main_gaintb = 0;
1517 ant_conf->alt_gaintb = 0;
1518 break;
1519 case 0x30: /* A+B A-B */
1520 ant_conf->fast_div_bias = 0x1;
1521 ant_conf->main_gaintb = 0;
1522 ant_conf->alt_gaintb = 0;
1523 break;
1524 case 0x31: /* A+B LNA2 */
1525 ant_conf->fast_div_bias = 0x1;
1526 ant_conf->main_gaintb = 0;
1527 ant_conf->alt_gaintb = 0;
1528 break;
1529 case 0x32: /* A+B LNA1 */
1530 ant_conf->fast_div_bias = 0x1;
1531 ant_conf->main_gaintb = 0;
1532 ant_conf->alt_gaintb = 0;
1533 break;
1534 default:
1535 break;
1536 }
1537 }
1538}
1539
1540/* Antenna diversity and combining */
/*
 * ath_ant_comb_scan() - slow antenna-diversity state machine, run per RX frame.
 *
 * Accumulates per-antenna RSSI statistics from the RX status (@rs), and once
 * enough packets have been collected (or a "short scan" condition triggers),
 * evaluates the current main/alt LNA configuration, optionally steps through a
 * scan of the four LNA combinations (LNA1, LNA2, A+B, A-B), and programs the
 * chosen configuration back into the hardware.
 *
 * @sc: driver softc; ant_comb holds the persistent diversity state
 * @rs: RX status of the just-received frame (rs_rssi_ctl0/1/2, rs_moreaggr)
 *
 * Side effects: mutates sc->ant_comb counters/state and writes the antenna
 * configuration via ath9k_hw_antdiv_comb_conf_set(). No return value.
 */
1541static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
1542{
1543 struct ath_hw_antcomb_conf div_ant_conf;
1544 struct ath_ant_comb *antcomb = &sc->ant_comb;
1545 int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
1546 int curr_main_set;
1547 int main_rssi = rs->rs_rssi_ctl0;
1548 int alt_rssi = rs->rs_rssi_ctl1;
1549 int rx_ant_conf, main_ant_conf;
1550 bool short_scan = false;
 /*
  * rs_rssi_ctl2 encodes both the antenna the frame was actually received
  * on and the currently configured main antenna; extract both fields.
  */
1552 rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
1553 ATH_ANT_RX_MASK;
1554 main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
1555 ATH_ANT_RX_MASK;
1556
1557 /* Record packet only when both main_rssi and alt_rssi is positive */
1558 if (main_rssi > 0 && alt_rssi > 0) {
1559 antcomb->total_pkt_count++;
1560 antcomb->main_total_rssi += main_rssi;
1561 antcomb->alt_total_rssi += alt_rssi;
 /* Count whether this frame arrived on the main or the alt antenna */
1562 if (main_ant_conf == rx_ant_conf)
1563 antcomb->main_recv_cnt++;
1564 else
1565 antcomb->alt_recv_cnt++;
1566 }
1567
1568 /* Short scan check */
 /*
  * While a scan is in progress and the alt antenna was previously judged
  * good, cut the scan short if either the scan interval expired or a
  * small packet sample already shows the alt-reception ratio is too low.
  */
1569 if (antcomb->scan && antcomb->alt_good) {
1570 if (time_after(jiffies, antcomb->scan_start_time +
1571 msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
1572 short_scan = true;
1573 else
1574 if (antcomb->total_pkt_count ==
1575 ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
1576 alt_ratio = ((antcomb->alt_recv_cnt * 100) /
1577 antcomb->total_pkt_count);
1578 if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
1579 short_scan = true;
1580 }
1581 }
 /*
  * Keep accumulating until enough packets were sampled and we are not in
  * the middle of an A-MPDU, unless a short scan forces evaluation now.
  */
1583 if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
1584 rs->rs_moreaggr) && !short_scan)
1585 return;
 /* Compute averages; guarded so a short scan with zero packets is safe */
1587 if (antcomb->total_pkt_count) {
1588 alt_ratio = ((antcomb->alt_recv_cnt * 100) /
1589 antcomb->total_pkt_count);
1590 main_rssi_avg = (antcomb->main_total_rssi /
1591 antcomb->total_pkt_count);
1592 alt_rssi_avg = (antcomb->alt_total_rssi /
1593 antcomb->total_pkt_count);
1594 }
1595
1596
1597 ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
1598 curr_alt_set = div_ant_conf.alt_lna_conf;
1599 curr_main_set = div_ant_conf.main_lna_conf;
1600
1601 antcomb->count++;
 /*
  * Every MAX_COUNT evaluation periods, decide whether the alt antenna is
  * "good" (received a sufficient share of frames) and start a new scan.
  */
1603 if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
1604 if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
1605 ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
1606 main_rssi_avg);
1607 antcomb->alt_good = true;
1608 } else {
1609 antcomb->alt_good = false;
1610 }
1611
1612 antcomb->count = 0;
1613 antcomb->scan = true;
1614 antcomb->scan_not_start = true;
1615 }
 /*
  * Steady state (no scan active): if the alt antenna is performing
  * better, swap main/alt; otherwise just keep the current config unless
  * the alt RSSI clearly exceeds main by the LNA1/LNA2 delta.
  */
1617 if (!antcomb->scan) {
1618 if (ath_ant_div_comb_alt_check(div_ant_conf.div_group,
1619 alt_ratio, curr_main_set, curr_alt_set,
1620 alt_rssi_avg, main_rssi_avg)) {
1621 if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
1622 /* Switch main and alt LNA */
1623 div_ant_conf.main_lna_conf =
1624 ATH_ANT_DIV_COMB_LNA2;
1625 div_ant_conf.alt_lna_conf =
1626 ATH_ANT_DIV_COMB_LNA1;
1627 } else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
1628 div_ant_conf.main_lna_conf =
1629 ATH_ANT_DIV_COMB_LNA1;
1630 div_ant_conf.alt_lna_conf =
1631 ATH_ANT_DIV_COMB_LNA2;
1632 }
1633
1634 goto div_comb_done;
1635 } else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
1636 (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
1637 /* Set alt to another LNA */
 /* Alt is on a combined (A+B / A-B) setting; fall back to the single LNA
  * that is not currently the main one. */
1638 if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
1639 div_ant_conf.alt_lna_conf =
1640 ATH_ANT_DIV_COMB_LNA1;
1641 else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
1642 div_ant_conf.alt_lna_conf =
1643 ATH_ANT_DIV_COMB_LNA2;
1644
1645 goto div_comb_done;
1646 }
1647
1648 if ((alt_rssi_avg < (main_rssi_avg +
1649 div_ant_conf.lna1_lna2_delta)))
1650 goto div_comb_done;
1651 }
 /*
  * Scan phase. Once started (scan_not_start cleared), step through the
  * LNA configurations, recording the average alt RSSI seen for each:
  * single-LNA settings record rssi_lna1/rssi_lna2, then A+B records
  * rssi_add, then A-B records rssi_sub and concludes the scan by picking
  * the best main/alt pair from the four measurements.
  */
1653 if (!antcomb->scan_not_start) {
1654 switch (curr_alt_set) {
1655 case ATH_ANT_DIV_COMB_LNA2:
1656 antcomb->rssi_lna2 = alt_rssi_avg;
1657 antcomb->rssi_lna1 = main_rssi_avg;
1658 antcomb->scan = true;
1659 /* set to A+B */
1660 div_ant_conf.main_lna_conf =
1661 ATH_ANT_DIV_COMB_LNA1;
1662 div_ant_conf.alt_lna_conf =
1663 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1664 break;
1665 case ATH_ANT_DIV_COMB_LNA1:
1666 antcomb->rssi_lna1 = alt_rssi_avg;
1667 antcomb->rssi_lna2 = main_rssi_avg;
1668 antcomb->scan = true;
1669 /* set to A+B */
1670 div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
1671 div_ant_conf.alt_lna_conf =
1672 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1673 break;
1674 case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
1675 antcomb->rssi_add = alt_rssi_avg;
1676 antcomb->scan = true;
1677 /* set to A-B */
1678 div_ant_conf.alt_lna_conf =
1679 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1680 break;
1681 case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
 /* Last scan step: all four measurements available, end the scan */
1682 antcomb->rssi_sub = alt_rssi_avg;
1683 antcomb->scan = false;
1684 if (antcomb->rssi_lna2 >
1685 (antcomb->rssi_lna1 +
1686 ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
1687 /* use LNA2 as main LNA */
1688 if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
1689 (antcomb->rssi_add > antcomb->rssi_sub)) {
1690 /* set to A+B */
1691 div_ant_conf.main_lna_conf =
1692 ATH_ANT_DIV_COMB_LNA2;
1693 div_ant_conf.alt_lna_conf =
1694 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1695 } else if (antcomb->rssi_sub >
1696 antcomb->rssi_lna1) {
1697 /* set to A-B */
1698 div_ant_conf.main_lna_conf =
1699 ATH_ANT_DIV_COMB_LNA2;
1700 div_ant_conf.alt_lna_conf =
1701 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1702 } else {
1703 /* set to LNA1 */
1704 div_ant_conf.main_lna_conf =
1705 ATH_ANT_DIV_COMB_LNA2;
1706 div_ant_conf.alt_lna_conf =
1707 ATH_ANT_DIV_COMB_LNA1;
1708 }
1709 } else {
1710 /* use LNA1 as main LNA */
1711 if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
1712 (antcomb->rssi_add > antcomb->rssi_sub)) {
1713 /* set to A+B */
1714 div_ant_conf.main_lna_conf =
1715 ATH_ANT_DIV_COMB_LNA1;
1716 div_ant_conf.alt_lna_conf =
1717 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
 /* NOTE(review): this branch compares rssi_sub against rssi_lna1 just
  * like the LNA2-as-main branch above; a comparison against rssi_lna2
  * might be expected here by symmetry — confirm against hardware intent
  * before changing. */
1718 } else if (antcomb->rssi_sub >
1719 antcomb->rssi_lna1) {
1720 /* set to A-B */
1721 div_ant_conf.main_lna_conf =
1722 ATH_ANT_DIV_COMB_LNA1;
1723 div_ant_conf.alt_lna_conf =
1724 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1725 } else {
1726 /* set to LNA2 */
1727 div_ant_conf.main_lna_conf =
1728 ATH_ANT_DIV_COMB_LNA1;
1729 div_ant_conf.alt_lna_conf =
1730 ATH_ANT_DIV_COMB_LNA2;
1731 }
1732 }
1733 break;
1734 default:
1735 break;
1736 }
1737 } else {
 /*
  * Scan requested but not yet started: if the alt antenna was not
  * good, begin the scan by putting alt on the opposite single LNA.
  */
1738 if (!antcomb->alt_good) {
1739 antcomb->scan_not_start = false;
1740 /* Set alt to another LNA */
1741 if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
1742 div_ant_conf.main_lna_conf =
1743 ATH_ANT_DIV_COMB_LNA2;
1744 div_ant_conf.alt_lna_conf =
1745 ATH_ANT_DIV_COMB_LNA1;
1746 } else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
1747 div_ant_conf.main_lna_conf =
1748 ATH_ANT_DIV_COMB_LNA1;
1749 div_ant_conf.alt_lna_conf =
1750 ATH_ANT_DIV_COMB_LNA2;
1751 }
1752 goto div_comb_done;
1753 }
1754 }
 /* alt was good: run the quick-scan selection helper instead */
1756 ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
1757 main_rssi_avg, alt_rssi_avg,
1758 alt_ratio);
1759
1760 antcomb->quick_scan_cnt++;
1761
1762div_comb_done:
 /* Apply fast-divbias tuning for the chosen config and push it to HW */
1763 ath_ant_div_conf_fast_divbias(&div_ant_conf, antcomb, alt_ratio);
1764 ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);
 /* Restart the sampling window for the next evaluation period */
1766 antcomb->scan_start_time = jiffies;
1767 antcomb->total_pkt_count = 0;
1768 antcomb->main_total_rssi = 0;
1769 antcomb->alt_total_rssi = 0;
1770 antcomb->main_recv_cnt = 0;
1771 antcomb->alt_recv_cnt = 0;
1772}
1773
1774int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) 1037int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1775{ 1038{
1776 struct ath_buf *bf; 1039 struct ath_buf *bf;
@@ -1804,7 +1067,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1804 1067
1805 do { 1068 do {
1806 /* If handling rx interrupt and flush is in progress => exit */ 1069 /* If handling rx interrupt and flush is in progress => exit */
1807 if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0)) 1070 if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags) && (flush == 0))
1808 break; 1071 break;
1809 1072
1810 memset(&rs, 0, sizeof(rs)); 1073 memset(&rs, 0, sizeof(rs));
@@ -1842,13 +1105,14 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1842 else 1105 else
1843 rs.is_mybeacon = false; 1106 rs.is_mybeacon = false;
1844 1107
1108 sc->rx.num_pkts++;
1845 ath_debug_stat_rx(sc, &rs); 1109 ath_debug_stat_rx(sc, &rs);
1846 1110
1847 /* 1111 /*
1848 * If we're asked to flush receive queue, directly 1112 * If we're asked to flush receive queue, directly
1849 * chain it back at the queue without processing it. 1113 * chain it back at the queue without processing it.
1850 */ 1114 */
1851 if (sc->sc_flags & SC_OP_RXFLUSH) { 1115 if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags)) {
1852 RX_STAT_INC(rx_drop_rxflush); 1116 RX_STAT_INC(rx_drop_rxflush);
1853 goto requeue_drop_frag; 1117 goto requeue_drop_frag;
1854 } 1118 }
@@ -1969,7 +1233,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1969 skb_trim(skb, skb->len - 8); 1233 skb_trim(skb, skb->len - 8);
1970 1234
1971 spin_lock_irqsave(&sc->sc_pm_lock, flags); 1235 spin_lock_irqsave(&sc->sc_pm_lock, flags);
1972
1973 if ((sc->ps_flags & (PS_WAIT_FOR_BEACON | 1236 if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
1974 PS_WAIT_FOR_CAB | 1237 PS_WAIT_FOR_CAB |
1975 PS_WAIT_FOR_PSPOLL_DATA)) || 1238 PS_WAIT_FOR_PSPOLL_DATA)) ||
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 458f81b4a7cb..6592c07ac646 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -798,6 +798,7 @@
798#define AR_SREV_REVISION_9580_10 4 /* AR9580 1.0 */ 798#define AR_SREV_REVISION_9580_10 4 /* AR9580 1.0 */
799#define AR_SREV_VERSION_9462 0x280 799#define AR_SREV_VERSION_9462 0x280
800#define AR_SREV_REVISION_9462_20 2 800#define AR_SREV_REVISION_9462_20 2
801#define AR_SREV_VERSION_9550 0x400
801 802
802#define AR_SREV_5416(_ah) \ 803#define AR_SREV_5416(_ah) \
803 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \ 804 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \
@@ -905,6 +906,9 @@
905 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \ 906 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
906 ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9462_20)) 907 ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9462_20))
907 908
909#define AR_SREV_9550(_ah) \
910 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9550))
911
908#define AR_SREV_9580(_ah) \ 912#define AR_SREV_9580(_ah) \
909 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9580) && \ 913 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9580) && \
910 ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9580_10)) 914 ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9580_10))
@@ -1643,11 +1647,11 @@ enum {
1643 1647
1644#define AR_TPC 0x80e8 1648#define AR_TPC 0x80e8
1645#define AR_TPC_ACK 0x0000003f 1649#define AR_TPC_ACK 0x0000003f
1646#define AR_TPC_ACK_S 0x00 1650#define AR_TPC_ACK_S 0
1647#define AR_TPC_CTS 0x00003f00 1651#define AR_TPC_CTS 0x00003f00
1648#define AR_TPC_CTS_S 0x08 1652#define AR_TPC_CTS_S 8
1649#define AR_TPC_CHIRP 0x003f0000 1653#define AR_TPC_CHIRP 0x003f0000
1650#define AR_TPC_CHIRP_S 0x16 1654#define AR_TPC_CHIRP_S 16
1651 1655
1652#define AR_QUIET1 0x80fc 1656#define AR_QUIET1 0x80fc
1653#define AR_QUIET1_NEXT_QUIET_S 0 1657#define AR_QUIET1_NEXT_QUIET_S 0
@@ -2077,12 +2081,6 @@ enum {
2077 AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET| \ 2081 AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET| \
2078 AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING | \ 2082 AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING | \
2079 AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING| \ 2083 AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING| \
2080 AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \
2081 AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL | \
2082 AR_MCI_INTERRUPT_RX_MSG_LNA_INFO | \
2083 AR_MCI_INTERRUPT_RX_MSG_CONT_NACK | \
2084 AR_MCI_INTERRUPT_RX_MSG_CONT_INFO | \
2085 AR_MCI_INTERRUPT_RX_MSG_CONT_RST | \
2086 AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE) 2084 AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
2087 2085
2088#define AR_MCI_CPU_INT 0x1840 2086#define AR_MCI_CPU_INT 0x1840
@@ -2098,8 +2096,8 @@ enum {
2098#define AR_MCI_CONT_STATUS 0x1848 2096#define AR_MCI_CONT_STATUS 0x1848
2099#define AR_MCI_CONT_RSSI_POWER 0x000000FF 2097#define AR_MCI_CONT_RSSI_POWER 0x000000FF
2100#define AR_MCI_CONT_RSSI_POWER_S 0 2098#define AR_MCI_CONT_RSSI_POWER_S 0
2101#define AR_MCI_CONT_RRIORITY 0x0000FF00 2099#define AR_MCI_CONT_PRIORITY 0x0000FF00
2102#define AR_MCI_CONT_RRIORITY_S 8 2100#define AR_MCI_CONT_PRIORITY_S 8
2103#define AR_MCI_CONT_TXRX 0x00010000 2101#define AR_MCI_CONT_TXRX 0x00010000
2104#define AR_MCI_CONT_TXRX_S 16 2102#define AR_MCI_CONT_TXRX_S 16
2105 2103
@@ -2162,10 +2160,6 @@ enum {
2162#define AR_BTCOEX_CTRL_SPDT_POLARITY 0x80000000 2160#define AR_BTCOEX_CTRL_SPDT_POLARITY 0x80000000
2163#define AR_BTCOEX_CTRL_SPDT_POLARITY_S 31 2161#define AR_BTCOEX_CTRL_SPDT_POLARITY_S 31
2164 2162
2165#define AR_BTCOEX_WL_WEIGHTS0 0x18b0
2166#define AR_BTCOEX_WL_WEIGHTS1 0x18b4
2167#define AR_BTCOEX_WL_WEIGHTS2 0x18b8
2168#define AR_BTCOEX_WL_WEIGHTS3 0x18bc
2169#define AR_BTCOEX_MAX_TXPWR(_x) (0x18c0 + ((_x) << 2)) 2163#define AR_BTCOEX_MAX_TXPWR(_x) (0x18c0 + ((_x) << 2))
2170#define AR_BTCOEX_WL_LNA 0x1940 2164#define AR_BTCOEX_WL_LNA 0x1940
2171#define AR_BTCOEX_RFGAIN_CTRL 0x1944 2165#define AR_BTCOEX_RFGAIN_CTRL 0x1944
@@ -2211,5 +2205,7 @@ enum {
2211#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT 0x00000fff 2205#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT 0x00000fff
2212#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT_S 0 2206#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT_S 0
2213 2207
2208#define AR_GLB_SWREG_DISCONT_MODE 0x2002c
2209#define AR_GLB_SWREG_DISCONT_EN_BT_WLAN 0x3
2214 2210
2215#endif 2211#endif
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 4d571394c7a8..cafb4a09729a 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -105,19 +105,19 @@ static int ath_max_4ms_framelen[4][32] = {
105/* Aggregation logic */ 105/* Aggregation logic */
106/*********************/ 106/*********************/
107 107
108static void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq) 108void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
109 __acquires(&txq->axq_lock) 109 __acquires(&txq->axq_lock)
110{ 110{
111 spin_lock_bh(&txq->axq_lock); 111 spin_lock_bh(&txq->axq_lock);
112} 112}
113 113
114static void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq) 114void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
115 __releases(&txq->axq_lock) 115 __releases(&txq->axq_lock)
116{ 116{
117 spin_unlock_bh(&txq->axq_lock); 117 spin_unlock_bh(&txq->axq_lock);
118} 118}
119 119
120static void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq) 120void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
121 __releases(&txq->axq_lock) 121 __releases(&txq->axq_lock)
122{ 122{
123 struct sk_buff_head q; 123 struct sk_buff_head q;
@@ -1165,6 +1165,7 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1165{ 1165{
1166 struct ath_atx_tid *txtid; 1166 struct ath_atx_tid *txtid;
1167 struct ath_node *an; 1167 struct ath_node *an;
1168 u8 density;
1168 1169
1169 an = (struct ath_node *)sta->drv_priv; 1170 an = (struct ath_node *)sta->drv_priv;
1170 txtid = ATH_AN_2_TID(an, tid); 1171 txtid = ATH_AN_2_TID(an, tid);
@@ -1172,6 +1173,17 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1172 if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE)) 1173 if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
1173 return -EAGAIN; 1174 return -EAGAIN;
1174 1175
1176 /* update ampdu factor/density, they may have changed. This may happen
1177 * in HT IBSS when a beacon with HT-info is received after the station
1178 * has already been added.
1179 */
1180 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
1181 an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
1182 sta->ht_cap.ampdu_factor);
1183 density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
1184 an->mpdudensity = density;
1185 }
1186
1175 txtid->state |= AGGR_ADDBA_PROGRESS; 1187 txtid->state |= AGGR_ADDBA_PROGRESS;
1176 txtid->paused = true; 1188 txtid->paused = true;
1177 *ssn = txtid->seq_start = txtid->seq_next; 1189 *ssn = txtid->seq_start = txtid->seq_next;
@@ -1526,7 +1538,7 @@ bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
1526 int i; 1538 int i;
1527 u32 npend = 0; 1539 u32 npend = 0;
1528 1540
1529 if (sc->sc_flags & SC_OP_INVALID) 1541 if (test_bit(SC_OP_INVALID, &sc->sc_flags))
1530 return true; 1542 return true;
1531 1543
1532 ath9k_hw_abort_tx_dma(ah); 1544 ath9k_hw_abort_tx_dma(ah);
@@ -1999,6 +2011,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1999 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 2011 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2000 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data; 2012 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
2001 int q, padpos, padsize; 2013 int q, padpos, padsize;
2014 unsigned long flags;
2002 2015
2003 ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb); 2016 ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
2004 2017
@@ -2017,6 +2030,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
2017 skb_pull(skb, padsize); 2030 skb_pull(skb, padsize);
2018 } 2031 }
2019 2032
2033 spin_lock_irqsave(&sc->sc_pm_lock, flags);
2020 if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) { 2034 if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
2021 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK; 2035 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
2022 ath_dbg(common, PS, 2036 ath_dbg(common, PS,
@@ -2026,6 +2040,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
2026 PS_WAIT_FOR_PSPOLL_DATA | 2040 PS_WAIT_FOR_PSPOLL_DATA |
2027 PS_WAIT_FOR_TX_ACK)); 2041 PS_WAIT_FOR_TX_ACK));
2028 } 2042 }
2043 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
2029 2044
2030 q = skb_get_queue_mapping(skb); 2045 q = skb_get_queue_mapping(skb);
2031 if (txq == sc->tx.txq_map[q]) { 2046 if (txq == sc->tx.txq_map[q]) {
@@ -2236,46 +2251,6 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2236 ath_txq_unlock_complete(sc, txq); 2251 ath_txq_unlock_complete(sc, txq);
2237} 2252}
2238 2253
2239static void ath_tx_complete_poll_work(struct work_struct *work)
2240{
2241 struct ath_softc *sc = container_of(work, struct ath_softc,
2242 tx_complete_work.work);
2243 struct ath_txq *txq;
2244 int i;
2245 bool needreset = false;
2246#ifdef CONFIG_ATH9K_DEBUGFS
2247 sc->tx_complete_poll_work_seen++;
2248#endif
2249
2250 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2251 if (ATH_TXQ_SETUP(sc, i)) {
2252 txq = &sc->tx.txq[i];
2253 ath_txq_lock(sc, txq);
2254 if (txq->axq_depth) {
2255 if (txq->axq_tx_inprogress) {
2256 needreset = true;
2257 ath_txq_unlock(sc, txq);
2258 break;
2259 } else {
2260 txq->axq_tx_inprogress = true;
2261 }
2262 }
2263 ath_txq_unlock_complete(sc, txq);
2264 }
2265
2266 if (needreset) {
2267 ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
2268 "tx hung, resetting the chip\n");
2269 RESET_STAT_INC(sc, RESET_TYPE_TX_HANG);
2270 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
2271 }
2272
2273 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
2274 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2275}
2276
2277
2278
2279void ath_tx_tasklet(struct ath_softc *sc) 2254void ath_tx_tasklet(struct ath_softc *sc)
2280{ 2255{
2281 struct ath_hw *ah = sc->sc_ah; 2256 struct ath_hw *ah = sc->sc_ah;
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index 0cea20e3e250..376be11161c0 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -289,6 +289,7 @@ struct ar9170 {
289 unsigned int mem_block_size; 289 unsigned int mem_block_size;
290 unsigned int rx_size; 290 unsigned int rx_size;
291 unsigned int tx_seq_table; 291 unsigned int tx_seq_table;
292 bool ba_filter;
292 } fw; 293 } fw;
293 294
294 /* interface configuration combinations */ 295 /* interface configuration combinations */
@@ -425,6 +426,10 @@ struct ar9170 {
425 struct sk_buff *rx_failover; 426 struct sk_buff *rx_failover;
426 int rx_failover_missing; 427 int rx_failover_missing;
427 428
429 /* FIFO for collecting outstanding BlockAckRequest */
430 struct list_head bar_list[__AR9170_NUM_TXQ];
431 spinlock_t bar_list_lock[__AR9170_NUM_TXQ];
432
428#ifdef CONFIG_CARL9170_WPC 433#ifdef CONFIG_CARL9170_WPC
429 struct { 434 struct {
430 bool pbc_state; 435 bool pbc_state;
@@ -468,6 +473,12 @@ enum carl9170_ps_off_override_reasons {
468 PS_OFF_BCN = BIT(1), 473 PS_OFF_BCN = BIT(1),
469}; 474};
470 475
476struct carl9170_bar_list_entry {
477 struct list_head list;
478 struct rcu_head head;
479 struct sk_buff *skb;
480};
481
471struct carl9170_ba_stats { 482struct carl9170_ba_stats {
472 u8 ampdu_len; 483 u8 ampdu_len;
473 u8 ampdu_ack_len; 484 u8 ampdu_ack_len;
diff --git a/drivers/net/wireless/ath/carl9170/cmd.c b/drivers/net/wireless/ath/carl9170/cmd.c
index 195dc6538110..39a63874b275 100644
--- a/drivers/net/wireless/ath/carl9170/cmd.c
+++ b/drivers/net/wireless/ath/carl9170/cmd.c
@@ -138,7 +138,7 @@ int carl9170_reboot(struct ar9170 *ar)
138 if (!cmd) 138 if (!cmd)
139 return -ENOMEM; 139 return -ENOMEM;
140 140
141 err = __carl9170_exec_cmd(ar, (struct carl9170_cmd *)cmd, true); 141 err = __carl9170_exec_cmd(ar, cmd, true);
142 return err; 142 return err;
143} 143}
144 144
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index 5c73c03872f3..c5ca6f1f5836 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -307,6 +307,9 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
307 if (SUPP(CARL9170FW_WOL)) 307 if (SUPP(CARL9170FW_WOL))
308 device_set_wakeup_enable(&ar->udev->dev, true); 308 device_set_wakeup_enable(&ar->udev->dev, true);
309 309
310 if (SUPP(CARL9170FW_RX_BA_FILTER))
311 ar->fw.ba_filter = true;
312
310 if_comb_types = BIT(NL80211_IFTYPE_STATION) | 313 if_comb_types = BIT(NL80211_IFTYPE_STATION) |
311 BIT(NL80211_IFTYPE_P2P_CLIENT); 314 BIT(NL80211_IFTYPE_P2P_CLIENT);
312 315
diff --git a/drivers/net/wireless/ath/carl9170/fwdesc.h b/drivers/net/wireless/ath/carl9170/fwdesc.h
index 6d9c0891ce7f..66848d47c88e 100644
--- a/drivers/net/wireless/ath/carl9170/fwdesc.h
+++ b/drivers/net/wireless/ath/carl9170/fwdesc.h
@@ -78,6 +78,9 @@ enum carl9170fw_feature_list {
78 /* HW (ANI, CCA, MIB) tally counters */ 78 /* HW (ANI, CCA, MIB) tally counters */
79 CARL9170FW_HW_COUNTERS, 79 CARL9170FW_HW_COUNTERS,
80 80
81 /* Firmware will pass BA when BARs are queued */
82 CARL9170FW_RX_BA_FILTER,
83
81 /* KEEP LAST */ 84 /* KEEP LAST */
82 __CARL9170FW_FEATURE_NUM 85 __CARL9170FW_FEATURE_NUM
83}; 86};
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 8d2523b3f722..858e58dfc4dc 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -949,6 +949,9 @@ static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
949 if (ar->fw.rx_filter && changed_flags & ar->rx_filter_caps) { 949 if (ar->fw.rx_filter && changed_flags & ar->rx_filter_caps) {
950 u32 rx_filter = 0; 950 u32 rx_filter = 0;
951 951
952 if (!ar->fw.ba_filter)
953 rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
954
952 if (!(*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL))) 955 if (!(*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)))
953 rx_filter |= CARL9170_RX_FILTER_BAD; 956 rx_filter |= CARL9170_RX_FILTER_BAD;
954 957
@@ -1753,6 +1756,9 @@ void *carl9170_alloc(size_t priv_size)
1753 for (i = 0; i < ar->hw->queues; i++) { 1756 for (i = 0; i < ar->hw->queues; i++) {
1754 skb_queue_head_init(&ar->tx_status[i]); 1757 skb_queue_head_init(&ar->tx_status[i]);
1755 skb_queue_head_init(&ar->tx_pending[i]); 1758 skb_queue_head_init(&ar->tx_pending[i]);
1759
1760 INIT_LIST_HEAD(&ar->bar_list[i]);
1761 spin_lock_init(&ar->bar_list_lock[i]);
1756 } 1762 }
1757 INIT_WORK(&ar->ps_work, carl9170_ps_work); 1763 INIT_WORK(&ar->ps_work, carl9170_ps_work);
1758 INIT_WORK(&ar->ping_work, carl9170_ping_work); 1764 INIT_WORK(&ar->ping_work, carl9170_ping_work);
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index 84b22eec7abd..6f6a34155667 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -161,7 +161,7 @@ static void carl9170_cmd_callback(struct ar9170 *ar, u32 len, void *buffer)
161 161
162void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len) 162void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
163{ 163{
164 struct carl9170_rsp *cmd = (void *) buf; 164 struct carl9170_rsp *cmd = buf;
165 struct ieee80211_vif *vif; 165 struct ieee80211_vif *vif;
166 166
167 if (carl9170_check_sequence(ar, cmd->hdr.seq)) 167 if (carl9170_check_sequence(ar, cmd->hdr.seq))
@@ -520,7 +520,7 @@ static u8 *carl9170_find_ie(u8 *data, unsigned int len, u8 ie)
520 */ 520 */
521static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len) 521static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
522{ 522{
523 struct ieee80211_hdr *hdr = (void *) data; 523 struct ieee80211_hdr *hdr = data;
524 struct ieee80211_tim_ie *tim_ie; 524 struct ieee80211_tim_ie *tim_ie;
525 u8 *tim; 525 u8 *tim;
526 u8 tim_len; 526 u8 tim_len;
@@ -576,6 +576,53 @@ static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
576 } 576 }
577} 577}
578 578
579static void carl9170_ba_check(struct ar9170 *ar, void *data, unsigned int len)
580{
581 struct ieee80211_bar *bar = (void *) data;
582 struct carl9170_bar_list_entry *entry;
583 unsigned int queue;
584
585 if (likely(!ieee80211_is_back(bar->frame_control)))
586 return;
587
588 if (len <= sizeof(*bar) + FCS_LEN)
589 return;
590
591 queue = TID_TO_WME_AC(((le16_to_cpu(bar->control) &
592 IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
593 IEEE80211_BAR_CTRL_TID_INFO_SHIFT) & 7);
594
595 rcu_read_lock();
596 list_for_each_entry_rcu(entry, &ar->bar_list[queue], list) {
597 struct sk_buff *entry_skb = entry->skb;
598 struct _carl9170_tx_superframe *super = (void *)entry_skb->data;
599 struct ieee80211_bar *entry_bar = (void *)super->frame_data;
600
601#define TID_CHECK(a, b) ( \
602 ((a) & cpu_to_le16(IEEE80211_BAR_CTRL_TID_INFO_MASK)) == \
603 ((b) & cpu_to_le16(IEEE80211_BAR_CTRL_TID_INFO_MASK))) \
604
605 if (bar->start_seq_num == entry_bar->start_seq_num &&
606 TID_CHECK(bar->control, entry_bar->control) &&
607 compare_ether_addr(bar->ra, entry_bar->ta) == 0 &&
608 compare_ether_addr(bar->ta, entry_bar->ra) == 0) {
609 struct ieee80211_tx_info *tx_info;
610
611 tx_info = IEEE80211_SKB_CB(entry_skb);
612 tx_info->flags |= IEEE80211_TX_STAT_ACK;
613
614 spin_lock_bh(&ar->bar_list_lock[queue]);
615 list_del_rcu(&entry->list);
616 spin_unlock_bh(&ar->bar_list_lock[queue]);
617 kfree_rcu(entry, head);
618 break;
619 }
620 }
621 rcu_read_unlock();
622
623#undef TID_CHECK
624}
625
579static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms) 626static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms)
580{ 627{
581 __le16 fc; 628 __le16 fc;
@@ -738,6 +785,8 @@ static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
738 785
739 carl9170_ps_beacon(ar, buf, mpdu_len); 786 carl9170_ps_beacon(ar, buf, mpdu_len);
740 787
788 carl9170_ba_check(ar, buf, mpdu_len);
789
741 skb = carl9170_rx_copy_data(buf, mpdu_len); 790 skb = carl9170_rx_copy_data(buf, mpdu_len);
742 if (!skb) 791 if (!skb)
743 goto drop; 792 goto drop;
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index aed305177af6..6a8681407a1d 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -277,11 +277,11 @@ static void carl9170_tx_release(struct kref *ref)
277 return; 277 return;
278 278
279 BUILD_BUG_ON( 279 BUILD_BUG_ON(
280 offsetof(struct ieee80211_tx_info, status.ampdu_ack_len) != 23); 280 offsetof(struct ieee80211_tx_info, status.ack_signal) != 20);
281 281
282 memset(&txinfo->status.ampdu_ack_len, 0, 282 memset(&txinfo->status.ack_signal, 0,
283 sizeof(struct ieee80211_tx_info) - 283 sizeof(struct ieee80211_tx_info) -
284 offsetof(struct ieee80211_tx_info, status.ampdu_ack_len)); 284 offsetof(struct ieee80211_tx_info, status.ack_signal));
285 285
286 if (atomic_read(&ar->tx_total_queued)) 286 if (atomic_read(&ar->tx_total_queued))
287 ar->tx_schedule = true; 287 ar->tx_schedule = true;
@@ -436,6 +436,45 @@ out_rcu:
436 rcu_read_unlock(); 436 rcu_read_unlock();
437} 437}
438 438
439static void carl9170_tx_bar_status(struct ar9170 *ar, struct sk_buff *skb,
440 struct ieee80211_tx_info *tx_info)
441{
442 struct _carl9170_tx_superframe *super = (void *) skb->data;
443 struct ieee80211_bar *bar = (void *) super->frame_data;
444
445 /*
446 * Unlike all other frames, the status report for BARs does
447 * not directly come from the hardware as it is incapable of
448 * matching a BA to a previously send BAR.
449 * Instead the RX-path will scan for incoming BAs and set the
450 * IEEE80211_TX_STAT_ACK if it sees one that was likely
451 * caused by a BAR from us.
452 */
453
454 if (unlikely(ieee80211_is_back_req(bar->frame_control)) &&
455 !(tx_info->flags & IEEE80211_TX_STAT_ACK)) {
456 struct carl9170_bar_list_entry *entry;
457 int queue = skb_get_queue_mapping(skb);
458
459 rcu_read_lock();
460 list_for_each_entry_rcu(entry, &ar->bar_list[queue], list) {
461 if (entry->skb == skb) {
462 spin_lock_bh(&ar->bar_list_lock[queue]);
463 list_del_rcu(&entry->list);
464 spin_unlock_bh(&ar->bar_list_lock[queue]);
465 kfree_rcu(entry, head);
466 goto out;
467 }
468 }
469
470 WARN(1, "bar not found in %d - ra:%pM ta:%pM c:%x ssn:%x\n",
471 queue, bar->ra, bar->ta, bar->control,
472 bar->start_seq_num);
473out:
474 rcu_read_unlock();
475 }
476}
477
439void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb, 478void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
440 const bool success) 479 const bool success)
441{ 480{
@@ -445,6 +484,8 @@ void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
445 484
446 txinfo = IEEE80211_SKB_CB(skb); 485 txinfo = IEEE80211_SKB_CB(skb);
447 486
487 carl9170_tx_bar_status(ar, skb, txinfo);
488
448 if (success) 489 if (success)
449 txinfo->flags |= IEEE80211_TX_STAT_ACK; 490 txinfo->flags |= IEEE80211_TX_STAT_ACK;
450 else 491 else
@@ -1265,6 +1306,26 @@ out_rcu:
1265 return false; 1306 return false;
1266} 1307}
1267 1308
1309static void carl9170_bar_check(struct ar9170 *ar, struct sk_buff *skb)
1310{
1311 struct _carl9170_tx_superframe *super = (void *) skb->data;
1312 struct ieee80211_bar *bar = (void *) super->frame_data;
1313
1314 if (unlikely(ieee80211_is_back_req(bar->frame_control)) &&
1315 skb->len >= sizeof(struct ieee80211_bar)) {
1316 struct carl9170_bar_list_entry *entry;
1317 unsigned int queue = skb_get_queue_mapping(skb);
1318
1319 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
1320 if (!WARN_ON_ONCE(!entry)) {
1321 entry->skb = skb;
1322 spin_lock_bh(&ar->bar_list_lock[queue]);
1323 list_add_tail_rcu(&entry->list, &ar->bar_list[queue]);
1324 spin_unlock_bh(&ar->bar_list_lock[queue]);
1325 }
1326 }
1327}
1328
1268static void carl9170_tx(struct ar9170 *ar) 1329static void carl9170_tx(struct ar9170 *ar)
1269{ 1330{
1270 struct sk_buff *skb; 1331 struct sk_buff *skb;
@@ -1287,6 +1348,8 @@ static void carl9170_tx(struct ar9170 *ar)
1287 if (unlikely(carl9170_tx_ps_drop(ar, skb))) 1348 if (unlikely(carl9170_tx_ps_drop(ar, skb)))
1288 continue; 1349 continue;
1289 1350
1351 carl9170_bar_check(ar, skb);
1352
1290 atomic_inc(&ar->tx_total_pending); 1353 atomic_inc(&ar->tx_total_pending);
1291 1354
1292 q = __carl9170_get_queue(ar, i); 1355 q = __carl9170_get_queue(ar, i);
diff --git a/drivers/net/wireless/ath/carl9170/version.h b/drivers/net/wireless/ath/carl9170/version.h
index e651db856344..2ec3e9191e4d 100644
--- a/drivers/net/wireless/ath/carl9170/version.h
+++ b/drivers/net/wireless/ath/carl9170/version.h
@@ -1,7 +1,7 @@
1#ifndef __CARL9170_SHARED_VERSION_H 1#ifndef __CARL9170_SHARED_VERSION_H
2#define __CARL9170_SHARED_VERSION_H 2#define __CARL9170_SHARED_VERSION_H
3#define CARL9170FW_VERSION_YEAR 11 3#define CARL9170FW_VERSION_YEAR 12
4#define CARL9170FW_VERSION_MONTH 8 4#define CARL9170FW_VERSION_MONTH 7
5#define CARL9170FW_VERSION_DAY 15 5#define CARL9170FW_VERSION_DAY 7
6#define CARL9170FW_VERSION_GIT "1.9.4" 6#define CARL9170FW_VERSION_GIT "1.9.6"
7#endif /* __CARL9170_SHARED_VERSION_H */ 7#endif /* __CARL9170_SHARED_VERSION_H */
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index d07c0301da6a..4a4e98f71807 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -2952,10 +2952,10 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
2952 /* current AP address - only in reassoc frame */ 2952 /* current AP address - only in reassoc frame */
2953 if (is_reassoc) { 2953 if (is_reassoc) {
2954 memcpy(body.ap, priv->CurrentBSSID, 6); 2954 memcpy(body.ap, priv->CurrentBSSID, 6);
2955 ssid_el_p = (u8 *)&body.ssid_el_id; 2955 ssid_el_p = &body.ssid_el_id;
2956 bodysize = 18 + priv->SSID_size; 2956 bodysize = 18 + priv->SSID_size;
2957 } else { 2957 } else {
2958 ssid_el_p = (u8 *)&body.ap[0]; 2958 ssid_el_p = &body.ap[0];
2959 bodysize = 12 + priv->SSID_size; 2959 bodysize = 12 + priv->SSID_size;
2960 } 2960 }
2961 2961
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 108118820b36..b92bb9c92ad1 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -1369,7 +1369,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
1369 i << 2); 1369 i << 2);
1370 b43_nphy_poll_rssi(dev, 2, results[i], 8); 1370 b43_nphy_poll_rssi(dev, 2, results[i], 8);
1371 } 1371 }
1372 for (i = 0; i < 4; i++) { 1372 for (i = 0; i < 4; i += 2) {
1373 s32 curr; 1373 s32 curr;
1374 s32 mind = 40; 1374 s32 mind = 40;
1375 s32 minpoll = 249; 1375 s32 minpoll = 249;
@@ -1415,14 +1415,15 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
1415 b43_nphy_scale_offset_rssi(dev, 0, 0, core + 1, 1, i); 1415 b43_nphy_scale_offset_rssi(dev, 0, 0, core + 1, 1, i);
1416 b43_nphy_poll_rssi(dev, i, poll_results, 8); 1416 b43_nphy_poll_rssi(dev, i, poll_results, 8);
1417 for (j = 0; j < 4; j++) { 1417 for (j = 0; j < 4; j++) {
1418 if (j / 2 == core) 1418 if (j / 2 == core) {
1419 offset[j] = 232 - poll_results[j]; 1419 offset[j] = 232 - poll_results[j];
1420 if (offset[j] < 0) 1420 if (offset[j] < 0)
1421 offset[j] = -(abs(offset[j] + 4) / 8); 1421 offset[j] = -(abs(offset[j] + 4) / 8);
1422 else 1422 else
1423 offset[j] = (offset[j] + 4) / 8; 1423 offset[j] = (offset[j] + 4) / 8;
1424 b43_nphy_scale_offset_rssi(dev, 0, 1424 b43_nphy_scale_offset_rssi(dev, 0,
1425 offset[2 * core], core + 1, j % 2, i); 1425 offset[2 * core], core + 1, j % 2, i);
1426 }
1426 } 1427 }
1427 } 1428 }
1428 } 1429 }
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index c8baf020c20f..2d3c6644f82d 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -52,7 +52,7 @@ struct b43legacy_dmadesc32 *op32_idx2desc(struct b43legacy_dmaring *ring,
52 desc = ring->descbase; 52 desc = ring->descbase;
53 desc = &(desc[slot]); 53 desc = &(desc[slot]);
54 54
55 return (struct b43legacy_dmadesc32 *)desc; 55 return desc;
56} 56}
57 57
58static void op32_fill_descriptor(struct b43legacy_dmaring *ring, 58static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index eae691e2f7dd..8156135a0590 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -1508,7 +1508,7 @@ static void b43legacy_release_firmware(struct b43legacy_wldev *dev)
1508 1508
1509static void b43legacy_print_fw_helptext(struct b43legacy_wl *wl) 1509static void b43legacy_print_fw_helptext(struct b43legacy_wl *wl)
1510{ 1510{
1511 b43legacyerr(wl, "You must go to http://linuxwireless.org/en/users/" 1511 b43legacyerr(wl, "You must go to http://wireless.kernel.org/en/users/"
1512 "Drivers/b43#devicefirmware " 1512 "Drivers/b43#devicefirmware "
1513 "and download the correct firmware (version 3).\n"); 1513 "and download the correct firmware (version 3).\n");
1514} 1514}
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index a8012f2749ee..b8ffea6f5c64 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -269,8 +269,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
269 b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *) 269 b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *)
270 (&txhdr->plcp), plcp_fragment_len, 270 (&txhdr->plcp), plcp_fragment_len,
271 rate); 271 rate);
272 b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *) 272 b43legacy_generate_plcp_hdr(&txhdr->plcp_fb, plcp_fragment_len,
273 (&txhdr->plcp_fb), plcp_fragment_len,
274 rate_fb->hw_value); 273 rate_fb->hw_value);
275 274
276 /* PHY TX Control word */ 275 /* PHY TX Control word */
@@ -340,8 +339,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
340 b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *) 339 b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *)
341 (&txhdr->rts_plcp), 340 (&txhdr->rts_plcp),
342 len, rts_rate); 341 len, rts_rate);
343 b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *) 342 b43legacy_generate_plcp_hdr(&txhdr->rts_plcp_fb,
344 (&txhdr->rts_plcp_fb),
345 len, rts_rate_fb); 343 len, rts_rate_fb);
346 hdr = (struct ieee80211_hdr *)(&txhdr->rts_frame); 344 hdr = (struct ieee80211_hdr *)(&txhdr->rts_frame);
347 txhdr->rts_dur_fb = hdr->duration_id; 345 txhdr->rts_dur_fb = hdr->duration_id;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index abb48032753b..9d5170b6df50 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -34,3 +34,5 @@ brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
34 sdio_chip.o 34 sdio_chip.o
35brcmfmac-$(CONFIG_BRCMFMAC_USB) += \ 35brcmfmac-$(CONFIG_BRCMFMAC_USB) += \
36 usb.o 36 usb.o
37brcmfmac-$(CONFIG_BRCMDBG) += \
38 dhd_dbg.o \ No newline at end of file
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index 82f51dbd0d66..49765d34b4e0 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -44,6 +44,7 @@
44 44
45#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329 45#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
46#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330 46#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
47#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334
47 48
48#define SDIO_FUNC1_BLOCKSIZE 64 49#define SDIO_FUNC1_BLOCKSIZE 64
49#define SDIO_FUNC2_BLOCKSIZE 512 50#define SDIO_FUNC2_BLOCKSIZE 512
@@ -52,6 +53,7 @@
52static const struct sdio_device_id brcmf_sdmmc_ids[] = { 53static const struct sdio_device_id brcmf_sdmmc_ids[] = {
53 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)}, 54 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
54 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)}, 55 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
56 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)},
55 { /* end: all zeroes */ }, 57 { /* end: all zeroes */ },
56}; 58};
57MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids); 59MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 9f637014486e..a11fe54f5950 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -613,6 +613,9 @@ struct brcmf_pub {
613 struct work_struct multicast_work; 613 struct work_struct multicast_work;
614 u8 macvalue[ETH_ALEN]; 614 u8 macvalue[ETH_ALEN];
615 atomic_t pend_8021x_cnt; 615 atomic_t pend_8021x_cnt;
616#ifdef DEBUG
617 struct dentry *dbgfs_dir;
618#endif
616}; 619};
617 620
618struct brcmf_if_event { 621struct brcmf_if_event {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index 366916494be4..537f499cc5d2 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -36,6 +36,13 @@ struct dngl_stats {
36 unsigned long multicast; /* multicast packets received */ 36 unsigned long multicast; /* multicast packets received */
37}; 37};
38 38
39struct brcmf_bus_dcmd {
40 char *name;
41 char *param;
42 int param_len;
43 struct list_head list;
44};
45
39/* interface structure between common and bus layer */ 46/* interface structure between common and bus layer */
40struct brcmf_bus { 47struct brcmf_bus {
41 u8 type; /* bus type */ 48 u8 type; /* bus type */
@@ -50,6 +57,7 @@ struct brcmf_bus {
50 unsigned long tx_realloc; /* Tx packets realloced for headroom */ 57 unsigned long tx_realloc; /* Tx packets realloced for headroom */
51 struct dngl_stats dstats; /* Stats for dongle-based data */ 58 struct dngl_stats dstats; /* Stats for dongle-based data */
52 u8 align; /* bus alignment requirement */ 59 u8 align; /* bus alignment requirement */
60 struct list_head dcmd_list;
53 61
54 /* interface functions pointers */ 62 /* interface functions pointers */
55 /* Stop bus module: clear pending frames, disable data flow */ 63 /* Stop bus module: clear pending frames, disable data flow */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
index 236cb9fa460c..2621dd3d7dcd 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -800,13 +800,13 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
800 char iovbuf[BRCMF_EVENTING_MASK_LEN + 12]; /* Room for 800 char iovbuf[BRCMF_EVENTING_MASK_LEN + 12]; /* Room for
801 "event_msgs" + '\0' + bitvec */ 801 "event_msgs" + '\0' + bitvec */
802 char buf[128], *ptr; 802 char buf[128], *ptr;
803 u32 dongle_align = drvr->bus_if->align;
804 u32 glom = 0;
805 u32 roaming = 1; 803 u32 roaming = 1;
806 uint bcn_timeout = 3; 804 uint bcn_timeout = 3;
807 int scan_assoc_time = 40; 805 int scan_assoc_time = 40;
808 int scan_unassoc_time = 40; 806 int scan_unassoc_time = 40;
809 int i; 807 int i;
808 struct brcmf_bus_dcmd *cmdlst;
809 struct list_head *cur, *q;
810 810
811 mutex_lock(&drvr->proto_block); 811 mutex_lock(&drvr->proto_block);
812 812
@@ -827,17 +827,6 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
827 /* Print fw version info */ 827 /* Print fw version info */
828 brcmf_dbg(ERROR, "Firmware version = %s\n", buf); 828 brcmf_dbg(ERROR, "Firmware version = %s\n", buf);
829 829
830 /* Match Host and Dongle rx alignment */
831 brcmf_c_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf,
832 sizeof(iovbuf));
833 brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
834 sizeof(iovbuf));
835
836 /* disable glom option per default */
837 brcmf_c_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
838 brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
839 sizeof(iovbuf));
840
841 /* Setup timeout if Beacons are lost and roam is off to report 830 /* Setup timeout if Beacons are lost and roam is off to report
842 link down */ 831 link down */
843 brcmf_c_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, 832 brcmf_c_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf,
@@ -874,6 +863,20 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
874 0, true); 863 0, true);
875 } 864 }
876 865
866 /* set bus specific command if there is any */
867 list_for_each_safe(cur, q, &drvr->bus_if->dcmd_list) {
868 cmdlst = list_entry(cur, struct brcmf_bus_dcmd, list);
869 if (cmdlst->name && cmdlst->param && cmdlst->param_len) {
870 brcmf_c_mkiovar(cmdlst->name, cmdlst->param,
871 cmdlst->param_len, iovbuf,
872 sizeof(iovbuf));
873 brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR,
874 iovbuf, sizeof(iovbuf));
875 }
876 list_del(cur);
877 kfree(cmdlst);
878 }
879
877 mutex_unlock(&drvr->proto_block); 880 mutex_unlock(&drvr->proto_block);
878 881
879 return 0; 882 return 0;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
new file mode 100644
index 000000000000..7f89540b56da
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
@@ -0,0 +1,126 @@
1/*
2 * Copyright (c) 2012 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16#include <linux/debugfs.h>
17#include <linux/if_ether.h>
18#include <linux/if.h>
19#include <linux/ieee80211.h>
20#include <linux/module.h>
21
22#include <defs.h>
23#include <brcmu_wifi.h>
24#include <brcmu_utils.h>
25#include "dhd.h"
26#include "dhd_bus.h"
27#include "dhd_dbg.h"
28
29static struct dentry *root_folder;
30
31void brcmf_debugfs_init(void)
32{
33 root_folder = debugfs_create_dir(KBUILD_MODNAME, NULL);
34 if (IS_ERR(root_folder))
35 root_folder = NULL;
36}
37
38void brcmf_debugfs_exit(void)
39{
40 if (!root_folder)
41 return;
42
43 debugfs_remove_recursive(root_folder);
44 root_folder = NULL;
45}
46
47int brcmf_debugfs_attach(struct brcmf_pub *drvr)
48{
49 if (!root_folder)
50 return -ENODEV;
51
52 drvr->dbgfs_dir = debugfs_create_dir(dev_name(drvr->dev), root_folder);
53 return PTR_RET(drvr->dbgfs_dir);
54}
55
56void brcmf_debugfs_detach(struct brcmf_pub *drvr)
57{
58 if (!IS_ERR_OR_NULL(drvr->dbgfs_dir))
59 debugfs_remove_recursive(drvr->dbgfs_dir);
60}
61
62struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr)
63{
64 return drvr->dbgfs_dir;
65}
66
67static
68ssize_t brcmf_debugfs_sdio_counter_read(struct file *f, char __user *data,
69 size_t count, loff_t *ppos)
70{
71 struct brcmf_sdio_count *sdcnt = f->private_data;
72 char buf[750];
73 int res;
74
75 /* only allow read from start */
76 if (*ppos > 0)
77 return 0;
78
79 res = scnprintf(buf, sizeof(buf),
80 "intrcount: %u\nlastintrs: %u\n"
81 "pollcnt: %u\nregfails: %u\n"
82 "tx_sderrs: %u\nfcqueued: %u\n"
83 "rxrtx: %u\nrx_toolong: %u\n"
84 "rxc_errors: %u\nrx_hdrfail: %u\n"
85 "rx_badhdr: %u\nrx_badseq: %u\n"
86 "fc_rcvd: %u\nfc_xoff: %u\n"
87 "fc_xon: %u\nrxglomfail: %u\n"
88 "rxglomframes: %u\nrxglompkts: %u\n"
89 "f2rxhdrs: %u\nf2rxdata: %u\n"
90 "f2txdata: %u\nf1regdata: %u\n"
91 "tickcnt: %u\ntx_ctlerrs: %lu\n"
92 "tx_ctlpkts: %lu\nrx_ctlerrs: %lu\n"
93 "rx_ctlpkts: %lu\nrx_readahead: %lu\n",
94 sdcnt->intrcount, sdcnt->lastintrs,
95 sdcnt->pollcnt, sdcnt->regfails,
96 sdcnt->tx_sderrs, sdcnt->fcqueued,
97 sdcnt->rxrtx, sdcnt->rx_toolong,
98 sdcnt->rxc_errors, sdcnt->rx_hdrfail,
99 sdcnt->rx_badhdr, sdcnt->rx_badseq,
100 sdcnt->fc_rcvd, sdcnt->fc_xoff,
101 sdcnt->fc_xon, sdcnt->rxglomfail,
102 sdcnt->rxglomframes, sdcnt->rxglompkts,
103 sdcnt->f2rxhdrs, sdcnt->f2rxdata,
104 sdcnt->f2txdata, sdcnt->f1regdata,
105 sdcnt->tickcnt, sdcnt->tx_ctlerrs,
106 sdcnt->tx_ctlpkts, sdcnt->rx_ctlerrs,
107 sdcnt->rx_ctlpkts, sdcnt->rx_readahead_cnt);
108
109 return simple_read_from_buffer(data, count, ppos, buf, res);
110}
111
112static const struct file_operations brcmf_debugfs_sdio_counter_ops = {
113 .owner = THIS_MODULE,
114 .open = simple_open,
115 .read = brcmf_debugfs_sdio_counter_read
116};
117
118void brcmf_debugfs_create_sdio_count(struct brcmf_pub *drvr,
119 struct brcmf_sdio_count *sdcnt)
120{
121 struct dentry *dentry = drvr->dbgfs_dir;
122
123 if (!IS_ERR_OR_NULL(dentry))
124 debugfs_create_file("counters", S_IRUGO, dentry,
125 sdcnt, &brcmf_debugfs_sdio_counter_ops);
126}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
index a2c4576cf9ff..b784920532d3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
@@ -76,4 +76,63 @@ do { \
76 76
77extern int brcmf_msg_level; 77extern int brcmf_msg_level;
78 78
79/*
80 * hold counter variables used in brcmfmac sdio driver.
81 */
82struct brcmf_sdio_count {
83 uint intrcount; /* Count of device interrupt callbacks */
84 uint lastintrs; /* Count as of last watchdog timer */
85 uint pollcnt; /* Count of active polls */
86 uint regfails; /* Count of R_REG failures */
87 uint tx_sderrs; /* Count of tx attempts with sd errors */
88 uint fcqueued; /* Tx packets that got queued */
89 uint rxrtx; /* Count of rtx requests (NAK to dongle) */
90 uint rx_toolong; /* Receive frames too long to receive */
91 uint rxc_errors; /* SDIO errors when reading control frames */
92 uint rx_hdrfail; /* SDIO errors on header reads */
93 uint rx_badhdr; /* Bad received headers (roosync?) */
94 uint rx_badseq; /* Mismatched rx sequence number */
95 uint fc_rcvd; /* Number of flow-control events received */
96 uint fc_xoff; /* Number which turned on flow-control */
97 uint fc_xon; /* Number which turned off flow-control */
98 uint rxglomfail; /* Failed deglom attempts */
99 uint rxglomframes; /* Number of glom frames (superframes) */
100 uint rxglompkts; /* Number of packets from glom frames */
101 uint f2rxhdrs; /* Number of header reads */
102 uint f2rxdata; /* Number of frame data reads */
103 uint f2txdata; /* Number of f2 frame writes */
104 uint f1regdata; /* Number of f1 register accesses */
105 uint tickcnt; /* Number of watchdog been schedule */
106 ulong tx_ctlerrs; /* Err of sending ctrl frames */
107 ulong tx_ctlpkts; /* Ctrl frames sent to dongle */
108 ulong rx_ctlerrs; /* Err of processing rx ctrl frames */
109 ulong rx_ctlpkts; /* Ctrl frames processed from dongle */
110 ulong rx_readahead_cnt; /* packets where header read-ahead was used */
111};
112
113struct brcmf_pub;
114#ifdef DEBUG
115void brcmf_debugfs_init(void);
116void brcmf_debugfs_exit(void);
117int brcmf_debugfs_attach(struct brcmf_pub *drvr);
118void brcmf_debugfs_detach(struct brcmf_pub *drvr);
119struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr);
120void brcmf_debugfs_create_sdio_count(struct brcmf_pub *drvr,
121 struct brcmf_sdio_count *sdcnt);
122#else
123static inline void brcmf_debugfs_init(void)
124{
125}
126static inline void brcmf_debugfs_exit(void)
127{
128}
129static inline int brcmf_debugfs_attach(struct brcmf_pub *drvr)
130{
131 return 0;
132}
133static inline void brcmf_debugfs_detach(struct brcmf_pub *drvr)
134{
135}
136#endif
137
79#endif /* _BRCMF_DBG_H_ */ 138#endif /* _BRCMF_DBG_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 8933f9b31a9a..57bf1d7ee80f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -1007,6 +1007,9 @@ int brcmf_attach(uint bus_hdrlen, struct device *dev)
1007 drvr->bus_if->drvr = drvr; 1007 drvr->bus_if->drvr = drvr;
1008 drvr->dev = dev; 1008 drvr->dev = dev;
1009 1009
1010 /* create device debugfs folder */
1011 brcmf_debugfs_attach(drvr);
1012
1010 /* Attach and link in the protocol */ 1013 /* Attach and link in the protocol */
1011 ret = brcmf_proto_attach(drvr); 1014 ret = brcmf_proto_attach(drvr);
1012 if (ret != 0) { 1015 if (ret != 0) {
@@ -1017,6 +1020,8 @@ int brcmf_attach(uint bus_hdrlen, struct device *dev)
1017 INIT_WORK(&drvr->setmacaddr_work, _brcmf_set_mac_address); 1020 INIT_WORK(&drvr->setmacaddr_work, _brcmf_set_mac_address);
1018 INIT_WORK(&drvr->multicast_work, _brcmf_set_multicast_list); 1021 INIT_WORK(&drvr->multicast_work, _brcmf_set_multicast_list);
1019 1022
1023 INIT_LIST_HEAD(&drvr->bus_if->dcmd_list);
1024
1020 return ret; 1025 return ret;
1021 1026
1022fail: 1027fail:
@@ -1123,6 +1128,7 @@ void brcmf_detach(struct device *dev)
1123 brcmf_proto_detach(drvr); 1128 brcmf_proto_detach(drvr);
1124 } 1129 }
1125 1130
1131 brcmf_debugfs_detach(drvr);
1126 bus_if->drvr = NULL; 1132 bus_if->drvr = NULL;
1127 kfree(drvr); 1133 kfree(drvr);
1128} 1134}
@@ -1192,6 +1198,8 @@ exit:
1192 1198
1193static void brcmf_driver_init(struct work_struct *work) 1199static void brcmf_driver_init(struct work_struct *work)
1194{ 1200{
1201 brcmf_debugfs_init();
1202
1195#ifdef CONFIG_BRCMFMAC_SDIO 1203#ifdef CONFIG_BRCMFMAC_SDIO
1196 brcmf_sdio_init(); 1204 brcmf_sdio_init();
1197#endif 1205#endif
@@ -1219,6 +1227,7 @@ static void __exit brcmfmac_module_exit(void)
1219#ifdef CONFIG_BRCMFMAC_USB 1227#ifdef CONFIG_BRCMFMAC_USB
1220 brcmf_usb_exit(); 1228 brcmf_usb_exit();
1221#endif 1229#endif
1230 brcmf_debugfs_exit();
1222} 1231}
1223 1232
1224module_init(brcmfmac_module_init); 1233module_init(brcmfmac_module_init);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 1dbf2be478c8..472f2ef5c652 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -31,6 +31,8 @@
31#include <linux/firmware.h> 31#include <linux/firmware.h>
32#include <linux/module.h> 32#include <linux/module.h>
33#include <linux/bcma/bcma.h> 33#include <linux/bcma/bcma.h>
34#include <linux/debugfs.h>
35#include <linux/vmalloc.h>
34#include <asm/unaligned.h> 36#include <asm/unaligned.h>
35#include <defs.h> 37#include <defs.h>
36#include <brcmu_wifi.h> 38#include <brcmu_wifi.h>
@@ -48,6 +50,9 @@
48 50
49#define CBUF_LEN (128) 51#define CBUF_LEN (128)
50 52
53/* Device console log buffer state */
54#define CONSOLE_BUFFER_MAX 2024
55
51struct rte_log_le { 56struct rte_log_le {
52 __le32 buf; /* Can't be pointer on (64-bit) hosts */ 57 __le32 buf; /* Can't be pointer on (64-bit) hosts */
53 __le32 buf_size; 58 __le32 buf_size;
@@ -281,7 +286,7 @@ struct rte_console {
281 * Shared structure between dongle and the host. 286 * Shared structure between dongle and the host.
282 * The structure contains pointers to trap or assert information. 287 * The structure contains pointers to trap or assert information.
283 */ 288 */
284#define SDPCM_SHARED_VERSION 0x0002 289#define SDPCM_SHARED_VERSION 0x0003
285#define SDPCM_SHARED_VERSION_MASK 0x00FF 290#define SDPCM_SHARED_VERSION_MASK 0x00FF
286#define SDPCM_SHARED_ASSERT_BUILT 0x0100 291#define SDPCM_SHARED_ASSERT_BUILT 0x0100
287#define SDPCM_SHARED_ASSERT 0x0200 292#define SDPCM_SHARED_ASSERT 0x0200
@@ -428,6 +433,29 @@ struct brcmf_console {
428 u8 *buf; /* Log buffer (host copy) */ 433 u8 *buf; /* Log buffer (host copy) */
429 uint last; /* Last buffer read index */ 434 uint last; /* Last buffer read index */
430}; 435};
436
437struct brcmf_trap_info {
438 __le32 type;
439 __le32 epc;
440 __le32 cpsr;
441 __le32 spsr;
442 __le32 r0; /* a1 */
443 __le32 r1; /* a2 */
444 __le32 r2; /* a3 */
445 __le32 r3; /* a4 */
446 __le32 r4; /* v1 */
447 __le32 r5; /* v2 */
448 __le32 r6; /* v3 */
449 __le32 r7; /* v4 */
450 __le32 r8; /* v5 */
451 __le32 r9; /* sb/v6 */
452 __le32 r10; /* sl/v7 */
453 __le32 r11; /* fp/v8 */
454 __le32 r12; /* ip */
455 __le32 r13; /* sp */
456 __le32 r14; /* lr */
457 __le32 pc; /* r15 */
458};
431#endif /* DEBUG */ 459#endif /* DEBUG */
432 460
433struct sdpcm_shared { 461struct sdpcm_shared {
@@ -439,6 +467,7 @@ struct sdpcm_shared {
439 u32 console_addr; /* Address of struct rte_console */ 467 u32 console_addr; /* Address of struct rte_console */
440 u32 msgtrace_addr; 468 u32 msgtrace_addr;
441 u8 tag[32]; 469 u8 tag[32];
470 u32 brpt_addr;
442}; 471};
443 472
444struct sdpcm_shared_le { 473struct sdpcm_shared_le {
@@ -450,6 +479,7 @@ struct sdpcm_shared_le {
450 __le32 console_addr; /* Address of struct rte_console */ 479 __le32 console_addr; /* Address of struct rte_console */
451 __le32 msgtrace_addr; 480 __le32 msgtrace_addr;
452 u8 tag[32]; 481 u8 tag[32];
482 __le32 brpt_addr;
453}; 483};
454 484
455 485
@@ -502,12 +532,9 @@ struct brcmf_sdio {
502 bool intr; /* Use interrupts */ 532 bool intr; /* Use interrupts */
503 bool poll; /* Use polling */ 533 bool poll; /* Use polling */
504 bool ipend; /* Device interrupt is pending */ 534 bool ipend; /* Device interrupt is pending */
505 uint intrcount; /* Count of device interrupt callbacks */
506 uint lastintrs; /* Count as of last watchdog timer */
507 uint spurious; /* Count of spurious interrupts */ 535 uint spurious; /* Count of spurious interrupts */
508 uint pollrate; /* Ticks between device polls */ 536 uint pollrate; /* Ticks between device polls */
509 uint polltick; /* Tick counter */ 537 uint polltick; /* Tick counter */
510 uint pollcnt; /* Count of active polls */
511 538
512#ifdef DEBUG 539#ifdef DEBUG
513 uint console_interval; 540 uint console_interval;
@@ -515,8 +542,6 @@ struct brcmf_sdio {
515 uint console_addr; /* Console address from shared struct */ 542 uint console_addr; /* Console address from shared struct */
516#endif /* DEBUG */ 543#endif /* DEBUG */
517 544
518 uint regfails; /* Count of R_REG failures */
519
520 uint clkstate; /* State of sd and backplane clock(s) */ 545 uint clkstate; /* State of sd and backplane clock(s) */
521 bool activity; /* Activity flag for clock down */ 546 bool activity; /* Activity flag for clock down */
522 s32 idletime; /* Control for activity timeout */ 547 s32 idletime; /* Control for activity timeout */
@@ -531,33 +556,6 @@ struct brcmf_sdio {
531/* Field to decide if rx of control frames happen in rxbuf or lb-pool */ 556/* Field to decide if rx of control frames happen in rxbuf or lb-pool */
532 bool usebufpool; 557 bool usebufpool;
533 558
534 /* Some additional counters */
535 uint tx_sderrs; /* Count of tx attempts with sd errors */
536 uint fcqueued; /* Tx packets that got queued */
537 uint rxrtx; /* Count of rtx requests (NAK to dongle) */
538 uint rx_toolong; /* Receive frames too long to receive */
539 uint rxc_errors; /* SDIO errors when reading control frames */
540 uint rx_hdrfail; /* SDIO errors on header reads */
541 uint rx_badhdr; /* Bad received headers (roosync?) */
542 uint rx_badseq; /* Mismatched rx sequence number */
543 uint fc_rcvd; /* Number of flow-control events received */
544 uint fc_xoff; /* Number which turned on flow-control */
545 uint fc_xon; /* Number which turned off flow-control */
546 uint rxglomfail; /* Failed deglom attempts */
547 uint rxglomframes; /* Number of glom frames (superframes) */
548 uint rxglompkts; /* Number of packets from glom frames */
549 uint f2rxhdrs; /* Number of header reads */
550 uint f2rxdata; /* Number of frame data reads */
551 uint f2txdata; /* Number of f2 frame writes */
552 uint f1regdata; /* Number of f1 register accesses */
553 uint tickcnt; /* Number of watchdog been schedule */
554 unsigned long tx_ctlerrs; /* Err of sending ctrl frames */
555 unsigned long tx_ctlpkts; /* Ctrl frames sent to dongle */
556 unsigned long rx_ctlerrs; /* Err of processing rx ctrl frames */
557 unsigned long rx_ctlpkts; /* Ctrl frames processed from dongle */
558 unsigned long rx_readahead_cnt; /* Number of packets where header
559 * read-ahead was used. */
560
561 u8 *ctrl_frame_buf; 559 u8 *ctrl_frame_buf;
562 u32 ctrl_frame_len; 560 u32 ctrl_frame_len;
563 bool ctrl_frame_stat; 561 bool ctrl_frame_stat;
@@ -583,6 +581,7 @@ struct brcmf_sdio {
583 u32 fw_ptr; 581 u32 fw_ptr;
584 582
585 bool txoff; /* Transmit flow-controlled */ 583 bool txoff; /* Transmit flow-controlled */
584 struct brcmf_sdio_count sdcnt;
586}; 585};
587 586
588/* clkstate */ 587/* clkstate */
@@ -945,7 +944,7 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
945 if (ret == 0) 944 if (ret == 0)
946 w_sdreg32(bus, SMB_INT_ACK, 945 w_sdreg32(bus, SMB_INT_ACK,
947 offsetof(struct sdpcmd_regs, tosbmailbox)); 946 offsetof(struct sdpcmd_regs, tosbmailbox));
948 bus->f1regdata += 2; 947 bus->sdcnt.f1regdata += 2;
949 948
950 /* Dongle recomposed rx frames, accept them again */ 949 /* Dongle recomposed rx frames, accept them again */
951 if (hmb_data & HMB_DATA_NAKHANDLED) { 950 if (hmb_data & HMB_DATA_NAKHANDLED) {
@@ -984,12 +983,12 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
984 HMB_DATA_FCDATA_SHIFT; 983 HMB_DATA_FCDATA_SHIFT;
985 984
986 if (fcbits & ~bus->flowcontrol) 985 if (fcbits & ~bus->flowcontrol)
987 bus->fc_xoff++; 986 bus->sdcnt.fc_xoff++;
988 987
989 if (bus->flowcontrol & ~fcbits) 988 if (bus->flowcontrol & ~fcbits)
990 bus->fc_xon++; 989 bus->sdcnt.fc_xon++;
991 990
992 bus->fc_rcvd++; 991 bus->sdcnt.fc_rcvd++;
993 bus->flowcontrol = fcbits; 992 bus->flowcontrol = fcbits;
994 } 993 }
995 994
@@ -1021,7 +1020,7 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
1021 1020
1022 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL, 1021 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
1023 SFC_RF_TERM, &err); 1022 SFC_RF_TERM, &err);
1024 bus->f1regdata++; 1023 bus->sdcnt.f1regdata++;
1025 1024
1026 /* Wait until the packet has been flushed (device/FIFO stable) */ 1025 /* Wait until the packet has been flushed (device/FIFO stable) */
1027 for (lastrbc = retries = 0xffff; retries > 0; retries--) { 1026 for (lastrbc = retries = 0xffff; retries > 0; retries--) {
@@ -1029,7 +1028,7 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
1029 SBSDIO_FUNC1_RFRAMEBCHI, &err); 1028 SBSDIO_FUNC1_RFRAMEBCHI, &err);
1030 lo = brcmf_sdio_regrb(bus->sdiodev, 1029 lo = brcmf_sdio_regrb(bus->sdiodev,
1031 SBSDIO_FUNC1_RFRAMEBCLO, &err); 1030 SBSDIO_FUNC1_RFRAMEBCLO, &err);
1032 bus->f1regdata += 2; 1031 bus->sdcnt.f1regdata += 2;
1033 1032
1034 if ((hi == 0) && (lo == 0)) 1033 if ((hi == 0) && (lo == 0))
1035 break; 1034 break;
@@ -1047,11 +1046,11 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
1047 brcmf_dbg(INFO, "flush took %d iterations\n", 0xffff - retries); 1046 brcmf_dbg(INFO, "flush took %d iterations\n", 0xffff - retries);
1048 1047
1049 if (rtx) { 1048 if (rtx) {
1050 bus->rxrtx++; 1049 bus->sdcnt.rxrtx++;
1051 err = w_sdreg32(bus, SMB_NAK, 1050 err = w_sdreg32(bus, SMB_NAK,
1052 offsetof(struct sdpcmd_regs, tosbmailbox)); 1051 offsetof(struct sdpcmd_regs, tosbmailbox));
1053 1052
1054 bus->f1regdata++; 1053 bus->sdcnt.f1regdata++;
1055 if (err == 0) 1054 if (err == 0)
1056 bus->rxskip = true; 1055 bus->rxskip = true;
1057 } 1056 }
@@ -1243,7 +1242,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1243 dlen); 1242 dlen);
1244 errcode = -1; 1243 errcode = -1;
1245 } 1244 }
1246 bus->f2rxdata++; 1245 bus->sdcnt.f2rxdata++;
1247 1246
1248 /* On failure, kill the superframe, allow a couple retries */ 1247 /* On failure, kill the superframe, allow a couple retries */
1249 if (errcode < 0) { 1248 if (errcode < 0) {
@@ -1256,7 +1255,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1256 } else { 1255 } else {
1257 bus->glomerr = 0; 1256 bus->glomerr = 0;
1258 brcmf_sdbrcm_rxfail(bus, true, false); 1257 brcmf_sdbrcm_rxfail(bus, true, false);
1259 bus->rxglomfail++; 1258 bus->sdcnt.rxglomfail++;
1260 brcmf_sdbrcm_free_glom(bus); 1259 brcmf_sdbrcm_free_glom(bus);
1261 } 1260 }
1262 return 0; 1261 return 0;
@@ -1312,7 +1311,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1312 if (rxseq != seq) { 1311 if (rxseq != seq) {
1313 brcmf_dbg(INFO, "(superframe) rx_seq %d, expected %d\n", 1312 brcmf_dbg(INFO, "(superframe) rx_seq %d, expected %d\n",
1314 seq, rxseq); 1313 seq, rxseq);
1315 bus->rx_badseq++; 1314 bus->sdcnt.rx_badseq++;
1316 rxseq = seq; 1315 rxseq = seq;
1317 } 1316 }
1318 1317
@@ -1376,7 +1375,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1376 } else { 1375 } else {
1377 bus->glomerr = 0; 1376 bus->glomerr = 0;
1378 brcmf_sdbrcm_rxfail(bus, true, false); 1377 brcmf_sdbrcm_rxfail(bus, true, false);
1379 bus->rxglomfail++; 1378 bus->sdcnt.rxglomfail++;
1380 brcmf_sdbrcm_free_glom(bus); 1379 brcmf_sdbrcm_free_glom(bus);
1381 } 1380 }
1382 bus->nextlen = 0; 1381 bus->nextlen = 0;
@@ -1402,7 +1401,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1402 if (rxseq != seq) { 1401 if (rxseq != seq) {
1403 brcmf_dbg(GLOM, "rx_seq %d, expected %d\n", 1402 brcmf_dbg(GLOM, "rx_seq %d, expected %d\n",
1404 seq, rxseq); 1403 seq, rxseq);
1405 bus->rx_badseq++; 1404 bus->sdcnt.rx_badseq++;
1406 rxseq = seq; 1405 rxseq = seq;
1407 } 1406 }
1408 rxseq++; 1407 rxseq++;
@@ -1441,8 +1440,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1441 down(&bus->sdsem); 1440 down(&bus->sdsem);
1442 } 1441 }
1443 1442
1444 bus->rxglomframes++; 1443 bus->sdcnt.rxglomframes++;
1445 bus->rxglompkts += bus->glom.qlen; 1444 bus->sdcnt.rxglompkts += bus->glom.qlen;
1446 } 1445 }
1447 return num; 1446 return num;
1448} 1447}
@@ -1526,7 +1525,7 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
1526 brcmf_dbg(ERROR, "%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n", 1525 brcmf_dbg(ERROR, "%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
1527 len, len - doff, bus->sdiodev->bus_if->maxctl); 1526 len, len - doff, bus->sdiodev->bus_if->maxctl);
1528 bus->sdiodev->bus_if->dstats.rx_errors++; 1527 bus->sdiodev->bus_if->dstats.rx_errors++;
1529 bus->rx_toolong++; 1528 bus->sdcnt.rx_toolong++;
1530 brcmf_sdbrcm_rxfail(bus, false, false); 1529 brcmf_sdbrcm_rxfail(bus, false, false);
1531 goto done; 1530 goto done;
1532 } 1531 }
@@ -1536,13 +1535,13 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
1536 bus->sdiodev->sbwad, 1535 bus->sdiodev->sbwad,
1537 SDIO_FUNC_2, 1536 SDIO_FUNC_2,
1538 F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen); 1537 F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen);
1539 bus->f2rxdata++; 1538 bus->sdcnt.f2rxdata++;
1540 1539
1541 /* Control frame failures need retransmission */ 1540 /* Control frame failures need retransmission */
1542 if (sdret < 0) { 1541 if (sdret < 0) {
1543 brcmf_dbg(ERROR, "read %d control bytes failed: %d\n", 1542 brcmf_dbg(ERROR, "read %d control bytes failed: %d\n",
1544 rdlen, sdret); 1543 rdlen, sdret);
1545 bus->rxc_errors++; 1544 bus->sdcnt.rxc_errors++;
1546 brcmf_sdbrcm_rxfail(bus, true, true); 1545 brcmf_sdbrcm_rxfail(bus, true, true);
1547 goto done; 1546 goto done;
1548 } 1547 }
@@ -1589,7 +1588,7 @@ brcmf_alloc_pkt_and_read(struct brcmf_sdio *bus, u16 rdlen,
1589 /* Read the entire frame */ 1588 /* Read the entire frame */
1590 sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad, 1589 sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
1591 SDIO_FUNC_2, F2SYNC, *pkt); 1590 SDIO_FUNC_2, F2SYNC, *pkt);
1592 bus->f2rxdata++; 1591 bus->sdcnt.f2rxdata++;
1593 1592
1594 if (sdret < 0) { 1593 if (sdret < 0) {
1595 brcmf_dbg(ERROR, "(nextlen): read %d bytes failed: %d\n", 1594 brcmf_dbg(ERROR, "(nextlen): read %d bytes failed: %d\n",
@@ -1630,7 +1629,7 @@ brcmf_check_rxbuf(struct brcmf_sdio *bus, struct sk_buff *pkt, u8 *rxbuf,
1630 if ((u16)~(*len ^ check)) { 1629 if ((u16)~(*len ^ check)) {
1631 brcmf_dbg(ERROR, "(nextlen): HW hdr error: nextlen/len/check 0x%04x/0x%04x/0x%04x\n", 1630 brcmf_dbg(ERROR, "(nextlen): HW hdr error: nextlen/len/check 0x%04x/0x%04x/0x%04x\n",
1632 nextlen, *len, check); 1631 nextlen, *len, check);
1633 bus->rx_badhdr++; 1632 bus->sdcnt.rx_badhdr++;
1634 brcmf_sdbrcm_rxfail(bus, false, false); 1633 brcmf_sdbrcm_rxfail(bus, false, false);
1635 goto fail; 1634 goto fail;
1636 } 1635 }
@@ -1746,7 +1745,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
1746 bus->nextlen = 0; 1745 bus->nextlen = 0;
1747 } 1746 }
1748 1747
1749 bus->rx_readahead_cnt++; 1748 bus->sdcnt.rx_readahead_cnt++;
1750 1749
1751 /* Handle Flow Control */ 1750 /* Handle Flow Control */
1752 fcbits = SDPCM_FCMASK_VALUE( 1751 fcbits = SDPCM_FCMASK_VALUE(
@@ -1754,12 +1753,12 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
1754 1753
1755 if (bus->flowcontrol != fcbits) { 1754 if (bus->flowcontrol != fcbits) {
1756 if (~bus->flowcontrol & fcbits) 1755 if (~bus->flowcontrol & fcbits)
1757 bus->fc_xoff++; 1756 bus->sdcnt.fc_xoff++;
1758 1757
1759 if (bus->flowcontrol & ~fcbits) 1758 if (bus->flowcontrol & ~fcbits)
1760 bus->fc_xon++; 1759 bus->sdcnt.fc_xon++;
1761 1760
1762 bus->fc_rcvd++; 1761 bus->sdcnt.fc_rcvd++;
1763 bus->flowcontrol = fcbits; 1762 bus->flowcontrol = fcbits;
1764 } 1763 }
1765 1764
@@ -1767,7 +1766,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
1767 if (rxseq != seq) { 1766 if (rxseq != seq) {
1768 brcmf_dbg(INFO, "(nextlen): rx_seq %d, expected %d\n", 1767 brcmf_dbg(INFO, "(nextlen): rx_seq %d, expected %d\n",
1769 seq, rxseq); 1768 seq, rxseq);
1770 bus->rx_badseq++; 1769 bus->sdcnt.rx_badseq++;
1771 rxseq = seq; 1770 rxseq = seq;
1772 } 1771 }
1773 1772
@@ -1814,11 +1813,11 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
1814 sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad, 1813 sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
1815 SDIO_FUNC_2, F2SYNC, bus->rxhdr, 1814 SDIO_FUNC_2, F2SYNC, bus->rxhdr,
1816 BRCMF_FIRSTREAD); 1815 BRCMF_FIRSTREAD);
1817 bus->f2rxhdrs++; 1816 bus->sdcnt.f2rxhdrs++;
1818 1817
1819 if (sdret < 0) { 1818 if (sdret < 0) {
1820 brcmf_dbg(ERROR, "RXHEADER FAILED: %d\n", sdret); 1819 brcmf_dbg(ERROR, "RXHEADER FAILED: %d\n", sdret);
1821 bus->rx_hdrfail++; 1820 bus->sdcnt.rx_hdrfail++;
1822 brcmf_sdbrcm_rxfail(bus, true, true); 1821 brcmf_sdbrcm_rxfail(bus, true, true);
1823 continue; 1822 continue;
1824 } 1823 }
@@ -1840,7 +1839,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
1840 if ((u16) ~(len ^ check)) { 1839 if ((u16) ~(len ^ check)) {
1841 brcmf_dbg(ERROR, "HW hdr err: len/check 0x%04x/0x%04x\n", 1840 brcmf_dbg(ERROR, "HW hdr err: len/check 0x%04x/0x%04x\n",
1842 len, check); 1841 len, check);
1843 bus->rx_badhdr++; 1842 bus->sdcnt.rx_badhdr++;
1844 brcmf_sdbrcm_rxfail(bus, false, false); 1843 brcmf_sdbrcm_rxfail(bus, false, false);
1845 continue; 1844 continue;
1846 } 1845 }
@@ -1861,7 +1860,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
1861 if ((doff < SDPCM_HDRLEN) || (doff > len)) { 1860 if ((doff < SDPCM_HDRLEN) || (doff > len)) {
1862 brcmf_dbg(ERROR, "Bad data offset %d: HW len %d, min %d seq %d\n", 1861 brcmf_dbg(ERROR, "Bad data offset %d: HW len %d, min %d seq %d\n",
1863 doff, len, SDPCM_HDRLEN, seq); 1862 doff, len, SDPCM_HDRLEN, seq);
1864 bus->rx_badhdr++; 1863 bus->sdcnt.rx_badhdr++;
1865 brcmf_sdbrcm_rxfail(bus, false, false); 1864 brcmf_sdbrcm_rxfail(bus, false, false);
1866 continue; 1865 continue;
1867 } 1866 }
@@ -1880,19 +1879,19 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
1880 1879
1881 if (bus->flowcontrol != fcbits) { 1880 if (bus->flowcontrol != fcbits) {
1882 if (~bus->flowcontrol & fcbits) 1881 if (~bus->flowcontrol & fcbits)
1883 bus->fc_xoff++; 1882 bus->sdcnt.fc_xoff++;
1884 1883
1885 if (bus->flowcontrol & ~fcbits) 1884 if (bus->flowcontrol & ~fcbits)
1886 bus->fc_xon++; 1885 bus->sdcnt.fc_xon++;
1887 1886
1888 bus->fc_rcvd++; 1887 bus->sdcnt.fc_rcvd++;
1889 bus->flowcontrol = fcbits; 1888 bus->flowcontrol = fcbits;
1890 } 1889 }
1891 1890
1892 /* Check and update sequence number */ 1891 /* Check and update sequence number */
1893 if (rxseq != seq) { 1892 if (rxseq != seq) {
1894 brcmf_dbg(INFO, "rx_seq %d, expected %d\n", seq, rxseq); 1893 brcmf_dbg(INFO, "rx_seq %d, expected %d\n", seq, rxseq);
1895 bus->rx_badseq++; 1894 bus->sdcnt.rx_badseq++;
1896 rxseq = seq; 1895 rxseq = seq;
1897 } 1896 }
1898 1897
@@ -1937,7 +1936,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
1937 brcmf_dbg(ERROR, "too long: len %d rdlen %d\n", 1936 brcmf_dbg(ERROR, "too long: len %d rdlen %d\n",
1938 len, rdlen); 1937 len, rdlen);
1939 bus->sdiodev->bus_if->dstats.rx_errors++; 1938 bus->sdiodev->bus_if->dstats.rx_errors++;
1940 bus->rx_toolong++; 1939 bus->sdcnt.rx_toolong++;
1941 brcmf_sdbrcm_rxfail(bus, false, false); 1940 brcmf_sdbrcm_rxfail(bus, false, false);
1942 continue; 1941 continue;
1943 } 1942 }
@@ -1960,7 +1959,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
1960 /* Read the remaining frame data */ 1959 /* Read the remaining frame data */
1961 sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad, 1960 sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
1962 SDIO_FUNC_2, F2SYNC, pkt); 1961 SDIO_FUNC_2, F2SYNC, pkt);
1963 bus->f2rxdata++; 1962 bus->sdcnt.f2rxdata++;
1964 1963
1965 if (sdret < 0) { 1964 if (sdret < 0) {
1966 brcmf_dbg(ERROR, "read %d %s bytes failed: %d\n", rdlen, 1965 brcmf_dbg(ERROR, "read %d %s bytes failed: %d\n", rdlen,
@@ -2147,18 +2146,18 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
2147 2146
2148 ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad, 2147 ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad,
2149 SDIO_FUNC_2, F2SYNC, pkt); 2148 SDIO_FUNC_2, F2SYNC, pkt);
2150 bus->f2txdata++; 2149 bus->sdcnt.f2txdata++;
2151 2150
2152 if (ret < 0) { 2151 if (ret < 0) {
2153 /* On failure, abort the command and terminate the frame */ 2152 /* On failure, abort the command and terminate the frame */
2154 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n", 2153 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
2155 ret); 2154 ret);
2156 bus->tx_sderrs++; 2155 bus->sdcnt.tx_sderrs++;
2157 2156
2158 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2); 2157 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
2159 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL, 2158 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
2160 SFC_WF_TERM, NULL); 2159 SFC_WF_TERM, NULL);
2161 bus->f1regdata++; 2160 bus->sdcnt.f1regdata++;
2162 2161
2163 for (i = 0; i < 3; i++) { 2162 for (i = 0; i < 3; i++) {
2164 u8 hi, lo; 2163 u8 hi, lo;
@@ -2166,7 +2165,7 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
2166 SBSDIO_FUNC1_WFRAMEBCHI, NULL); 2165 SBSDIO_FUNC1_WFRAMEBCHI, NULL);
2167 lo = brcmf_sdio_regrb(bus->sdiodev, 2166 lo = brcmf_sdio_regrb(bus->sdiodev,
2168 SBSDIO_FUNC1_WFRAMEBCLO, NULL); 2167 SBSDIO_FUNC1_WFRAMEBCLO, NULL);
2169 bus->f1regdata += 2; 2168 bus->sdcnt.f1regdata += 2;
2170 if ((hi == 0) && (lo == 0)) 2169 if ((hi == 0) && (lo == 0))
2171 break; 2170 break;
2172 } 2171 }
@@ -2224,7 +2223,7 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
2224 ret = r_sdreg32(bus, &intstatus, 2223 ret = r_sdreg32(bus, &intstatus,
2225 offsetof(struct sdpcmd_regs, 2224 offsetof(struct sdpcmd_regs,
2226 intstatus)); 2225 intstatus));
2227 bus->f2txdata++; 2226 bus->sdcnt.f2txdata++;
2228 if (ret != 0) 2227 if (ret != 0)
2229 break; 2228 break;
2230 if (intstatus & bus->hostintmask) 2229 if (intstatus & bus->hostintmask)
@@ -2417,7 +2416,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2417 bus->ipend = false; 2416 bus->ipend = false;
2418 err = r_sdreg32(bus, &newstatus, 2417 err = r_sdreg32(bus, &newstatus,
2419 offsetof(struct sdpcmd_regs, intstatus)); 2418 offsetof(struct sdpcmd_regs, intstatus));
2420 bus->f1regdata++; 2419 bus->sdcnt.f1regdata++;
2421 if (err != 0) 2420 if (err != 0)
2422 newstatus = 0; 2421 newstatus = 0;
2423 newstatus &= bus->hostintmask; 2422 newstatus &= bus->hostintmask;
@@ -2426,7 +2425,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2426 err = w_sdreg32(bus, newstatus, 2425 err = w_sdreg32(bus, newstatus,
2427 offsetof(struct sdpcmd_regs, 2426 offsetof(struct sdpcmd_regs,
2428 intstatus)); 2427 intstatus));
2429 bus->f1regdata++; 2428 bus->sdcnt.f1regdata++;
2430 } 2429 }
2431 } 2430 }
2432 2431
@@ -2445,7 +2444,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2445 2444
2446 err = r_sdreg32(bus, &newstatus, 2445 err = r_sdreg32(bus, &newstatus,
2447 offsetof(struct sdpcmd_regs, intstatus)); 2446 offsetof(struct sdpcmd_regs, intstatus));
2448 bus->f1regdata += 2; 2447 bus->sdcnt.f1regdata += 2;
2449 bus->fcstate = 2448 bus->fcstate =
2450 !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)); 2449 !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE));
2451 intstatus |= (newstatus & bus->hostintmask); 2450 intstatus |= (newstatus & bus->hostintmask);
@@ -2502,7 +2501,7 @@ clkwait:
2502 int ret, i; 2501 int ret, i;
2503 2502
2504 ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad, 2503 ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
2505 SDIO_FUNC_2, F2SYNC, (u8 *) bus->ctrl_frame_buf, 2504 SDIO_FUNC_2, F2SYNC, bus->ctrl_frame_buf,
2506 (u32) bus->ctrl_frame_len); 2505 (u32) bus->ctrl_frame_len);
2507 2506
2508 if (ret < 0) { 2507 if (ret < 0) {
@@ -2510,13 +2509,13 @@ clkwait:
2510 terminate the frame */ 2509 terminate the frame */
2511 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n", 2510 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
2512 ret); 2511 ret);
2513 bus->tx_sderrs++; 2512 bus->sdcnt.tx_sderrs++;
2514 2513
2515 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2); 2514 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
2516 2515
2517 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL, 2516 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
2518 SFC_WF_TERM, &err); 2517 SFC_WF_TERM, &err);
2519 bus->f1regdata++; 2518 bus->sdcnt.f1regdata++;
2520 2519
2521 for (i = 0; i < 3; i++) { 2520 for (i = 0; i < 3; i++) {
2522 u8 hi, lo; 2521 u8 hi, lo;
@@ -2526,7 +2525,7 @@ clkwait:
2526 lo = brcmf_sdio_regrb(bus->sdiodev, 2525 lo = brcmf_sdio_regrb(bus->sdiodev,
2527 SBSDIO_FUNC1_WFRAMEBCLO, 2526 SBSDIO_FUNC1_WFRAMEBCLO,
2528 &err); 2527 &err);
2529 bus->f1regdata += 2; 2528 bus->sdcnt.f1regdata += 2;
2530 if ((hi == 0) && (lo == 0)) 2529 if ((hi == 0) && (lo == 0))
2531 break; 2530 break;
2532 } 2531 }
@@ -2657,7 +2656,7 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
2657 /* Check for existing queue, current flow-control, 2656 /* Check for existing queue, current flow-control,
2658 pending event, or pending clock */ 2657 pending event, or pending clock */
2659 brcmf_dbg(TRACE, "deferring pktq len %d\n", pktq_len(&bus->txq)); 2658 brcmf_dbg(TRACE, "deferring pktq len %d\n", pktq_len(&bus->txq));
2660 bus->fcqueued++; 2659 bus->sdcnt.fcqueued++;
2661 2660
2662 /* Priority based enq */ 2661 /* Priority based enq */
2663 spin_lock_bh(&bus->txqlock); 2662 spin_lock_bh(&bus->txqlock);
@@ -2845,13 +2844,13 @@ static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
2845 /* On failure, abort the command and terminate the frame */ 2844 /* On failure, abort the command and terminate the frame */
2846 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n", 2845 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
2847 ret); 2846 ret);
2848 bus->tx_sderrs++; 2847 bus->sdcnt.tx_sderrs++;
2849 2848
2850 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2); 2849 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
2851 2850
2852 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL, 2851 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
2853 SFC_WF_TERM, NULL); 2852 SFC_WF_TERM, NULL);
2854 bus->f1regdata++; 2853 bus->sdcnt.f1regdata++;
2855 2854
2856 for (i = 0; i < 3; i++) { 2855 for (i = 0; i < 3; i++) {
2857 u8 hi, lo; 2856 u8 hi, lo;
@@ -2859,7 +2858,7 @@ static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
2859 SBSDIO_FUNC1_WFRAMEBCHI, NULL); 2858 SBSDIO_FUNC1_WFRAMEBCHI, NULL);
2860 lo = brcmf_sdio_regrb(bus->sdiodev, 2859 lo = brcmf_sdio_regrb(bus->sdiodev,
2861 SBSDIO_FUNC1_WFRAMEBCLO, NULL); 2860 SBSDIO_FUNC1_WFRAMEBCLO, NULL);
2862 bus->f1regdata += 2; 2861 bus->sdcnt.f1regdata += 2;
2863 if (hi == 0 && lo == 0) 2862 if (hi == 0 && lo == 0)
2864 break; 2863 break;
2865 } 2864 }
@@ -2976,13 +2975,324 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2976 up(&bus->sdsem); 2975 up(&bus->sdsem);
2977 2976
2978 if (ret) 2977 if (ret)
2979 bus->tx_ctlerrs++; 2978 bus->sdcnt.tx_ctlerrs++;
2980 else 2979 else
2981 bus->tx_ctlpkts++; 2980 bus->sdcnt.tx_ctlpkts++;
2982 2981
2983 return ret ? -EIO : 0; 2982 return ret ? -EIO : 0;
2984} 2983}
2985 2984
2985#ifdef DEBUG
2986static inline bool brcmf_sdio_valid_shared_address(u32 addr)
2987{
2988 return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
2989}
2990
2991static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
2992 struct sdpcm_shared *sh)
2993{
2994 u32 addr;
2995 int rv;
2996 u32 shaddr = 0;
2997 struct sdpcm_shared_le sh_le;
2998 __le32 addr_le;
2999
3000 shaddr = bus->ramsize - 4;
3001
3002 /*
3003 * Read last word in socram to determine
3004 * address of sdpcm_shared structure
3005 */
3006 rv = brcmf_sdbrcm_membytes(bus, false, shaddr,
3007 (u8 *)&addr_le, 4);
3008 if (rv < 0)
3009 return rv;
3010
3011 addr = le32_to_cpu(addr_le);
3012
3013 brcmf_dbg(INFO, "sdpcm_shared address 0x%08X\n", addr);
3014
3015 /*
3016 * Check if addr is valid.
3017 * NVRAM length at the end of memory should have been overwritten.
3018 */
3019 if (!brcmf_sdio_valid_shared_address(addr)) {
3020 brcmf_dbg(ERROR, "invalid sdpcm_shared address 0x%08X\n",
3021 addr);
3022 return -EINVAL;
3023 }
3024
3025 /* Read hndrte_shared structure */
3026 rv = brcmf_sdbrcm_membytes(bus, false, addr, (u8 *)&sh_le,
3027 sizeof(struct sdpcm_shared_le));
3028 if (rv < 0)
3029 return rv;
3030
3031 /* Endianness */
3032 sh->flags = le32_to_cpu(sh_le.flags);
3033 sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
3034 sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
3035 sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
3036 sh->assert_line = le32_to_cpu(sh_le.assert_line);
3037 sh->console_addr = le32_to_cpu(sh_le.console_addr);
3038 sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
3039
3040 if ((sh->flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION) {
3041 brcmf_dbg(ERROR,
3042 "sdpcm_shared version mismatch: dhd %d dongle %d\n",
3043 SDPCM_SHARED_VERSION,
3044 sh->flags & SDPCM_SHARED_VERSION_MASK);
3045 return -EPROTO;
3046 }
3047
3048 return 0;
3049}
3050
3051static int brcmf_sdio_dump_console(struct brcmf_sdio *bus,
3052 struct sdpcm_shared *sh, char __user *data,
3053 size_t count)
3054{
3055 u32 addr, console_ptr, console_size, console_index;
3056 char *conbuf = NULL;
3057 __le32 sh_val;
3058 int rv;
3059 loff_t pos = 0;
3060 int nbytes = 0;
3061
3062 /* obtain console information from device memory */
3063 addr = sh->console_addr + offsetof(struct rte_console, log_le);
3064 rv = brcmf_sdbrcm_membytes(bus, false, addr,
3065 (u8 *)&sh_val, sizeof(u32));
3066 if (rv < 0)
3067 return rv;
3068 console_ptr = le32_to_cpu(sh_val);
3069
3070 addr = sh->console_addr + offsetof(struct rte_console, log_le.buf_size);
3071 rv = brcmf_sdbrcm_membytes(bus, false, addr,
3072 (u8 *)&sh_val, sizeof(u32));
3073 if (rv < 0)
3074 return rv;
3075 console_size = le32_to_cpu(sh_val);
3076
3077 addr = sh->console_addr + offsetof(struct rte_console, log_le.idx);
3078 rv = brcmf_sdbrcm_membytes(bus, false, addr,
3079 (u8 *)&sh_val, sizeof(u32));
3080 if (rv < 0)
3081 return rv;
3082 console_index = le32_to_cpu(sh_val);
3083
3084 /* allocate buffer for console data */
3085 if (console_size <= CONSOLE_BUFFER_MAX)
3086 conbuf = vzalloc(console_size+1);
3087
3088 if (!conbuf)
3089 return -ENOMEM;
3090
3091 /* obtain the console data from device */
3092 conbuf[console_size] = '\0';
3093 rv = brcmf_sdbrcm_membytes(bus, false, console_ptr, (u8 *)conbuf,
3094 console_size);
3095 if (rv < 0)
3096 goto done;
3097
3098 rv = simple_read_from_buffer(data, count, &pos,
3099 conbuf + console_index,
3100 console_size - console_index);
3101 if (rv < 0)
3102 goto done;
3103
3104 nbytes = rv;
3105 if (console_index > 0) {
3106 pos = 0;
3107 rv = simple_read_from_buffer(data+nbytes, count, &pos,
3108 conbuf, console_index - 1);
3109 if (rv < 0)
3110 goto done;
3111 rv += nbytes;
3112 }
3113done:
3114 vfree(conbuf);
3115 return rv;
3116}
3117
3118static int brcmf_sdio_trap_info(struct brcmf_sdio *bus, struct sdpcm_shared *sh,
3119 char __user *data, size_t count)
3120{
3121 int error, res;
3122 char buf[350];
3123 struct brcmf_trap_info tr;
3124 int nbytes;
3125 loff_t pos = 0;
3126
3127 if ((sh->flags & SDPCM_SHARED_TRAP) == 0)
3128 return 0;
3129
3130 error = brcmf_sdbrcm_membytes(bus, false, sh->trap_addr, (u8 *)&tr,
3131 sizeof(struct brcmf_trap_info));
3132 if (error < 0)
3133 return error;
3134
3135 nbytes = brcmf_sdio_dump_console(bus, sh, data, count);
3136 if (nbytes < 0)
3137 return nbytes;
3138
3139 res = scnprintf(buf, sizeof(buf),
3140 "dongle trap info: type 0x%x @ epc 0x%08x\n"
3141 " cpsr 0x%08x spsr 0x%08x sp 0x%08x\n"
3142 " lr 0x%08x pc 0x%08x offset 0x%x\n"
3143 " r0 0x%08x r1 0x%08x r2 0x%08x r3 0x%08x\n"
3144 " r4 0x%08x r5 0x%08x r6 0x%08x r7 0x%08x\n",
3145 le32_to_cpu(tr.type), le32_to_cpu(tr.epc),
3146 le32_to_cpu(tr.cpsr), le32_to_cpu(tr.spsr),
3147 le32_to_cpu(tr.r13), le32_to_cpu(tr.r14),
3148 le32_to_cpu(tr.pc), sh->trap_addr,
3149 le32_to_cpu(tr.r0), le32_to_cpu(tr.r1),
3150 le32_to_cpu(tr.r2), le32_to_cpu(tr.r3),
3151 le32_to_cpu(tr.r4), le32_to_cpu(tr.r5),
3152 le32_to_cpu(tr.r6), le32_to_cpu(tr.r7));
3153
3154 error = simple_read_from_buffer(data+nbytes, count, &pos, buf, res);
3155 if (error < 0)
3156 return error;
3157
3158 nbytes += error;
3159 return nbytes;
3160}
3161
3162static int brcmf_sdio_assert_info(struct brcmf_sdio *bus,
3163 struct sdpcm_shared *sh, char __user *data,
3164 size_t count)
3165{
3166 int error = 0;
3167 char buf[200];
3168 char file[80] = "?";
3169 char expr[80] = "<???>";
3170 int res;
3171 loff_t pos = 0;
3172
3173 if ((sh->flags & SDPCM_SHARED_ASSERT_BUILT) == 0) {
3174 brcmf_dbg(INFO, "firmware not built with -assert\n");
3175 return 0;
3176 } else if ((sh->flags & SDPCM_SHARED_ASSERT) == 0) {
3177 brcmf_dbg(INFO, "no assert in dongle\n");
3178 return 0;
3179 }
3180
3181 if (sh->assert_file_addr != 0) {
3182 error = brcmf_sdbrcm_membytes(bus, false, sh->assert_file_addr,
3183 (u8 *)file, 80);
3184 if (error < 0)
3185 return error;
3186 }
3187 if (sh->assert_exp_addr != 0) {
3188 error = brcmf_sdbrcm_membytes(bus, false, sh->assert_exp_addr,
3189 (u8 *)expr, 80);
3190 if (error < 0)
3191 return error;
3192 }
3193
3194 res = scnprintf(buf, sizeof(buf),
3195 "dongle assert: %s:%d: assert(%s)\n",
3196 file, sh->assert_line, expr);
3197 return simple_read_from_buffer(data, count, &pos, buf, res);
3198}
3199
3200static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
3201{
3202 int error;
3203 struct sdpcm_shared sh;
3204
3205 down(&bus->sdsem);
3206 error = brcmf_sdio_readshared(bus, &sh);
3207 up(&bus->sdsem);
3208
3209 if (error < 0)
3210 return error;
3211
3212 if ((sh.flags & SDPCM_SHARED_ASSERT_BUILT) == 0)
3213 brcmf_dbg(INFO, "firmware not built with -assert\n");
3214 else if (sh.flags & SDPCM_SHARED_ASSERT)
3215 brcmf_dbg(ERROR, "assertion in dongle\n");
3216
3217 if (sh.flags & SDPCM_SHARED_TRAP)
3218 brcmf_dbg(ERROR, "firmware trap in dongle\n");
3219
3220 return 0;
3221}
3222
3223static int brcmf_sdbrcm_died_dump(struct brcmf_sdio *bus, char __user *data,
3224 size_t count, loff_t *ppos)
3225{
3226 int error = 0;
3227 struct sdpcm_shared sh;
3228 int nbytes = 0;
3229 loff_t pos = *ppos;
3230
3231 if (pos != 0)
3232 return 0;
3233
3234 down(&bus->sdsem);
3235 error = brcmf_sdio_readshared(bus, &sh);
3236 if (error < 0)
3237 goto done;
3238
3239 error = brcmf_sdio_assert_info(bus, &sh, data, count);
3240 if (error < 0)
3241 goto done;
3242
3243 nbytes = error;
3244 error = brcmf_sdio_trap_info(bus, &sh, data, count);
3245 if (error < 0)
3246 goto done;
3247
3248 error += nbytes;
3249 *ppos += error;
3250done:
3251 up(&bus->sdsem);
3252 return error;
3253}
3254
3255static ssize_t brcmf_sdio_forensic_read(struct file *f, char __user *data,
3256 size_t count, loff_t *ppos)
3257{
3258 struct brcmf_sdio *bus = f->private_data;
3259 int res;
3260
3261 res = brcmf_sdbrcm_died_dump(bus, data, count, ppos);
3262 if (res > 0)
3263 *ppos += res;
3264 return (ssize_t)res;
3265}
3266
3267static const struct file_operations brcmf_sdio_forensic_ops = {
3268 .owner = THIS_MODULE,
3269 .open = simple_open,
3270 .read = brcmf_sdio_forensic_read
3271};
3272
3273static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
3274{
3275 struct brcmf_pub *drvr = bus->sdiodev->bus_if->drvr;
3276 struct dentry *dentry = brcmf_debugfs_get_devdir(drvr);
3277
3278 if (IS_ERR_OR_NULL(dentry))
3279 return;
3280
3281 debugfs_create_file("forensics", S_IRUGO, dentry, bus,
3282 &brcmf_sdio_forensic_ops);
3283 brcmf_debugfs_create_sdio_count(drvr, &bus->sdcnt);
3284}
3285#else
3286static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
3287{
3288 return 0;
3289}
3290
3291static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
3292{
3293}
3294#endif /* DEBUG */
3295
2986static int 3296static int
2987brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen) 3297brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
2988{ 3298{
@@ -3009,60 +3319,27 @@ brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
3009 rxlen, msglen); 3319 rxlen, msglen);
3010 } else if (timeleft == 0) { 3320 } else if (timeleft == 0) {
3011 brcmf_dbg(ERROR, "resumed on timeout\n"); 3321 brcmf_dbg(ERROR, "resumed on timeout\n");
3322 brcmf_sdbrcm_checkdied(bus);
3012 } else if (pending) { 3323 } else if (pending) {
3013 brcmf_dbg(CTL, "cancelled\n"); 3324 brcmf_dbg(CTL, "cancelled\n");
3014 return -ERESTARTSYS; 3325 return -ERESTARTSYS;
3015 } else { 3326 } else {
3016 brcmf_dbg(CTL, "resumed for unknown reason?\n"); 3327 brcmf_dbg(CTL, "resumed for unknown reason?\n");
3328 brcmf_sdbrcm_checkdied(bus);
3017 } 3329 }
3018 3330
3019 if (rxlen) 3331 if (rxlen)
3020 bus->rx_ctlpkts++; 3332 bus->sdcnt.rx_ctlpkts++;
3021 else 3333 else
3022 bus->rx_ctlerrs++; 3334 bus->sdcnt.rx_ctlerrs++;
3023 3335
3024 return rxlen ? (int)rxlen : -ETIMEDOUT; 3336 return rxlen ? (int)rxlen : -ETIMEDOUT;
3025} 3337}
3026 3338
3027static int brcmf_sdbrcm_downloadvars(struct brcmf_sdio *bus, void *arg, int len)
3028{
3029 int bcmerror = 0;
3030
3031 brcmf_dbg(TRACE, "Enter\n");
3032
3033 /* Basic sanity checks */
3034 if (bus->sdiodev->bus_if->drvr_up) {
3035 bcmerror = -EISCONN;
3036 goto err;
3037 }
3038 if (!len) {
3039 bcmerror = -EOVERFLOW;
3040 goto err;
3041 }
3042
3043 /* Free the old ones and replace with passed variables */
3044 kfree(bus->vars);
3045
3046 bus->vars = kmalloc(len, GFP_ATOMIC);
3047 bus->varsz = bus->vars ? len : 0;
3048 if (bus->vars == NULL) {
3049 bcmerror = -ENOMEM;
3050 goto err;
3051 }
3052
3053 /* Copy the passed variables, which should include the
3054 terminating double-null */
3055 memcpy(bus->vars, arg, bus->varsz);
3056err:
3057 return bcmerror;
3058}
3059
3060static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus) 3339static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus)
3061{ 3340{
3062 int bcmerror = 0; 3341 int bcmerror = 0;
3063 u32 varsize;
3064 u32 varaddr; 3342 u32 varaddr;
3065 u8 *vbuffer;
3066 u32 varsizew; 3343 u32 varsizew;
3067 __le32 varsizew_le; 3344 __le32 varsizew_le;
3068#ifdef DEBUG 3345#ifdef DEBUG
@@ -3071,56 +3348,44 @@ static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus)
3071 3348
3072 /* Even if there are no vars are to be written, we still 3349 /* Even if there are no vars are to be written, we still
3073 need to set the ramsize. */ 3350 need to set the ramsize. */
3074 varsize = bus->varsz ? roundup(bus->varsz, 4) : 0; 3351 varaddr = (bus->ramsize - 4) - bus->varsz;
3075 varaddr = (bus->ramsize - 4) - varsize;
3076 3352
3077 if (bus->vars) { 3353 if (bus->vars) {
3078 vbuffer = kzalloc(varsize, GFP_ATOMIC);
3079 if (!vbuffer)
3080 return -ENOMEM;
3081
3082 memcpy(vbuffer, bus->vars, bus->varsz);
3083
3084 /* Write the vars list */ 3354 /* Write the vars list */
3085 bcmerror = 3355 bcmerror = brcmf_sdbrcm_membytes(bus, true, varaddr,
3086 brcmf_sdbrcm_membytes(bus, true, varaddr, vbuffer, varsize); 3356 bus->vars, bus->varsz);
3087#ifdef DEBUG 3357#ifdef DEBUG
3088 /* Verify NVRAM bytes */ 3358 /* Verify NVRAM bytes */
3089 brcmf_dbg(INFO, "Compare NVRAM dl & ul; varsize=%d\n", varsize); 3359 brcmf_dbg(INFO, "Compare NVRAM dl & ul; varsize=%d\n",
3090 nvram_ularray = kmalloc(varsize, GFP_ATOMIC); 3360 bus->varsz);
3091 if (!nvram_ularray) { 3361 nvram_ularray = kmalloc(bus->varsz, GFP_ATOMIC);
3092 kfree(vbuffer); 3362 if (!nvram_ularray)
3093 return -ENOMEM; 3363 return -ENOMEM;
3094 }
3095 3364
3096 /* Upload image to verify downloaded contents. */ 3365 /* Upload image to verify downloaded contents. */
3097 memset(nvram_ularray, 0xaa, varsize); 3366 memset(nvram_ularray, 0xaa, bus->varsz);
3098 3367
3099 /* Read the vars list to temp buffer for comparison */ 3368 /* Read the vars list to temp buffer for comparison */
3100 bcmerror = 3369 bcmerror = brcmf_sdbrcm_membytes(bus, false, varaddr,
3101 brcmf_sdbrcm_membytes(bus, false, varaddr, nvram_ularray, 3370 nvram_ularray, bus->varsz);
3102 varsize);
3103 if (bcmerror) { 3371 if (bcmerror) {
3104 brcmf_dbg(ERROR, "error %d on reading %d nvram bytes at 0x%08x\n", 3372 brcmf_dbg(ERROR, "error %d on reading %d nvram bytes at 0x%08x\n",
3105 bcmerror, varsize, varaddr); 3373 bcmerror, bus->varsz, varaddr);
3106 } 3374 }
3107 /* Compare the org NVRAM with the one read from RAM */ 3375 /* Compare the org NVRAM with the one read from RAM */
3108 if (memcmp(vbuffer, nvram_ularray, varsize)) 3376 if (memcmp(bus->vars, nvram_ularray, bus->varsz))
3109 brcmf_dbg(ERROR, "Downloaded NVRAM image is corrupted\n"); 3377 brcmf_dbg(ERROR, "Downloaded NVRAM image is corrupted\n");
3110 else 3378 else
3111 brcmf_dbg(ERROR, "Download/Upload/Compare of NVRAM ok\n"); 3379 brcmf_dbg(ERROR, "Download/Upload/Compare of NVRAM ok\n");
3112 3380
3113 kfree(nvram_ularray); 3381 kfree(nvram_ularray);
3114#endif /* DEBUG */ 3382#endif /* DEBUG */
3115
3116 kfree(vbuffer);
3117 } 3383 }
3118 3384
3119 /* adjust to the user specified RAM */ 3385 /* adjust to the user specified RAM */
3120 brcmf_dbg(INFO, "Physical memory size: %d\n", bus->ramsize); 3386 brcmf_dbg(INFO, "Physical memory size: %d\n", bus->ramsize);
3121 brcmf_dbg(INFO, "Vars are at %d, orig varsize is %d\n", 3387 brcmf_dbg(INFO, "Vars are at %d, orig varsize is %d\n",
3122 varaddr, varsize); 3388 varaddr, bus->varsz);
3123 varsize = ((bus->ramsize - 4) - varaddr);
3124 3389
3125 /* 3390 /*
3126 * Determine the length token: 3391 * Determine the length token:
@@ -3131,13 +3396,13 @@ static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus)
3131 varsizew = 0; 3396 varsizew = 0;
3132 varsizew_le = cpu_to_le32(0); 3397 varsizew_le = cpu_to_le32(0);
3133 } else { 3398 } else {
3134 varsizew = varsize / 4; 3399 varsizew = bus->varsz / 4;
3135 varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF); 3400 varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
3136 varsizew_le = cpu_to_le32(varsizew); 3401 varsizew_le = cpu_to_le32(varsizew);
3137 } 3402 }
3138 3403
3139 brcmf_dbg(INFO, "New varsize is %d, length token=0x%08x\n", 3404 brcmf_dbg(INFO, "New varsize is %d, length token=0x%08x\n",
3140 varsize, varsizew); 3405 bus->varsz, varsizew);
3141 3406
3142 /* Write the length token to the last word */ 3407 /* Write the length token to the last word */
3143 bcmerror = brcmf_sdbrcm_membytes(bus, true, (bus->ramsize - 4), 3408 bcmerror = brcmf_sdbrcm_membytes(bus, true, (bus->ramsize - 4),
@@ -3261,13 +3526,21 @@ err:
3261 * by two NULs. 3526 * by two NULs.
3262*/ 3527*/
3263 3528
3264static uint brcmf_process_nvram_vars(char *varbuf, uint len) 3529static int brcmf_process_nvram_vars(struct brcmf_sdio *bus)
3265{ 3530{
3531 char *varbuf;
3266 char *dp; 3532 char *dp;
3267 bool findNewline; 3533 bool findNewline;
3268 int column; 3534 int column;
3269 uint buf_len, n; 3535 int ret = 0;
3536 uint buf_len, n, len;
3270 3537
3538 len = bus->firmware->size;
3539 varbuf = vmalloc(len);
3540 if (!varbuf)
3541 return -ENOMEM;
3542
3543 memcpy(varbuf, bus->firmware->data, len);
3271 dp = varbuf; 3544 dp = varbuf;
3272 3545
3273 findNewline = false; 3546 findNewline = false;
@@ -3296,56 +3569,44 @@ static uint brcmf_process_nvram_vars(char *varbuf, uint len)
3296 column++; 3569 column++;
3297 } 3570 }
3298 buf_len = dp - varbuf; 3571 buf_len = dp - varbuf;
3299
3300 while (dp < varbuf + n) 3572 while (dp < varbuf + n)
3301 *dp++ = 0; 3573 *dp++ = 0;
3302 3574
3303 return buf_len; 3575 kfree(bus->vars);
3576 /* roundup needed for download to device */
3577 bus->varsz = roundup(buf_len + 1, 4);
3578 bus->vars = kmalloc(bus->varsz, GFP_KERNEL);
3579 if (bus->vars == NULL) {
3580 bus->varsz = 0;
3581 ret = -ENOMEM;
3582 goto err;
3583 }
3584
3585 /* copy the processed variables and add null termination */
3586 memcpy(bus->vars, varbuf, buf_len);
3587 bus->vars[buf_len] = 0;
3588err:
3589 vfree(varbuf);
3590 return ret;
3304} 3591}
3305 3592
3306static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus) 3593static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
3307{ 3594{
3308 uint len;
3309 char *memblock = NULL;
3310 char *bufp;
3311 int ret; 3595 int ret;
3312 3596
3597 if (bus->sdiodev->bus_if->drvr_up)
3598 return -EISCONN;
3599
3313 ret = request_firmware(&bus->firmware, BRCMF_SDIO_NV_NAME, 3600 ret = request_firmware(&bus->firmware, BRCMF_SDIO_NV_NAME,
3314 &bus->sdiodev->func[2]->dev); 3601 &bus->sdiodev->func[2]->dev);
3315 if (ret) { 3602 if (ret) {
3316 brcmf_dbg(ERROR, "Fail to request nvram %d\n", ret); 3603 brcmf_dbg(ERROR, "Fail to request nvram %d\n", ret);
3317 return ret; 3604 return ret;
3318 } 3605 }
3319 bus->fw_ptr = 0;
3320
3321 memblock = kmalloc(MEMBLOCK, GFP_ATOMIC);
3322 if (memblock == NULL) {
3323 ret = -ENOMEM;
3324 goto err;
3325 }
3326
3327 len = brcmf_sdbrcm_get_image(memblock, MEMBLOCK, bus);
3328
3329 if (len > 0 && len < MEMBLOCK) {
3330 bufp = (char *)memblock;
3331 bufp[len] = 0;
3332 len = brcmf_process_nvram_vars(bufp, len);
3333 bufp += len;
3334 *bufp++ = 0;
3335 if (len)
3336 ret = brcmf_sdbrcm_downloadvars(bus, memblock, len + 1);
3337 if (ret)
3338 brcmf_dbg(ERROR, "error downloading vars: %d\n", ret);
3339 } else {
3340 brcmf_dbg(ERROR, "error reading nvram file: %d\n", len);
3341 ret = -EIO;
3342 }
3343 3606
3344err: 3607 ret = brcmf_process_nvram_vars(bus);
3345 kfree(memblock);
3346 3608
3347 release_firmware(bus->firmware); 3609 release_firmware(bus->firmware);
3348 bus->fw_ptr = 0;
3349 3610
3350 return ret; 3611 return ret;
3351} 3612}
@@ -3419,7 +3680,7 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
3419 return 0; 3680 return 0;
3420 3681
3421 /* Start the watchdog timer */ 3682 /* Start the watchdog timer */
3422 bus->tickcnt = 0; 3683 bus->sdcnt.tickcnt = 0;
3423 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS); 3684 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
3424 3685
3425 down(&bus->sdsem); 3686 down(&bus->sdsem);
@@ -3512,7 +3773,7 @@ void brcmf_sdbrcm_isr(void *arg)
3512 return; 3773 return;
3513 } 3774 }
3514 /* Count the interrupt call */ 3775 /* Count the interrupt call */
3515 bus->intrcount++; 3776 bus->sdcnt.intrcount++;
3516 bus->ipend = true; 3777 bus->ipend = true;
3517 3778
3518 /* Shouldn't get this interrupt if we're sleeping? */ 3779 /* Shouldn't get this interrupt if we're sleeping? */
@@ -3554,7 +3815,8 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3554 bus->polltick = 0; 3815 bus->polltick = 0;
3555 3816
3556 /* Check device if no interrupts */ 3817 /* Check device if no interrupts */
3557 if (!bus->intr || (bus->intrcount == bus->lastintrs)) { 3818 if (!bus->intr ||
3819 (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) {
3558 3820
3559 if (!bus->dpc_sched) { 3821 if (!bus->dpc_sched) {
3560 u8 devpend; 3822 u8 devpend;
@@ -3569,7 +3831,7 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3569 /* If there is something, make like the ISR and 3831 /* If there is something, make like the ISR and
3570 schedule the DPC */ 3832 schedule the DPC */
3571 if (intstatus) { 3833 if (intstatus) {
3572 bus->pollcnt++; 3834 bus->sdcnt.pollcnt++;
3573 bus->ipend = true; 3835 bus->ipend = true;
3574 3836
3575 bus->dpc_sched = true; 3837 bus->dpc_sched = true;
@@ -3581,7 +3843,7 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3581 } 3843 }
3582 3844
3583 /* Update interrupt tracking */ 3845 /* Update interrupt tracking */
3584 bus->lastintrs = bus->intrcount; 3846 bus->sdcnt.lastintrs = bus->sdcnt.intrcount;
3585 } 3847 }
3586#ifdef DEBUG 3848#ifdef DEBUG
3587 /* Poll for console output periodically */ 3849 /* Poll for console output periodically */
@@ -3623,6 +3885,8 @@ static bool brcmf_sdbrcm_chipmatch(u16 chipid)
3623 return true; 3885 return true;
3624 if (chipid == BCM4330_CHIP_ID) 3886 if (chipid == BCM4330_CHIP_ID)
3625 return true; 3887 return true;
3888 if (chipid == BCM4334_CHIP_ID)
3889 return true;
3626 return false; 3890 return false;
3627} 3891}
3628 3892
@@ -3793,7 +4057,7 @@ brcmf_sdbrcm_watchdog_thread(void *data)
3793 if (!wait_for_completion_interruptible(&bus->watchdog_wait)) { 4057 if (!wait_for_completion_interruptible(&bus->watchdog_wait)) {
3794 brcmf_sdbrcm_bus_watchdog(bus); 4058 brcmf_sdbrcm_bus_watchdog(bus);
3795 /* Count the tick for reference */ 4059 /* Count the tick for reference */
3796 bus->tickcnt++; 4060 bus->sdcnt.tickcnt++;
3797 } else 4061 } else
3798 break; 4062 break;
3799 } 4063 }
@@ -3856,6 +4120,10 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
3856{ 4120{
3857 int ret; 4121 int ret;
3858 struct brcmf_sdio *bus; 4122 struct brcmf_sdio *bus;
4123 struct brcmf_bus_dcmd *dlst;
4124 u32 dngl_txglom;
4125 u32 dngl_txglomalign;
4126 u8 idx;
3859 4127
3860 brcmf_dbg(TRACE, "Enter\n"); 4128 brcmf_dbg(TRACE, "Enter\n");
3861 4129
@@ -3938,8 +4206,29 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
3938 goto fail; 4206 goto fail;
3939 } 4207 }
3940 4208
4209 brcmf_sdio_debugfs_create(bus);
3941 brcmf_dbg(INFO, "completed!!\n"); 4210 brcmf_dbg(INFO, "completed!!\n");
3942 4211
4212 /* sdio bus core specific dcmd */
4213 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
4214 dlst = kzalloc(sizeof(struct brcmf_bus_dcmd), GFP_KERNEL);
4215 if (dlst) {
4216 if (bus->ci->c_inf[idx].rev < 12) {
4217 /* for sdio core rev < 12, disable txgloming */
4218 dngl_txglom = 0;
4219 dlst->name = "bus:txglom";
4220 dlst->param = (char *)&dngl_txglom;
4221 dlst->param_len = sizeof(u32);
4222 } else {
4223 /* otherwise, set txglomalign */
4224 dngl_txglomalign = bus->sdiodev->bus_if->align;
4225 dlst->name = "bus:txglomalign";
4226 dlst->param = (char *)&dngl_txglomalign;
4227 dlst->param_len = sizeof(u32);
4228 }
4229 list_add(&dlst->list, &bus->sdiodev->bus_if->dcmd_list);
4230 }
4231
3943 /* if firmware path present try to download and bring up bus */ 4232 /* if firmware path present try to download and bring up bus */
3944 ret = brcmf_bus_start(bus->sdiodev->dev); 4233 ret = brcmf_bus_start(bus->sdiodev->dev);
3945 if (ret != 0) { 4234 if (ret != 0) {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
index f8e1f1c84d08..58155e23d220 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
@@ -403,6 +403,23 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
403 ci->c_inf[3].cib = 0x03004211; 403 ci->c_inf[3].cib = 0x03004211;
404 ci->ramsize = 0x48000; 404 ci->ramsize = 0x48000;
405 break; 405 break;
406 case BCM4334_CHIP_ID:
407 ci->c_inf[0].wrapbase = 0x18100000;
408 ci->c_inf[0].cib = 0x29004211;
409 ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
410 ci->c_inf[1].base = 0x18002000;
411 ci->c_inf[1].wrapbase = 0x18102000;
412 ci->c_inf[1].cib = 0x0d004211;
413 ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
414 ci->c_inf[2].base = 0x18004000;
415 ci->c_inf[2].wrapbase = 0x18104000;
416 ci->c_inf[2].cib = 0x13080401;
417 ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
418 ci->c_inf[3].base = 0x18003000;
419 ci->c_inf[3].wrapbase = 0x18103000;
420 ci->c_inf[3].cib = 0x07004211;
421 ci->ramsize = 0x80000;
422 break;
406 default: 423 default:
407 brcmf_dbg(ERROR, "chipid 0x%x is not supported\n", ci->chip); 424 brcmf_dbg(ERROR, "chipid 0x%x is not supported\n", ci->chip);
408 return -ENODEV; 425 return -ENODEV;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
index 6d8b7213643a..8c9345dd37d2 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
@@ -318,10 +318,6 @@
318#define IS_SIM(chippkg) \ 318#define IS_SIM(chippkg) \
319 ((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID)) 319 ((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID))
320 320
321#define PCIE(sih) (ai_get_buscoretype(sih) == PCIE_CORE_ID)
322
323#define PCI_FORCEHT(sih) (PCIE(sih) && (ai_get_chip_id(sih) == BCM4716_CHIP_ID))
324
325#ifdef DEBUG 321#ifdef DEBUG
326#define SI_MSG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__) 322#define SI_MSG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
327#else 323#else
@@ -473,9 +469,6 @@ ai_buscore_setup(struct si_info *sii, struct bcma_device *cc)
473 sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK; 469 sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
474 } 470 }
475 471
476 /* figure out buscore */
477 sii->buscore = ai_findcore(&sii->pub, PCIE_CORE_ID, 0);
478
479 return true; 472 return true;
480} 473}
481 474
@@ -483,11 +476,7 @@ static struct si_info *ai_doattach(struct si_info *sii,
483 struct bcma_bus *pbus) 476 struct bcma_bus *pbus)
484{ 477{
485 struct si_pub *sih = &sii->pub; 478 struct si_pub *sih = &sii->pub;
486 u32 w, savewin;
487 struct bcma_device *cc; 479 struct bcma_device *cc;
488 struct ssb_sprom *sprom = &pbus->sprom;
489
490 savewin = 0;
491 480
492 sii->icbus = pbus; 481 sii->icbus = pbus;
493 sii->pcibus = pbus->host_pci; 482 sii->pcibus = pbus->host_pci;
@@ -510,47 +499,7 @@ static struct si_info *ai_doattach(struct si_info *sii,
510 499
511 /* PMU specific initializations */ 500 /* PMU specific initializations */
512 if (ai_get_cccaps(sih) & CC_CAP_PMU) { 501 if (ai_get_cccaps(sih) & CC_CAP_PMU) {
513 si_pmu_init(sih);
514 (void)si_pmu_measure_alpclk(sih); 502 (void)si_pmu_measure_alpclk(sih);
515 si_pmu_res_init(sih);
516 }
517
518 /* setup the GPIO based LED powersave register */
519 w = (sprom->leddc_on_time << BCMA_CC_GPIOTIMER_ONTIME_SHIFT) |
520 (sprom->leddc_off_time << BCMA_CC_GPIOTIMER_OFFTIME_SHIFT);
521 if (w == 0)
522 w = DEFAULT_GPIOTIMERVAL;
523 ai_cc_reg(sih, offsetof(struct chipcregs, gpiotimerval),
524 ~0, w);
525
526 if (ai_get_chip_id(sih) == BCM43224_CHIP_ID) {
527 /*
528 * enable 12 mA drive strenth for 43224 and
529 * set chipControl register bit 15
530 */
531 if (ai_get_chiprev(sih) == 0) {
532 SI_MSG("Applying 43224A0 WARs\n");
533 ai_cc_reg(sih, offsetof(struct chipcregs, chipcontrol),
534 CCTRL43224_GPIO_TOGGLE,
535 CCTRL43224_GPIO_TOGGLE);
536 si_pmu_chipcontrol(sih, 0, CCTRL_43224A0_12MA_LED_DRIVE,
537 CCTRL_43224A0_12MA_LED_DRIVE);
538 }
539 if (ai_get_chiprev(sih) >= 1) {
540 SI_MSG("Applying 43224B0+ WARs\n");
541 si_pmu_chipcontrol(sih, 0, CCTRL_43224B0_12MA_LED_DRIVE,
542 CCTRL_43224B0_12MA_LED_DRIVE);
543 }
544 }
545
546 if (ai_get_chip_id(sih) == BCM4313_CHIP_ID) {
547 /*
548 * enable 12 mA drive strenth for 4313 and
549 * set chipControl register bit 1
550 */
551 SI_MSG("Applying 4313 WARs\n");
552 si_pmu_chipcontrol(sih, 0, CCTRL_4313_12MA_LED_DRIVE,
553 CCTRL_4313_12MA_LED_DRIVE);
554 } 503 }
555 504
556 return sii; 505 return sii;
@@ -589,7 +538,7 @@ void ai_detach(struct si_pub *sih)
589 struct si_pub *si_local = NULL; 538 struct si_pub *si_local = NULL;
590 memcpy(&si_local, &sih, sizeof(struct si_pub **)); 539 memcpy(&si_local, &sih, sizeof(struct si_pub **));
591 540
592 sii = (struct si_info *)sih; 541 sii = container_of(sih, struct si_info, pub);
593 542
594 if (sii == NULL) 543 if (sii == NULL)
595 return; 544 return;
@@ -597,27 +546,6 @@ void ai_detach(struct si_pub *sih)
597 kfree(sii); 546 kfree(sii);
598} 547}
599 548
600/* return index of coreid or BADIDX if not found */
601struct bcma_device *ai_findcore(struct si_pub *sih, u16 coreid, u16 coreunit)
602{
603 struct bcma_device *core;
604 struct si_info *sii;
605 uint found;
606
607 sii = (struct si_info *)sih;
608
609 found = 0;
610
611 list_for_each_entry(core, &sii->icbus->cores, list)
612 if (core->id.id == coreid) {
613 if (found == coreunit)
614 return core;
615 found++;
616 }
617
618 return NULL;
619}
620
621/* 549/*
622 * read/modify chipcommon core register. 550 * read/modify chipcommon core register.
623 */ 551 */
@@ -627,13 +555,12 @@ uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val)
627 u32 w; 555 u32 w;
628 struct si_info *sii; 556 struct si_info *sii;
629 557
630 sii = (struct si_info *)sih; 558 sii = container_of(sih, struct si_info, pub);
631 cc = sii->icbus->drv_cc.core; 559 cc = sii->icbus->drv_cc.core;
632 560
633 /* mask and set */ 561 /* mask and set */
634 if (mask || val) { 562 if (mask || val)
635 bcma_maskset32(cc, regoff, ~mask, val); 563 bcma_maskset32(cc, regoff, ~mask, val);
636 }
637 564
638 /* readback */ 565 /* readback */
639 w = bcma_read32(cc, regoff); 566 w = bcma_read32(cc, regoff);
@@ -694,12 +621,13 @@ ai_clkctl_setdelay(struct si_pub *sih, struct bcma_device *cc)
694/* initialize power control delay registers */ 621/* initialize power control delay registers */
695void ai_clkctl_init(struct si_pub *sih) 622void ai_clkctl_init(struct si_pub *sih)
696{ 623{
624 struct si_info *sii = container_of(sih, struct si_info, pub);
697 struct bcma_device *cc; 625 struct bcma_device *cc;
698 626
699 if (!(ai_get_cccaps(sih) & CC_CAP_PWR_CTL)) 627 if (!(ai_get_cccaps(sih) & CC_CAP_PWR_CTL))
700 return; 628 return;
701 629
702 cc = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0); 630 cc = sii->icbus->drv_cc.core;
703 if (cc == NULL) 631 if (cc == NULL)
704 return; 632 return;
705 633
@@ -721,7 +649,7 @@ u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih)
721 uint slowminfreq; 649 uint slowminfreq;
722 u16 fpdelay; 650 u16 fpdelay;
723 651
724 sii = (struct si_info *)sih; 652 sii = container_of(sih, struct si_info, pub);
725 if (ai_get_cccaps(sih) & CC_CAP_PMU) { 653 if (ai_get_cccaps(sih) & CC_CAP_PMU) {
726 fpdelay = si_pmu_fast_pwrup_delay(sih); 654 fpdelay = si_pmu_fast_pwrup_delay(sih);
727 return fpdelay; 655 return fpdelay;
@@ -731,7 +659,7 @@ u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih)
731 return 0; 659 return 0;
732 660
733 fpdelay = 0; 661 fpdelay = 0;
734 cc = ai_findcore(sih, CC_CORE_ID, 0); 662 cc = sii->icbus->drv_cc.core;
735 if (cc) { 663 if (cc) {
736 slowminfreq = ai_slowclk_freq(sih, false, cc); 664 slowminfreq = ai_slowclk_freq(sih, false, cc);
737 fpdelay = (((bcma_read32(cc, CHIPCREGOFFS(pll_on_delay)) + 2) 665 fpdelay = (((bcma_read32(cc, CHIPCREGOFFS(pll_on_delay)) + 2)
@@ -753,12 +681,9 @@ bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode)
753 struct si_info *sii; 681 struct si_info *sii;
754 struct bcma_device *cc; 682 struct bcma_device *cc;
755 683
756 sii = (struct si_info *)sih; 684 sii = container_of(sih, struct si_info, pub);
757
758 if (PCI_FORCEHT(sih))
759 return mode == BCMA_CLKMODE_FAST;
760 685
761 cc = ai_findcore(&sii->pub, BCMA_CORE_CHIPCOMMON, 0); 686 cc = sii->icbus->drv_cc.core;
762 bcma_core_set_clockmode(cc, mode); 687 bcma_core_set_clockmode(cc, mode);
763 return mode == BCMA_CLKMODE_FAST; 688 return mode == BCMA_CLKMODE_FAST;
764} 689}
@@ -766,16 +691,10 @@ bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode)
766void ai_pci_up(struct si_pub *sih) 691void ai_pci_up(struct si_pub *sih)
767{ 692{
768 struct si_info *sii; 693 struct si_info *sii;
769 struct bcma_device *cc;
770 694
771 sii = (struct si_info *)sih; 695 sii = container_of(sih, struct si_info, pub);
772 696
773 if (PCI_FORCEHT(sih)) { 697 if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI)
774 cc = ai_findcore(&sii->pub, BCMA_CORE_CHIPCOMMON, 0);
775 bcma_core_set_clockmode(cc, BCMA_CLKMODE_FAST);
776 }
777
778 if (PCIE(sih))
779 bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci, true); 698 bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci, true);
780} 699}
781 700
@@ -783,26 +702,20 @@ void ai_pci_up(struct si_pub *sih)
783void ai_pci_down(struct si_pub *sih) 702void ai_pci_down(struct si_pub *sih)
784{ 703{
785 struct si_info *sii; 704 struct si_info *sii;
786 struct bcma_device *cc;
787 705
788 sii = (struct si_info *)sih; 706 sii = container_of(sih, struct si_info, pub);
789 707
790 /* release FORCEHT since chip is going to "down" state */ 708 if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI)
791 if (PCI_FORCEHT(sih)) {
792 cc = ai_findcore(&sii->pub, BCMA_CORE_CHIPCOMMON, 0);
793 bcma_core_set_clockmode(cc, BCMA_CLKMODE_DYNAMIC);
794 }
795
796 if (PCIE(sih))
797 bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci, false); 709 bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci, false);
798} 710}
799 711
800/* Enable BT-COEX & Ex-PA for 4313 */ 712/* Enable BT-COEX & Ex-PA for 4313 */
801void ai_epa_4313war(struct si_pub *sih) 713void ai_epa_4313war(struct si_pub *sih)
802{ 714{
715 struct si_info *sii = container_of(sih, struct si_info, pub);
803 struct bcma_device *cc; 716 struct bcma_device *cc;
804 717
805 cc = ai_findcore(sih, CC_CORE_ID, 0); 718 cc = sii->icbus->drv_cc.core;
806 719
807 /* EPA Fix */ 720 /* EPA Fix */
808 bcma_set32(cc, CHIPCREGOFFS(gpiocontrol), GPIO_CTRL_EPA_EN_MASK); 721 bcma_set32(cc, CHIPCREGOFFS(gpiocontrol), GPIO_CTRL_EPA_EN_MASK);
@@ -814,7 +727,7 @@ bool ai_deviceremoved(struct si_pub *sih)
814 u32 w; 727 u32 w;
815 struct si_info *sii; 728 struct si_info *sii;
816 729
817 sii = (struct si_info *)sih; 730 sii = container_of(sih, struct si_info, pub);
818 731
819 if (sii->icbus->hosttype != BCMA_HOSTTYPE_PCI) 732 if (sii->icbus->hosttype != BCMA_HOSTTYPE_PCI)
820 return false; 733 return false;
@@ -825,15 +738,3 @@ bool ai_deviceremoved(struct si_pub *sih)
825 738
826 return false; 739 return false;
827} 740}
828
829uint ai_get_buscoretype(struct si_pub *sih)
830{
831 struct si_info *sii = (struct si_info *)sih;
832 return sii->buscore->id.id;
833}
834
835uint ai_get_buscorerev(struct si_pub *sih)
836{
837 struct si_info *sii = (struct si_info *)sih;
838 return sii->buscore->id.rev;
839}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
index d9f04a683bdb..89562c1fbf49 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
@@ -88,16 +88,6 @@
88#define CLKD_OTP 0x000f0000 88#define CLKD_OTP 0x000f0000
89#define CLKD_OTP_SHIFT 16 89#define CLKD_OTP_SHIFT 16
90 90
91/* Package IDs */
92#define BCM4717_PKG_ID 9 /* 4717 package id */
93#define BCM4718_PKG_ID 10 /* 4718 package id */
94#define BCM43224_FAB_SMIC 0xa /* the chip is manufactured by SMIC */
95
96/* these are router chips */
97#define BCM4716_CHIP_ID 0x4716 /* 4716 chipcommon chipid */
98#define BCM47162_CHIP_ID 47162 /* 47162 chipcommon chipid */
99#define BCM4748_CHIP_ID 0x4748 /* 4716 chipcommon chipid (OTP, RBBU) */
100
101/* dynamic clock control defines */ 91/* dynamic clock control defines */
102#define LPOMINFREQ 25000 /* low power oscillator min */ 92#define LPOMINFREQ 25000 /* low power oscillator min */
103#define LPOMAXFREQ 43000 /* low power oscillator max */ 93#define LPOMAXFREQ 43000 /* low power oscillator max */
@@ -168,7 +158,6 @@ struct si_info {
168 struct si_pub pub; /* back plane public state (must be first) */ 158 struct si_pub pub; /* back plane public state (must be first) */
169 struct bcma_bus *icbus; /* handle to soc interconnect bus */ 159 struct bcma_bus *icbus; /* handle to soc interconnect bus */
170 struct pci_dev *pcibus; /* handle to pci bus */ 160 struct pci_dev *pcibus; /* handle to pci bus */
171 struct bcma_device *buscore;
172 161
173 u32 chipst; /* chip status */ 162 u32 chipst; /* chip status */
174}; 163};
@@ -183,8 +172,6 @@ struct si_info {
183 172
184 173
185/* AMBA Interconnect exported externs */ 174/* AMBA Interconnect exported externs */
186extern struct bcma_device *ai_findcore(struct si_pub *sih,
187 u16 coreid, u16 coreunit);
188extern u32 ai_core_cflags(struct bcma_device *core, u32 mask, u32 val); 175extern u32 ai_core_cflags(struct bcma_device *core, u32 mask, u32 val);
189 176
190/* === exported functions === */ 177/* === exported functions === */
@@ -193,7 +180,7 @@ extern void ai_detach(struct si_pub *sih);
193extern uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val); 180extern uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val);
194extern void ai_clkctl_init(struct si_pub *sih); 181extern void ai_clkctl_init(struct si_pub *sih);
195extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih); 182extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
196extern bool ai_clkctl_cc(struct si_pub *sih, uint mode); 183extern bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode);
197extern bool ai_deviceremoved(struct si_pub *sih); 184extern bool ai_deviceremoved(struct si_pub *sih);
198 185
199extern void ai_pci_down(struct si_pub *sih); 186extern void ai_pci_down(struct si_pub *sih);
@@ -202,9 +189,6 @@ extern void ai_pci_up(struct si_pub *sih);
202/* Enable Ex-PA for 4313 */ 189/* Enable Ex-PA for 4313 */
203extern void ai_epa_4313war(struct si_pub *sih); 190extern void ai_epa_4313war(struct si_pub *sih);
204 191
205extern uint ai_get_buscoretype(struct si_pub *sih);
206extern uint ai_get_buscorerev(struct si_pub *sih);
207
208static inline u32 ai_get_cccaps(struct si_pub *sih) 192static inline u32 ai_get_cccaps(struct si_pub *sih)
209{ 193{
210 return sih->cccaps; 194 return sih->cccaps;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
index 95b5902bc4b3..01b190a25d94 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
@@ -735,10 +735,8 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
735 * a candidate for aggregation 735 * a candidate for aggregation
736 */ 736 */
737 p = pktq_ppeek(&qi->q, prec); 737 p = pktq_ppeek(&qi->q, prec);
738 /* tx_info must be checked with current p */
739 tx_info = IEEE80211_SKB_CB(p);
740
741 if (p) { 738 if (p) {
739 tx_info = IEEE80211_SKB_CB(p);
742 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && 740 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
743 ((u8) (p->priority) == tid)) { 741 ((u8) (p->priority) == tid)) {
744 plen = p->len + AMPDU_MAX_MPDU_OVERHEAD; 742 plen = p->len + AMPDU_MAX_MPDU_OVERHEAD;
@@ -759,6 +757,7 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
759 p = NULL; 757 p = NULL;
760 continue; 758 continue;
761 } 759 }
760 /* next packet fit for aggregation so dequeue */
762 p = brcmu_pktq_pdeq(&qi->q, prec); 761 p = brcmu_pktq_pdeq(&qi->q, prec);
763 } else { 762 } else {
764 p = NULL; 763 p = NULL;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
index eb77ac3cfb6b..9a4c63f927cb 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
@@ -15,7 +15,9 @@
15 */ 15 */
16 16
17#include <linux/types.h> 17#include <linux/types.h>
18#include <net/cfg80211.h>
18#include <net/mac80211.h> 19#include <net/mac80211.h>
20#include <net/regulatory.h>
19 21
20#include <defs.h> 22#include <defs.h>
21#include "pub.h" 23#include "pub.h"
@@ -23,73 +25,17 @@
23#include "main.h" 25#include "main.h"
24#include "stf.h" 26#include "stf.h"
25#include "channel.h" 27#include "channel.h"
28#include "mac80211_if.h"
26 29
27/* QDB() macro takes a dB value and converts to a quarter dB value */ 30/* QDB() macro takes a dB value and converts to a quarter dB value */
28#define QDB(n) ((n) * BRCMS_TXPWR_DB_FACTOR) 31#define QDB(n) ((n) * BRCMS_TXPWR_DB_FACTOR)
29 32
30#define LOCALE_CHAN_01_11 (1<<0)
31#define LOCALE_CHAN_12_13 (1<<1)
32#define LOCALE_CHAN_14 (1<<2)
33#define LOCALE_SET_5G_LOW_JP1 (1<<3) /* 34-48, step 2 */
34#define LOCALE_SET_5G_LOW_JP2 (1<<4) /* 34-46, step 4 */
35#define LOCALE_SET_5G_LOW1 (1<<5) /* 36-48, step 4 */
36#define LOCALE_SET_5G_LOW2 (1<<6) /* 52 */
37#define LOCALE_SET_5G_LOW3 (1<<7) /* 56-64, step 4 */
38#define LOCALE_SET_5G_MID1 (1<<8) /* 100-116, step 4 */
39#define LOCALE_SET_5G_MID2 (1<<9) /* 120-124, step 4 */
40#define LOCALE_SET_5G_MID3 (1<<10) /* 128 */
41#define LOCALE_SET_5G_HIGH1 (1<<11) /* 132-140, step 4 */
42#define LOCALE_SET_5G_HIGH2 (1<<12) /* 149-161, step 4 */
43#define LOCALE_SET_5G_HIGH3 (1<<13) /* 165 */
44#define LOCALE_CHAN_52_140_ALL (1<<14)
45#define LOCALE_SET_5G_HIGH4 (1<<15) /* 184-216 */
46
47#define LOCALE_CHAN_36_64 (LOCALE_SET_5G_LOW1 | \
48 LOCALE_SET_5G_LOW2 | \
49 LOCALE_SET_5G_LOW3)
50#define LOCALE_CHAN_52_64 (LOCALE_SET_5G_LOW2 | LOCALE_SET_5G_LOW3)
51#define LOCALE_CHAN_100_124 (LOCALE_SET_5G_MID1 | LOCALE_SET_5G_MID2)
52#define LOCALE_CHAN_100_140 (LOCALE_SET_5G_MID1 | LOCALE_SET_5G_MID2 | \
53 LOCALE_SET_5G_MID3 | LOCALE_SET_5G_HIGH1)
54#define LOCALE_CHAN_149_165 (LOCALE_SET_5G_HIGH2 | LOCALE_SET_5G_HIGH3)
55#define LOCALE_CHAN_184_216 LOCALE_SET_5G_HIGH4
56
57#define LOCALE_CHAN_01_14 (LOCALE_CHAN_01_11 | \
58 LOCALE_CHAN_12_13 | \
59 LOCALE_CHAN_14)
60
61#define LOCALE_RADAR_SET_NONE 0
62#define LOCALE_RADAR_SET_1 1
63
64#define LOCALE_RESTRICTED_NONE 0
65#define LOCALE_RESTRICTED_SET_2G_SHORT 1
66#define LOCALE_RESTRICTED_CHAN_165 2
67#define LOCALE_CHAN_ALL_5G 3
68#define LOCALE_RESTRICTED_JAPAN_LEGACY 4
69#define LOCALE_RESTRICTED_11D_2G 5
70#define LOCALE_RESTRICTED_11D_5G 6
71#define LOCALE_RESTRICTED_LOW_HI 7
72#define LOCALE_RESTRICTED_12_13_14 8
73
74#define LOCALE_2G_IDX_i 0
75#define LOCALE_5G_IDX_11 0
76#define LOCALE_MIMO_IDX_bn 0 33#define LOCALE_MIMO_IDX_bn 0
77#define LOCALE_MIMO_IDX_11n 0 34#define LOCALE_MIMO_IDX_11n 0
78 35
79/* max of BAND_5G_PWR_LVLS and 6 for 2.4 GHz */
80#define BRCMS_MAXPWR_TBL_SIZE 6
81/* max of BAND_5G_PWR_LVLS and 14 for 2.4 GHz */ 36/* max of BAND_5G_PWR_LVLS and 14 for 2.4 GHz */
82#define BRCMS_MAXPWR_MIMO_TBL_SIZE 14 37#define BRCMS_MAXPWR_MIMO_TBL_SIZE 14
83 38
84/* power level in group of 2.4GHz band channels:
85 * maxpwr[0] - CCK channels [1]
86 * maxpwr[1] - CCK channels [2-10]
87 * maxpwr[2] - CCK channels [11-14]
88 * maxpwr[3] - OFDM channels [1]
89 * maxpwr[4] - OFDM channels [2-10]
90 * maxpwr[5] - OFDM channels [11-14]
91 */
92
93/* maxpwr mapping to 5GHz band channels: 39/* maxpwr mapping to 5GHz band channels:
94 * maxpwr[0] - channels [34-48] 40 * maxpwr[0] - channels [34-48]
95 * maxpwr[1] - channels [52-60] 41 * maxpwr[1] - channels [52-60]
@@ -101,16 +47,8 @@
101 47
102#define LC(id) LOCALE_MIMO_IDX_ ## id 48#define LC(id) LOCALE_MIMO_IDX_ ## id
103 49
104#define LC_2G(id) LOCALE_2G_IDX_ ## id 50#define LOCALES(mimo2, mimo5) \
105 51 {LC(mimo2), LC(mimo5)}
106#define LC_5G(id) LOCALE_5G_IDX_ ## id
107
108#define LOCALES(band2, band5, mimo2, mimo5) \
109 {LC_2G(band2), LC_5G(band5), LC(mimo2), LC(mimo5)}
110
111/* macro to get 2.4 GHz channel group index for tx power */
112#define CHANNEL_POWER_IDX_2G_CCK(c) (((c) < 2) ? 0 : (((c) < 11) ? 1 : 2))
113#define CHANNEL_POWER_IDX_2G_OFDM(c) (((c) < 2) ? 3 : (((c) < 11) ? 4 : 5))
114 52
115/* macro to get 5 GHz channel group index for tx power */ 53/* macro to get 5 GHz channel group index for tx power */
116#define CHANNEL_POWER_IDX_5G(c) (((c) < 52) ? 0 : \ 54#define CHANNEL_POWER_IDX_5G(c) (((c) < 52) ? 0 : \
@@ -118,18 +56,37 @@
118 (((c) < 100) ? 2 : \ 56 (((c) < 100) ? 2 : \
119 (((c) < 149) ? 3 : 4)))) 57 (((c) < 149) ? 3 : 4))))
120 58
121#define ISDFS_EU(fl) (((fl) & BRCMS_DFS_EU) == BRCMS_DFS_EU) 59#define BRCM_2GHZ_2412_2462 REG_RULE(2412-10, 2462+10, 40, 0, 19, 0)
122 60#define BRCM_2GHZ_2467_2472 REG_RULE(2467-10, 2472+10, 20, 0, 19, \
123struct brcms_cm_band { 61 NL80211_RRF_PASSIVE_SCAN | \
124 /* struct locale_info flags */ 62 NL80211_RRF_NO_IBSS)
125 u8 locale_flags; 63
126 /* List of valid channels in the country */ 64#define BRCM_5GHZ_5180_5240 REG_RULE(5180-10, 5240+10, 40, 0, 21, \
127 struct brcms_chanvec valid_channels; 65 NL80211_RRF_PASSIVE_SCAN | \
128 /* List of restricted use channels */ 66 NL80211_RRF_NO_IBSS)
129 const struct brcms_chanvec *restricted_channels; 67#define BRCM_5GHZ_5260_5320 REG_RULE(5260-10, 5320+10, 40, 0, 21, \
130 /* List of radar sensitive channels */ 68 NL80211_RRF_PASSIVE_SCAN | \
131 const struct brcms_chanvec *radar_channels; 69 NL80211_RRF_DFS | \
132 u8 PAD[8]; 70 NL80211_RRF_NO_IBSS)
71#define BRCM_5GHZ_5500_5700 REG_RULE(5500-10, 5700+10, 40, 0, 21, \
72 NL80211_RRF_PASSIVE_SCAN | \
73 NL80211_RRF_DFS | \
74 NL80211_RRF_NO_IBSS)
75#define BRCM_5GHZ_5745_5825 REG_RULE(5745-10, 5825+10, 40, 0, 21, \
76 NL80211_RRF_PASSIVE_SCAN | \
77 NL80211_RRF_NO_IBSS)
78
79static const struct ieee80211_regdomain brcms_regdom_x2 = {
80 .n_reg_rules = 7,
81 .alpha2 = "X2",
82 .reg_rules = {
83 BRCM_2GHZ_2412_2462,
84 BRCM_2GHZ_2467_2472,
85 BRCM_5GHZ_5180_5240,
86 BRCM_5GHZ_5260_5320,
87 BRCM_5GHZ_5500_5700,
88 BRCM_5GHZ_5745_5825,
89 }
133}; 90};
134 91
135 /* locale per-channel tx power limits for MIMO frames 92 /* locale per-channel tx power limits for MIMO frames
@@ -141,337 +98,23 @@ struct locale_mimo_info {
141 s8 maxpwr20[BRCMS_MAXPWR_MIMO_TBL_SIZE]; 98 s8 maxpwr20[BRCMS_MAXPWR_MIMO_TBL_SIZE];
142 /* tx 40 MHz power limits, qdBm units */ 99 /* tx 40 MHz power limits, qdBm units */
143 s8 maxpwr40[BRCMS_MAXPWR_MIMO_TBL_SIZE]; 100 s8 maxpwr40[BRCMS_MAXPWR_MIMO_TBL_SIZE];
144 u8 flags;
145}; 101};
146 102
147/* Country names and abbreviations with locale defined from ISO 3166 */ 103/* Country names and abbreviations with locale defined from ISO 3166 */
148struct country_info { 104struct country_info {
149 const u8 locale_2G; /* 2.4G band locale */
150 const u8 locale_5G; /* 5G band locale */
151 const u8 locale_mimo_2G; /* 2.4G mimo info */ 105 const u8 locale_mimo_2G; /* 2.4G mimo info */
152 const u8 locale_mimo_5G; /* 5G mimo info */ 106 const u8 locale_mimo_5G; /* 5G mimo info */
153}; 107};
154 108
109struct brcms_regd {
110 struct country_info country;
111 const struct ieee80211_regdomain *regdomain;
112};
113
155struct brcms_cm_info { 114struct brcms_cm_info {
156 struct brcms_pub *pub; 115 struct brcms_pub *pub;
157 struct brcms_c_info *wlc; 116 struct brcms_c_info *wlc;
158 char srom_ccode[BRCM_CNTRY_BUF_SZ]; /* Country Code in SROM */ 117 const struct brcms_regd *world_regd;
159 uint srom_regrev; /* Regulatory Rev for the SROM ccode */
160 const struct country_info *country; /* current country def */
161 char ccode[BRCM_CNTRY_BUF_SZ]; /* current internal Country Code */
162 uint regrev; /* current Regulatory Revision */
163 char country_abbrev[BRCM_CNTRY_BUF_SZ]; /* current advertised ccode */
164 /* per-band state (one per phy/radio) */
165 struct brcms_cm_band bandstate[MAXBANDS];
166 /* quiet channels currently for radar sensitivity or 11h support */
167 /* channels on which we cannot transmit */
168 struct brcms_chanvec quiet_channels;
169};
170
171/* locale channel and power info. */
172struct locale_info {
173 u32 valid_channels;
174 /* List of radar sensitive channels */
175 u8 radar_channels;
176 /* List of channels used only if APs are detected */
177 u8 restricted_channels;
178 /* Max tx pwr in qdBm for each sub-band */
179 s8 maxpwr[BRCMS_MAXPWR_TBL_SIZE];
180 /* Country IE advertised max tx pwr in dBm per sub-band */
181 s8 pub_maxpwr[BAND_5G_PWR_LVLS];
182 u8 flags;
183};
184
185/* Regulatory Matrix Spreadsheet (CLM) MIMO v3.7.9 */
186
187/*
188 * Some common channel sets
189 */
190
191/* No channels */
192static const struct brcms_chanvec chanvec_none = {
193 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
194 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
195 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
196 0x00, 0x00, 0x00, 0x00}
197};
198
199/* All 2.4 GHz HW channels */
200static const struct brcms_chanvec chanvec_all_2G = {
201 {0xfe, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
202 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
203 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
204 0x00, 0x00, 0x00, 0x00}
205};
206
207/* All 5 GHz HW channels */
208static const struct brcms_chanvec chanvec_all_5G = {
209 {0x00, 0x00, 0x00, 0x00, 0x54, 0x55, 0x11, 0x11,
210 0x01, 0x00, 0x00, 0x00, 0x10, 0x11, 0x11, 0x11,
211 0x11, 0x11, 0x20, 0x22, 0x22, 0x00, 0x00, 0x11,
212 0x11, 0x11, 0x11, 0x01}
213};
214
215/*
216 * Radar channel sets
217 */
218
219/* Channels 52 - 64, 100 - 140 */
220static const struct brcms_chanvec radar_set1 = {
221 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x11, /* 52 - 60 */
222 0x01, 0x00, 0x00, 0x00, 0x10, 0x11, 0x11, 0x11, /* 64, 100 - 124 */
223 0x11, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 128 - 140 */
224 0x00, 0x00, 0x00, 0x00}
225};
226
227/*
228 * Restricted channel sets
229 */
230
231/* Channels 34, 38, 42, 46 */
232static const struct brcms_chanvec restricted_set_japan_legacy = {
233 {0x00, 0x00, 0x00, 0x00, 0x44, 0x44, 0x00, 0x00,
234 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
235 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
236 0x00, 0x00, 0x00, 0x00}
237};
238
239/* Channels 12, 13 */
240static const struct brcms_chanvec restricted_set_2g_short = {
241 {0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
242 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
243 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
244 0x00, 0x00, 0x00, 0x00}
245};
246
247/* Channel 165 */
248static const struct brcms_chanvec restricted_chan_165 = {
249 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
250 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
251 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
252 0x00, 0x00, 0x00, 0x00}
253};
254
255/* Channels 36 - 48 & 149 - 165 */
256static const struct brcms_chanvec restricted_low_hi = {
257 {0x00, 0x00, 0x00, 0x00, 0x10, 0x11, 0x01, 0x00,
258 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
259 0x00, 0x00, 0x20, 0x22, 0x22, 0x00, 0x00, 0x00,
260 0x00, 0x00, 0x00, 0x00}
261};
262
263/* Channels 12 - 14 */
264static const struct brcms_chanvec restricted_set_12_13_14 = {
265 {0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
266 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
267 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
268 0x00, 0x00, 0x00, 0x00}
269};
270
271/* global memory to provide working buffer for expanded locale */
272
273static const struct brcms_chanvec *g_table_radar_set[] = {
274 &chanvec_none,
275 &radar_set1
276};
277
278static const struct brcms_chanvec *g_table_restricted_chan[] = {
279 &chanvec_none, /* restricted_set_none */
280 &restricted_set_2g_short,
281 &restricted_chan_165,
282 &chanvec_all_5G,
283 &restricted_set_japan_legacy,
284 &chanvec_all_2G, /* restricted_set_11d_2G */
285 &chanvec_all_5G, /* restricted_set_11d_5G */
286 &restricted_low_hi,
287 &restricted_set_12_13_14
288};
289
290static const struct brcms_chanvec locale_2g_01_11 = {
291 {0xfe, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
292 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
293 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
294 0x00, 0x00, 0x00, 0x00}
295};
296
297static const struct brcms_chanvec locale_2g_12_13 = {
298 {0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
299 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
300 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
301 0x00, 0x00, 0x00, 0x00}
302};
303
304static const struct brcms_chanvec locale_2g_14 = {
305 {0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
306 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
307 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
308 0x00, 0x00, 0x00, 0x00}
309};
310
311static const struct brcms_chanvec locale_5g_LOW_JP1 = {
312 {0x00, 0x00, 0x00, 0x00, 0x54, 0x55, 0x01, 0x00,
313 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
314 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
315 0x00, 0x00, 0x00, 0x00}
316};
317
318static const struct brcms_chanvec locale_5g_LOW_JP2 = {
319 {0x00, 0x00, 0x00, 0x00, 0x44, 0x44, 0x00, 0x00,
320 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
321 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
322 0x00, 0x00, 0x00, 0x00}
323};
324
325static const struct brcms_chanvec locale_5g_LOW1 = {
326 {0x00, 0x00, 0x00, 0x00, 0x10, 0x11, 0x01, 0x00,
327 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
328 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
329 0x00, 0x00, 0x00, 0x00}
330};
331
332static const struct brcms_chanvec locale_5g_LOW2 = {
333 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00,
334 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
336 0x00, 0x00, 0x00, 0x00}
337};
338
339static const struct brcms_chanvec locale_5g_LOW3 = {
340 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
341 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
342 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
343 0x00, 0x00, 0x00, 0x00}
344};
345
346static const struct brcms_chanvec locale_5g_MID1 = {
347 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
348 0x00, 0x00, 0x00, 0x00, 0x10, 0x11, 0x11, 0x00,
349 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
350 0x00, 0x00, 0x00, 0x00}
351};
352
353static const struct brcms_chanvec locale_5g_MID2 = {
354 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
355 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
356 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
357 0x00, 0x00, 0x00, 0x00}
358};
359
360static const struct brcms_chanvec locale_5g_MID3 = {
361 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
362 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
363 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
364 0x00, 0x00, 0x00, 0x00}
365};
366
367static const struct brcms_chanvec locale_5g_HIGH1 = {
368 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
369 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
370 0x10, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
371 0x00, 0x00, 0x00, 0x00}
372};
373
374static const struct brcms_chanvec locale_5g_HIGH2 = {
375 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x20, 0x22, 0x02, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00}
379};
380
381static const struct brcms_chanvec locale_5g_HIGH3 = {
382 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
383 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
384 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
385 0x00, 0x00, 0x00, 0x00}
386};
387
388static const struct brcms_chanvec locale_5g_52_140_ALL = {
389 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x11,
390 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
391 0x11, 0x11, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
392 0x00, 0x00, 0x00, 0x00}
393};
394
395static const struct brcms_chanvec locale_5g_HIGH4 = {
396 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
397 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
398 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
399 0x11, 0x11, 0x11, 0x11}
400};
401
402static const struct brcms_chanvec *g_table_locale_base[] = {
403 &locale_2g_01_11,
404 &locale_2g_12_13,
405 &locale_2g_14,
406 &locale_5g_LOW_JP1,
407 &locale_5g_LOW_JP2,
408 &locale_5g_LOW1,
409 &locale_5g_LOW2,
410 &locale_5g_LOW3,
411 &locale_5g_MID1,
412 &locale_5g_MID2,
413 &locale_5g_MID3,
414 &locale_5g_HIGH1,
415 &locale_5g_HIGH2,
416 &locale_5g_HIGH3,
417 &locale_5g_52_140_ALL,
418 &locale_5g_HIGH4
419};
420
421static void brcms_c_locale_add_channels(struct brcms_chanvec *target,
422 const struct brcms_chanvec *channels)
423{
424 u8 i;
425 for (i = 0; i < sizeof(struct brcms_chanvec); i++)
426 target->vec[i] |= channels->vec[i];
427}
428
429static void brcms_c_locale_get_channels(const struct locale_info *locale,
430 struct brcms_chanvec *channels)
431{
432 u8 i;
433
434 memset(channels, 0, sizeof(struct brcms_chanvec));
435
436 for (i = 0; i < ARRAY_SIZE(g_table_locale_base); i++) {
437 if (locale->valid_channels & (1 << i))
438 brcms_c_locale_add_channels(channels,
439 g_table_locale_base[i]);
440 }
441}
442
443/*
444 * Locale Definitions - 2.4 GHz
445 */
446static const struct locale_info locale_i = { /* locale i. channel 1 - 13 */
447 LOCALE_CHAN_01_11 | LOCALE_CHAN_12_13,
448 LOCALE_RADAR_SET_NONE,
449 LOCALE_RESTRICTED_SET_2G_SHORT,
450 {QDB(19), QDB(19), QDB(19),
451 QDB(19), QDB(19), QDB(19)},
452 {20, 20, 20, 0},
453 BRCMS_EIRP
454};
455
456/*
457 * Locale Definitions - 5 GHz
458 */
459static const struct locale_info locale_11 = {
460 /* locale 11. channel 36 - 48, 52 - 64, 100 - 140, 149 - 165 */
461 LOCALE_CHAN_36_64 | LOCALE_CHAN_100_140 | LOCALE_CHAN_149_165,
462 LOCALE_RADAR_SET_1,
463 LOCALE_RESTRICTED_NONE,
464 {QDB(21), QDB(21), QDB(21), QDB(21), QDB(21)},
465 {23, 23, 23, 30, 30},
466 BRCMS_EIRP | BRCMS_DFS_EU
467};
468
469static const struct locale_info *g_locale_2g_table[] = {
470 &locale_i
471};
472
473static const struct locale_info *g_locale_5g_table[] = {
474 &locale_11
475}; 118};
476 119
477/* 120/*
@@ -484,7 +127,6 @@ static const struct locale_mimo_info locale_bn = {
484 {0, 0, QDB(13), QDB(13), QDB(13), 127 {0, 0, QDB(13), QDB(13), QDB(13),
485 QDB(13), QDB(13), QDB(13), QDB(13), QDB(13), 128 QDB(13), QDB(13), QDB(13), QDB(13), QDB(13),
486 QDB(13), 0, 0}, 129 QDB(13), 0, 0},
487 0
488}; 130};
489 131
490static const struct locale_mimo_info *g_mimo_2g_table[] = { 132static const struct locale_mimo_info *g_mimo_2g_table[] = {
@@ -497,114 +139,20 @@ static const struct locale_mimo_info *g_mimo_2g_table[] = {
497static const struct locale_mimo_info locale_11n = { 139static const struct locale_mimo_info locale_11n = {
498 { /* 12.5 dBm */ 50, 50, 50, QDB(15), QDB(15)}, 140 { /* 12.5 dBm */ 50, 50, 50, QDB(15), QDB(15)},
499 {QDB(14), QDB(15), QDB(15), QDB(15), QDB(15)}, 141 {QDB(14), QDB(15), QDB(15), QDB(15), QDB(15)},
500 0
501}; 142};
502 143
503static const struct locale_mimo_info *g_mimo_5g_table[] = { 144static const struct locale_mimo_info *g_mimo_5g_table[] = {
504 &locale_11n 145 &locale_11n
505}; 146};
506 147
507static const struct { 148static const struct brcms_regd cntry_locales[] = {
508 char abbrev[BRCM_CNTRY_BUF_SZ]; /* country abbreviation */ 149 /* Worldwide RoW 2, must always be at index 0 */
509 struct country_info country;
510} cntry_locales[] = {
511 { 150 {
512 "X2", LOCALES(i, 11, bn, 11n)}, /* Worldwide RoW 2 */ 151 .country = LOCALES(bn, 11n),
513}; 152 .regdomain = &brcms_regdom_x2,
514 153 },
515#ifdef SUPPORT_40MHZ
516/* 20MHz channel info for 40MHz pairing support */
517struct chan20_info {
518 u8 sb;
519 u8 adj_sbs;
520}; 154};
521 155
522/* indicates adjacent channels that are allowed for a 40 Mhz channel and
523 * those that permitted by the HT
524 */
525struct chan20_info chan20_info[] = {
526 /* 11b/11g */
527/* 0 */ {1, (CH_UPPER_SB | CH_EWA_VALID)},
528/* 1 */ {2, (CH_UPPER_SB | CH_EWA_VALID)},
529/* 2 */ {3, (CH_UPPER_SB | CH_EWA_VALID)},
530/* 3 */ {4, (CH_UPPER_SB | CH_EWA_VALID)},
531/* 4 */ {5, (CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)},
532/* 5 */ {6, (CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)},
533/* 6 */ {7, (CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)},
534/* 7 */ {8, (CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)},
535/* 8 */ {9, (CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)},
536/* 9 */ {10, (CH_LOWER_SB | CH_EWA_VALID)},
537/* 10 */ {11, (CH_LOWER_SB | CH_EWA_VALID)},
538/* 11 */ {12, (CH_LOWER_SB)},
539/* 12 */ {13, (CH_LOWER_SB)},
540/* 13 */ {14, (CH_LOWER_SB)},
541
542/* 11a japan high */
543/* 14 */ {34, (CH_UPPER_SB)},
544/* 15 */ {38, (CH_LOWER_SB)},
545/* 16 */ {42, (CH_LOWER_SB)},
546/* 17 */ {46, (CH_LOWER_SB)},
547
548/* 11a usa low */
549/* 18 */ {36, (CH_UPPER_SB | CH_EWA_VALID)},
550/* 19 */ {40, (CH_LOWER_SB | CH_EWA_VALID)},
551/* 20 */ {44, (CH_UPPER_SB | CH_EWA_VALID)},
552/* 21 */ {48, (CH_LOWER_SB | CH_EWA_VALID)},
553/* 22 */ {52, (CH_UPPER_SB | CH_EWA_VALID)},
554/* 23 */ {56, (CH_LOWER_SB | CH_EWA_VALID)},
555/* 24 */ {60, (CH_UPPER_SB | CH_EWA_VALID)},
556/* 25 */ {64, (CH_LOWER_SB | CH_EWA_VALID)},
557
558/* 11a Europe */
559/* 26 */ {100, (CH_UPPER_SB | CH_EWA_VALID)},
560/* 27 */ {104, (CH_LOWER_SB | CH_EWA_VALID)},
561/* 28 */ {108, (CH_UPPER_SB | CH_EWA_VALID)},
562/* 29 */ {112, (CH_LOWER_SB | CH_EWA_VALID)},
563/* 30 */ {116, (CH_UPPER_SB | CH_EWA_VALID)},
564/* 31 */ {120, (CH_LOWER_SB | CH_EWA_VALID)},
565/* 32 */ {124, (CH_UPPER_SB | CH_EWA_VALID)},
566/* 33 */ {128, (CH_LOWER_SB | CH_EWA_VALID)},
567/* 34 */ {132, (CH_UPPER_SB | CH_EWA_VALID)},
568/* 35 */ {136, (CH_LOWER_SB | CH_EWA_VALID)},
569/* 36 */ {140, (CH_LOWER_SB)},
570
571/* 11a usa high, ref5 only */
572/* The 0x80 bit in pdiv means these are REF5, other entries are REF20 */
573/* 37 */ {149, (CH_UPPER_SB | CH_EWA_VALID)},
574/* 38 */ {153, (CH_LOWER_SB | CH_EWA_VALID)},
575/* 39 */ {157, (CH_UPPER_SB | CH_EWA_VALID)},
576/* 40 */ {161, (CH_LOWER_SB | CH_EWA_VALID)},
577/* 41 */ {165, (CH_LOWER_SB)},
578
579/* 11a japan */
580/* 42 */ {184, (CH_UPPER_SB)},
581/* 43 */ {188, (CH_LOWER_SB)},
582/* 44 */ {192, (CH_UPPER_SB)},
583/* 45 */ {196, (CH_LOWER_SB)},
584/* 46 */ {200, (CH_UPPER_SB)},
585/* 47 */ {204, (CH_LOWER_SB)},
586/* 48 */ {208, (CH_UPPER_SB)},
587/* 49 */ {212, (CH_LOWER_SB)},
588/* 50 */ {216, (CH_LOWER_SB)}
589};
590#endif /* SUPPORT_40MHZ */
591
592static const struct locale_info *brcms_c_get_locale_2g(u8 locale_idx)
593{
594 if (locale_idx >= ARRAY_SIZE(g_locale_2g_table))
595 return NULL; /* error condition */
596
597 return g_locale_2g_table[locale_idx];
598}
599
600static const struct locale_info *brcms_c_get_locale_5g(u8 locale_idx)
601{
602 if (locale_idx >= ARRAY_SIZE(g_locale_5g_table))
603 return NULL; /* error condition */
604
605 return g_locale_5g_table[locale_idx];
606}
607
608static const struct locale_mimo_info *brcms_c_get_mimo_2g(u8 locale_idx) 156static const struct locale_mimo_info *brcms_c_get_mimo_2g(u8 locale_idx)
609{ 157{
610 if (locale_idx >= ARRAY_SIZE(g_mimo_2g_table)) 158 if (locale_idx >= ARRAY_SIZE(g_mimo_2g_table))
@@ -621,13 +169,6 @@ static const struct locale_mimo_info *brcms_c_get_mimo_5g(u8 locale_idx)
621 return g_mimo_5g_table[locale_idx]; 169 return g_mimo_5g_table[locale_idx];
622} 170}
623 171
624static int
625brcms_c_country_aggregate_map(struct brcms_cm_info *wlc_cm, const char *ccode,
626 char *mapped_ccode, uint *mapped_regrev)
627{
628 return false;
629}
630
631/* 172/*
632 * Indicates whether the country provided is valid to pass 173 * Indicates whether the country provided is valid to pass
633 * to cfg80211 or not. 174 * to cfg80211 or not.
@@ -662,155 +203,24 @@ static bool brcms_c_country_valid(const char *ccode)
662 return true; 203 return true;
663} 204}
664 205
665/* Lookup a country info structure from a null terminated country 206static const struct brcms_regd *brcms_world_regd(const char *regdom, int len)
666 * abbreviation and regrev directly with no translation.
667 */
668static const struct country_info *
669brcms_c_country_lookup_direct(const char *ccode, uint regrev)
670{ 207{
671 uint size, i; 208 const struct brcms_regd *regd = NULL;
672 209 int i;
673 /* Should just return 0 for single locale driver. */
674 /* Keep it this way in case we add more locales. (for now anyway) */
675
676 /*
677 * all other country def arrays are for regrev == 0, so if
678 * regrev is non-zero, fail
679 */
680 if (regrev > 0)
681 return NULL;
682
683 /* find matched table entry from country code */
684 size = ARRAY_SIZE(cntry_locales);
685 for (i = 0; i < size; i++) {
686 if (strcmp(ccode, cntry_locales[i].abbrev) == 0)
687 return &cntry_locales[i].country;
688 }
689 return NULL;
690}
691
692static const struct country_info *
693brcms_c_countrycode_map(struct brcms_cm_info *wlc_cm, const char *ccode,
694 char *mapped_ccode, uint *mapped_regrev)
695{
696 struct brcms_c_info *wlc = wlc_cm->wlc;
697 const struct country_info *country;
698 uint srom_regrev = wlc_cm->srom_regrev;
699 const char *srom_ccode = wlc_cm->srom_ccode;
700 int mapped;
701
702 /* check for currently supported ccode size */
703 if (strlen(ccode) > (BRCM_CNTRY_BUF_SZ - 1)) {
704 wiphy_err(wlc->wiphy, "wl%d: %s: ccode \"%s\" too long for "
705 "match\n", wlc->pub->unit, __func__, ccode);
706 return NULL;
707 }
708
709 /* default mapping is the given ccode and regrev 0 */
710 strncpy(mapped_ccode, ccode, BRCM_CNTRY_BUF_SZ);
711 *mapped_regrev = 0;
712
713 /* If the desired country code matches the srom country code,
714 * then the mapped country is the srom regulatory rev.
715 * Otherwise look for an aggregate mapping.
716 */
717 if (!strcmp(srom_ccode, ccode)) {
718 *mapped_regrev = srom_regrev;
719 mapped = 0;
720 wiphy_err(wlc->wiphy, "srom_code == ccode %s\n", __func__);
721 } else {
722 mapped =
723 brcms_c_country_aggregate_map(wlc_cm, ccode, mapped_ccode,
724 mapped_regrev);
725 }
726
727 /* find the matching built-in country definition */
728 country = brcms_c_country_lookup_direct(mapped_ccode, *mapped_regrev);
729
730 /* if there is not an exact rev match, default to rev zero */
731 if (country == NULL && *mapped_regrev != 0) {
732 *mapped_regrev = 0;
733 country =
734 brcms_c_country_lookup_direct(mapped_ccode, *mapped_regrev);
735 }
736
737 return country;
738}
739
740/* Lookup a country info structure from a null terminated country code
741 * The lookup is case sensitive.
742 */
743static const struct country_info *
744brcms_c_country_lookup(struct brcms_c_info *wlc, const char *ccode)
745{
746 const struct country_info *country;
747 char mapped_ccode[BRCM_CNTRY_BUF_SZ];
748 uint mapped_regrev;
749
750 /*
751 * map the country code to a built-in country code, regrev, and
752 * country_info struct
753 */
754 country = brcms_c_countrycode_map(wlc->cmi, ccode, mapped_ccode,
755 &mapped_regrev);
756
757 return country;
758}
759
760/*
761 * reset the quiet channels vector to the union
762 * of the restricted and radar channel sets
763 */
764static void brcms_c_quiet_channels_reset(struct brcms_cm_info *wlc_cm)
765{
766 struct brcms_c_info *wlc = wlc_cm->wlc;
767 uint i, j;
768 struct brcms_band *band;
769 const struct brcms_chanvec *chanvec;
770
771 memset(&wlc_cm->quiet_channels, 0, sizeof(struct brcms_chanvec));
772
773 band = wlc->band;
774 for (i = 0; i < wlc->pub->_nbands;
775 i++, band = wlc->bandstate[OTHERBANDUNIT(wlc)]) {
776
777 /* initialize quiet channels for restricted channels */
778 chanvec = wlc_cm->bandstate[band->bandunit].restricted_channels;
779 for (j = 0; j < sizeof(struct brcms_chanvec); j++)
780 wlc_cm->quiet_channels.vec[j] |= chanvec->vec[j];
781 210
211 for (i = 0; i < ARRAY_SIZE(cntry_locales); i++) {
212 if (!strncmp(regdom, cntry_locales[i].regdomain->alpha2, len)) {
213 regd = &cntry_locales[i];
214 break;
215 }
782 } 216 }
783}
784
785/* Is the channel valid for the current locale and current band? */
786static bool brcms_c_valid_channel20(struct brcms_cm_info *wlc_cm, uint val)
787{
788 struct brcms_c_info *wlc = wlc_cm->wlc;
789 217
790 return ((val < MAXCHANNEL) && 218 return regd;
791 isset(wlc_cm->bandstate[wlc->band->bandunit].valid_channels.vec,
792 val));
793} 219}
794 220
795/* Is the channel valid for the current locale and specified band? */ 221static const struct brcms_regd *brcms_default_world_regd(void)
796static bool brcms_c_valid_channel20_in_band(struct brcms_cm_info *wlc_cm,
797 uint bandunit, uint val)
798{
799 return ((val < MAXCHANNEL)
800 && isset(wlc_cm->bandstate[bandunit].valid_channels.vec, val));
801}
802
803/* Is the channel valid for the current locale? (but don't consider channels not
804 * available due to bandlocking)
805 */
806static bool brcms_c_valid_channel20_db(struct brcms_cm_info *wlc_cm, uint val)
807{ 222{
808 struct brcms_c_info *wlc = wlc_cm->wlc; 223 return &cntry_locales[0];
809
810 return brcms_c_valid_channel20(wlc->cmi, val) ||
811 (!wlc->bandlocked
812 && brcms_c_valid_channel20_in_band(wlc->cmi,
813 OTHERBANDUNIT(wlc), val));
814} 224}
815 225
816/* JP, J1 - J10 are Japan ccodes */ 226/* JP, J1 - J10 are Japan ccodes */
@@ -820,12 +230,6 @@ static bool brcms_c_japan_ccode(const char *ccode)
820 (ccode[1] == 'P' || (ccode[1] >= '1' && ccode[1] <= '9'))); 230 (ccode[1] == 'P' || (ccode[1] >= '1' && ccode[1] <= '9')));
821} 231}
822 232
823/* Returns true if currently set country is Japan or variant */
824static bool brcms_c_japan(struct brcms_c_info *wlc)
825{
826 return brcms_c_japan_ccode(wlc->cmi->country_abbrev);
827}
828
829static void 233static void
830brcms_c_channel_min_txpower_limits_with_local_constraint( 234brcms_c_channel_min_txpower_limits_with_local_constraint(
831 struct brcms_cm_info *wlc_cm, struct txpwr_limits *txpwr, 235 struct brcms_cm_info *wlc_cm, struct txpwr_limits *txpwr,
@@ -901,140 +305,16 @@ brcms_c_channel_min_txpower_limits_with_local_constraint(
901 305
902} 306}
903 307
904/* Update the radio state (enable/disable) and tx power targets
905 * based on a new set of channel/regulatory information
906 */
907static void brcms_c_channels_commit(struct brcms_cm_info *wlc_cm)
908{
909 struct brcms_c_info *wlc = wlc_cm->wlc;
910 uint chan;
911 struct txpwr_limits txpwr;
912
913 /* search for the existence of any valid channel */
914 for (chan = 0; chan < MAXCHANNEL; chan++) {
915 if (brcms_c_valid_channel20_db(wlc->cmi, chan))
916 break;
917 }
918 if (chan == MAXCHANNEL)
919 chan = INVCHANNEL;
920
921 /*
922 * based on the channel search above, set or
923 * clear WL_RADIO_COUNTRY_DISABLE.
924 */
925 if (chan == INVCHANNEL) {
926 /*
927 * country/locale with no valid channels, set
928 * the radio disable bit
929 */
930 mboolset(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE);
931 wiphy_err(wlc->wiphy, "wl%d: %s: no valid channel for \"%s\" "
932 "nbands %d bandlocked %d\n", wlc->pub->unit,
933 __func__, wlc_cm->country_abbrev, wlc->pub->_nbands,
934 wlc->bandlocked);
935 } else if (mboolisset(wlc->pub->radio_disabled,
936 WL_RADIO_COUNTRY_DISABLE)) {
937 /*
938 * country/locale with valid channel, clear
939 * the radio disable bit
940 */
941 mboolclr(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE);
942 }
943
944 /*
945 * Now that the country abbreviation is set, if the radio supports 2G,
946 * then set channel 14 restrictions based on the new locale.
947 */
948 if (wlc->pub->_nbands > 1 || wlc->band->bandtype == BRCM_BAND_2G)
949 wlc_phy_chanspec_ch14_widefilter_set(wlc->band->pi,
950 brcms_c_japan(wlc) ? true :
951 false);
952
953 if (wlc->pub->up && chan != INVCHANNEL) {
954 brcms_c_channel_reg_limits(wlc_cm, wlc->chanspec, &txpwr);
955 brcms_c_channel_min_txpower_limits_with_local_constraint(wlc_cm,
956 &txpwr, BRCMS_TXPWR_MAX);
957 wlc_phy_txpower_limit_set(wlc->band->pi, &txpwr, wlc->chanspec);
958 }
959}
960
961static int
962brcms_c_channels_init(struct brcms_cm_info *wlc_cm,
963 const struct country_info *country)
964{
965 struct brcms_c_info *wlc = wlc_cm->wlc;
966 uint i, j;
967 struct brcms_band *band;
968 const struct locale_info *li;
969 struct brcms_chanvec sup_chan;
970 const struct locale_mimo_info *li_mimo;
971
972 band = wlc->band;
973 for (i = 0; i < wlc->pub->_nbands;
974 i++, band = wlc->bandstate[OTHERBANDUNIT(wlc)]) {
975
976 li = (band->bandtype == BRCM_BAND_5G) ?
977 brcms_c_get_locale_5g(country->locale_5G) :
978 brcms_c_get_locale_2g(country->locale_2G);
979 wlc_cm->bandstate[band->bandunit].locale_flags = li->flags;
980 li_mimo = (band->bandtype == BRCM_BAND_5G) ?
981 brcms_c_get_mimo_5g(country->locale_mimo_5G) :
982 brcms_c_get_mimo_2g(country->locale_mimo_2G);
983
984 /* merge the mimo non-mimo locale flags */
985 wlc_cm->bandstate[band->bandunit].locale_flags |=
986 li_mimo->flags;
987
988 wlc_cm->bandstate[band->bandunit].restricted_channels =
989 g_table_restricted_chan[li->restricted_channels];
990 wlc_cm->bandstate[band->bandunit].radar_channels =
991 g_table_radar_set[li->radar_channels];
992
993 /*
994 * set the channel availability, masking out the channels
995 * that may not be supported on this phy.
996 */
997 wlc_phy_chanspec_band_validch(band->pi, band->bandtype,
998 &sup_chan);
999 brcms_c_locale_get_channels(li,
1000 &wlc_cm->bandstate[band->bandunit].
1001 valid_channels);
1002 for (j = 0; j < sizeof(struct brcms_chanvec); j++)
1003 wlc_cm->bandstate[band->bandunit].valid_channels.
1004 vec[j] &= sup_chan.vec[j];
1005 }
1006
1007 brcms_c_quiet_channels_reset(wlc_cm);
1008 brcms_c_channels_commit(wlc_cm);
1009
1010 return 0;
1011}
1012
1013/* 308/*
1014 * set the driver's current country and regulatory information 309 * set the driver's current country and regulatory information
1015 * using a country code as the source. Look up built in country 310 * using a country code as the source. Look up built in country
1016 * information found with the country code. 311 * information found with the country code.
1017 */ 312 */
1018static void 313static void
1019brcms_c_set_country_common(struct brcms_cm_info *wlc_cm, 314brcms_c_set_country(struct brcms_cm_info *wlc_cm,
1020 const char *country_abbrev, 315 const struct brcms_regd *regd)
1021 const char *ccode, uint regrev,
1022 const struct country_info *country)
1023{ 316{
1024 const struct locale_info *locale;
1025 struct brcms_c_info *wlc = wlc_cm->wlc; 317 struct brcms_c_info *wlc = wlc_cm->wlc;
1026 char prev_country_abbrev[BRCM_CNTRY_BUF_SZ];
1027
1028 /* save current country state */
1029 wlc_cm->country = country;
1030
1031 memset(&prev_country_abbrev, 0, BRCM_CNTRY_BUF_SZ);
1032 strncpy(prev_country_abbrev, wlc_cm->country_abbrev,
1033 BRCM_CNTRY_BUF_SZ - 1);
1034
1035 strncpy(wlc_cm->country_abbrev, country_abbrev, BRCM_CNTRY_BUF_SZ - 1);
1036 strncpy(wlc_cm->ccode, ccode, BRCM_CNTRY_BUF_SZ - 1);
1037 wlc_cm->regrev = regrev;
1038 318
1039 if ((wlc->pub->_n_enab & SUPPORT_11N) != 319 if ((wlc->pub->_n_enab & SUPPORT_11N) !=
1040 wlc->protection->nmode_user) 320 wlc->protection->nmode_user)
@@ -1042,75 +322,19 @@ brcms_c_set_country_common(struct brcms_cm_info *wlc_cm,
1042 322
1043 brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_2G_INDEX]); 323 brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_2G_INDEX]);
1044 brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_5G_INDEX]); 324 brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_5G_INDEX]);
1045 /* set or restore gmode as required by regulatory */
1046 locale = brcms_c_get_locale_2g(country->locale_2G);
1047 if (locale && (locale->flags & BRCMS_NO_OFDM))
1048 brcms_c_set_gmode(wlc, GMODE_LEGACY_B, false);
1049 else
1050 brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false);
1051 325
1052 brcms_c_channels_init(wlc_cm, country); 326 brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false);
1053 327
1054 return; 328 return;
1055} 329}
1056 330
1057static int
1058brcms_c_set_countrycode_rev(struct brcms_cm_info *wlc_cm,
1059 const char *country_abbrev,
1060 const char *ccode, int regrev)
1061{
1062 const struct country_info *country;
1063 char mapped_ccode[BRCM_CNTRY_BUF_SZ];
1064 uint mapped_regrev;
1065
1066 /* if regrev is -1, lookup the mapped country code,
1067 * otherwise use the ccode and regrev directly
1068 */
1069 if (regrev == -1) {
1070 /*
1071 * map the country code to a built-in country
1072 * code, regrev, and country_info
1073 */
1074 country =
1075 brcms_c_countrycode_map(wlc_cm, ccode, mapped_ccode,
1076 &mapped_regrev);
1077 } else {
1078 /* find the matching built-in country definition */
1079 country = brcms_c_country_lookup_direct(ccode, regrev);
1080 strncpy(mapped_ccode, ccode, BRCM_CNTRY_BUF_SZ);
1081 mapped_regrev = regrev;
1082 }
1083
1084 if (country == NULL)
1085 return -EINVAL;
1086
1087 /* set the driver state for the country */
1088 brcms_c_set_country_common(wlc_cm, country_abbrev, mapped_ccode,
1089 mapped_regrev, country);
1090
1091 return 0;
1092}
1093
1094/*
1095 * set the driver's current country and regulatory information using
1096 * a country code as the source. Lookup built in country information
1097 * found with the country code.
1098 */
1099static int
1100brcms_c_set_countrycode(struct brcms_cm_info *wlc_cm, const char *ccode)
1101{
1102 char country_abbrev[BRCM_CNTRY_BUF_SZ];
1103 strncpy(country_abbrev, ccode, BRCM_CNTRY_BUF_SZ);
1104 return brcms_c_set_countrycode_rev(wlc_cm, country_abbrev, ccode, -1);
1105}
1106
1107struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc) 331struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc)
1108{ 332{
1109 struct brcms_cm_info *wlc_cm; 333 struct brcms_cm_info *wlc_cm;
1110 char country_abbrev[BRCM_CNTRY_BUF_SZ];
1111 const struct country_info *country;
1112 struct brcms_pub *pub = wlc->pub; 334 struct brcms_pub *pub = wlc->pub;
1113 struct ssb_sprom *sprom = &wlc->hw->d11core->bus->sprom; 335 struct ssb_sprom *sprom = &wlc->hw->d11core->bus->sprom;
336 const char *ccode = sprom->alpha2;
337 int ccode_len = sizeof(sprom->alpha2);
1114 338
1115 BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit); 339 BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
1116 340
@@ -1122,24 +346,27 @@ struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc)
1122 wlc->cmi = wlc_cm; 346 wlc->cmi = wlc_cm;
1123 347
1124 /* store the country code for passing up as a regulatory hint */ 348 /* store the country code for passing up as a regulatory hint */
1125 if (sprom->alpha2 && brcms_c_country_valid(sprom->alpha2)) 349 wlc_cm->world_regd = brcms_world_regd(ccode, ccode_len);
1126 strncpy(wlc->pub->srom_ccode, sprom->alpha2, sizeof(sprom->alpha2)); 350 if (brcms_c_country_valid(ccode))
351 strncpy(wlc->pub->srom_ccode, ccode, ccode_len);
1127 352
1128 /* 353 /*
1129 * internal country information which must match 354 * If no custom world domain is found in the SROM, use the
1130 * regulatory constraints in firmware 355 * default "X2" domain.
1131 */ 356 */
1132 memset(country_abbrev, 0, BRCM_CNTRY_BUF_SZ); 357 if (!wlc_cm->world_regd) {
1133 strncpy(country_abbrev, "X2", sizeof(country_abbrev) - 1); 358 wlc_cm->world_regd = brcms_default_world_regd();
1134 country = brcms_c_country_lookup(wlc, country_abbrev); 359 ccode = wlc_cm->world_regd->regdomain->alpha2;
360 ccode_len = BRCM_CNTRY_BUF_SZ - 1;
361 }
1135 362
1136 /* save default country for exiting 11d regulatory mode */ 363 /* save default country for exiting 11d regulatory mode */
1137 strncpy(wlc->country_default, country_abbrev, BRCM_CNTRY_BUF_SZ - 1); 364 strncpy(wlc->country_default, ccode, ccode_len);
1138 365
1139 /* initialize autocountry_default to driver default */ 366 /* initialize autocountry_default to driver default */
1140 strncpy(wlc->autocountry_default, "X2", BRCM_CNTRY_BUF_SZ - 1); 367 strncpy(wlc->autocountry_default, ccode, ccode_len);
1141 368
1142 brcms_c_set_countrycode(wlc_cm, country_abbrev); 369 brcms_c_set_country(wlc_cm, wlc_cm->world_regd);
1143 370
1144 return wlc_cm; 371 return wlc_cm;
1145} 372}
@@ -1149,31 +376,15 @@ void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm)
1149 kfree(wlc_cm); 376 kfree(wlc_cm);
1150} 377}
1151 378
1152u8
1153brcms_c_channel_locale_flags_in_band(struct brcms_cm_info *wlc_cm,
1154 uint bandunit)
1155{
1156 return wlc_cm->bandstate[bandunit].locale_flags;
1157}
1158
1159static bool
1160brcms_c_quiet_chanspec(struct brcms_cm_info *wlc_cm, u16 chspec)
1161{
1162 return (wlc_cm->wlc->pub->_n_enab & SUPPORT_11N) &&
1163 CHSPEC_IS40(chspec) ?
1164 (isset(wlc_cm->quiet_channels.vec,
1165 lower_20_sb(CHSPEC_CHANNEL(chspec))) ||
1166 isset(wlc_cm->quiet_channels.vec,
1167 upper_20_sb(CHSPEC_CHANNEL(chspec)))) :
1168 isset(wlc_cm->quiet_channels.vec, CHSPEC_CHANNEL(chspec));
1169}
1170
1171void 379void
1172brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec, 380brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
1173 u8 local_constraint_qdbm) 381 u8 local_constraint_qdbm)
1174{ 382{
1175 struct brcms_c_info *wlc = wlc_cm->wlc; 383 struct brcms_c_info *wlc = wlc_cm->wlc;
384 struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.channel;
385 const struct ieee80211_reg_rule *reg_rule;
1176 struct txpwr_limits txpwr; 386 struct txpwr_limits txpwr;
387 int ret;
1177 388
1178 brcms_c_channel_reg_limits(wlc_cm, chanspec, &txpwr); 389 brcms_c_channel_reg_limits(wlc_cm, chanspec, &txpwr);
1179 390
@@ -1181,8 +392,15 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
1181 wlc_cm, &txpwr, local_constraint_qdbm 392 wlc_cm, &txpwr, local_constraint_qdbm
1182 ); 393 );
1183 394
395 /* set or restore gmode as required by regulatory */
396 ret = freq_reg_info(wlc->wiphy, ch->center_freq, 0, &reg_rule);
397 if (!ret && (reg_rule->flags & NL80211_RRF_NO_OFDM))
398 brcms_c_set_gmode(wlc, GMODE_LEGACY_B, false);
399 else
400 brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false);
401
1184 brcms_b_set_chanspec(wlc->hw, chanspec, 402 brcms_b_set_chanspec(wlc->hw, chanspec,
1185 (brcms_c_quiet_chanspec(wlc_cm, chanspec) != 0), 403 !!(ch->flags & IEEE80211_CHAN_PASSIVE_SCAN),
1186 &txpwr); 404 &txpwr);
1187} 405}
1188 406
@@ -1191,15 +409,14 @@ brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
1191 struct txpwr_limits *txpwr) 409 struct txpwr_limits *txpwr)
1192{ 410{
1193 struct brcms_c_info *wlc = wlc_cm->wlc; 411 struct brcms_c_info *wlc = wlc_cm->wlc;
412 struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.channel;
1194 uint i; 413 uint i;
1195 uint chan; 414 uint chan;
1196 int maxpwr; 415 int maxpwr;
1197 int delta; 416 int delta;
1198 const struct country_info *country; 417 const struct country_info *country;
1199 struct brcms_band *band; 418 struct brcms_band *band;
1200 const struct locale_info *li;
1201 int conducted_max = BRCMS_TXPWR_MAX; 419 int conducted_max = BRCMS_TXPWR_MAX;
1202 int conducted_ofdm_max = BRCMS_TXPWR_MAX;
1203 const struct locale_mimo_info *li_mimo; 420 const struct locale_mimo_info *li_mimo;
1204 int maxpwr20, maxpwr40; 421 int maxpwr20, maxpwr40;
1205 int maxpwr_idx; 422 int maxpwr_idx;
@@ -1207,67 +424,35 @@ brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
1207 424
1208 memset(txpwr, 0, sizeof(struct txpwr_limits)); 425 memset(txpwr, 0, sizeof(struct txpwr_limits));
1209 426
1210 if (!brcms_c_valid_chanspec_db(wlc_cm, chanspec)) { 427 if (WARN_ON(!ch))
1211 country = brcms_c_country_lookup(wlc, wlc->autocountry_default); 428 return;
1212 if (country == NULL) 429
1213 return; 430 country = &wlc_cm->world_regd->country;
1214 } else {
1215 country = wlc_cm->country;
1216 }
1217 431
1218 chan = CHSPEC_CHANNEL(chanspec); 432 chan = CHSPEC_CHANNEL(chanspec);
1219 band = wlc->bandstate[chspec_bandunit(chanspec)]; 433 band = wlc->bandstate[chspec_bandunit(chanspec)];
1220 li = (band->bandtype == BRCM_BAND_5G) ?
1221 brcms_c_get_locale_5g(country->locale_5G) :
1222 brcms_c_get_locale_2g(country->locale_2G);
1223
1224 li_mimo = (band->bandtype == BRCM_BAND_5G) ? 434 li_mimo = (band->bandtype == BRCM_BAND_5G) ?
1225 brcms_c_get_mimo_5g(country->locale_mimo_5G) : 435 brcms_c_get_mimo_5g(country->locale_mimo_5G) :
1226 brcms_c_get_mimo_2g(country->locale_mimo_2G); 436 brcms_c_get_mimo_2g(country->locale_mimo_2G);
1227 437
1228 if (li->flags & BRCMS_EIRP) { 438 delta = band->antgain;
1229 delta = band->antgain;
1230 } else {
1231 delta = 0;
1232 if (band->antgain > QDB(6))
1233 delta = band->antgain - QDB(6); /* Excess over 6 dB */
1234 }
1235 439
1236 if (li == &locale_i) { 440 if (band->bandtype == BRCM_BAND_2G)
1237 conducted_max = QDB(22); 441 conducted_max = QDB(22);
1238 conducted_ofdm_max = QDB(22); 442
1239 } 443 maxpwr = QDB(ch->max_power) - delta;
444 maxpwr = max(maxpwr, 0);
445 maxpwr = min(maxpwr, conducted_max);
1240 446
1241 /* CCK txpwr limits for 2.4G band */ 447 /* CCK txpwr limits for 2.4G band */
1242 if (band->bandtype == BRCM_BAND_2G) { 448 if (band->bandtype == BRCM_BAND_2G) {
1243 maxpwr = li->maxpwr[CHANNEL_POWER_IDX_2G_CCK(chan)];
1244
1245 maxpwr = maxpwr - delta;
1246 maxpwr = max(maxpwr, 0);
1247 maxpwr = min(maxpwr, conducted_max);
1248
1249 for (i = 0; i < BRCMS_NUM_RATES_CCK; i++) 449 for (i = 0; i < BRCMS_NUM_RATES_CCK; i++)
1250 txpwr->cck[i] = (u8) maxpwr; 450 txpwr->cck[i] = (u8) maxpwr;
1251 } 451 }
1252 452
1253 /* OFDM txpwr limits for 2.4G or 5G bands */ 453 for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++) {
1254 if (band->bandtype == BRCM_BAND_2G)
1255 maxpwr = li->maxpwr[CHANNEL_POWER_IDX_2G_OFDM(chan)];
1256 else
1257 maxpwr = li->maxpwr[CHANNEL_POWER_IDX_5G(chan)];
1258
1259 maxpwr = maxpwr - delta;
1260 maxpwr = max(maxpwr, 0);
1261 maxpwr = min(maxpwr, conducted_ofdm_max);
1262
1263 /* Keep OFDM lmit below CCK limit */
1264 if (band->bandtype == BRCM_BAND_2G)
1265 maxpwr = min_t(int, maxpwr, txpwr->cck[0]);
1266
1267 for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
1268 txpwr->ofdm[i] = (u8) maxpwr; 454 txpwr->ofdm[i] = (u8) maxpwr;
1269 455
1270 for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++) {
1271 /* 456 /*
1272 * OFDM 40 MHz SISO has the same power as the corresponding 457 * OFDM 40 MHz SISO has the same power as the corresponding
1273 * MCS0-7 rate unless overriden by the locale specific code. 458 * MCS0-7 rate unless overriden by the locale specific code.
@@ -1282,14 +467,9 @@ brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
1282 txpwr->ofdm_40_cdd[i] = 0; 467 txpwr->ofdm_40_cdd[i] = 0;
1283 } 468 }
1284 469
1285 /* MIMO/HT specific limits */ 470 delta = 0;
1286 if (li_mimo->flags & BRCMS_EIRP) { 471 if (band->antgain > QDB(6))
1287 delta = band->antgain; 472 delta = band->antgain - QDB(6); /* Excess over 6 dB */
1288 } else {
1289 delta = 0;
1290 if (band->antgain > QDB(6))
1291 delta = band->antgain - QDB(6); /* Excess over 6 dB */
1292 }
1293 473
1294 if (band->bandtype == BRCM_BAND_2G) 474 if (band->bandtype == BRCM_BAND_2G)
1295 maxpwr_idx = (chan - 1); 475 maxpwr_idx = (chan - 1);
@@ -1431,8 +611,7 @@ static bool brcms_c_chspec_malformed(u16 chanspec)
1431 * and they are also a legal HT combination 611 * and they are also a legal HT combination
1432 */ 612 */
1433static bool 613static bool
1434brcms_c_valid_chanspec_ext(struct brcms_cm_info *wlc_cm, u16 chspec, 614brcms_c_valid_chanspec_ext(struct brcms_cm_info *wlc_cm, u16 chspec)
1435 bool dualband)
1436{ 615{
1437 struct brcms_c_info *wlc = wlc_cm->wlc; 616 struct brcms_c_info *wlc = wlc_cm->wlc;
1438 u8 channel = CHSPEC_CHANNEL(chspec); 617 u8 channel = CHSPEC_CHANNEL(chspec);
@@ -1448,59 +627,163 @@ brcms_c_valid_chanspec_ext(struct brcms_cm_info *wlc_cm, u16 chspec,
1448 chspec_bandunit(chspec)) 627 chspec_bandunit(chspec))
1449 return false; 628 return false;
1450 629
1451 /* Check a 20Mhz channel */ 630 return true;
1452 if (CHSPEC_IS20(chspec)) { 631}
1453 if (dualband) 632
1454 return brcms_c_valid_channel20_db(wlc_cm->wlc->cmi, 633bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm, u16 chspec)
1455 channel); 634{
1456 else 635 return brcms_c_valid_chanspec_ext(wlc_cm, chspec);
1457 return brcms_c_valid_channel20(wlc_cm->wlc->cmi, 636}
1458 channel); 637
638static bool brcms_is_radar_freq(u16 center_freq)
639{
640 return center_freq >= 5260 && center_freq <= 5700;
641}
642
643static void brcms_reg_apply_radar_flags(struct wiphy *wiphy)
644{
645 struct ieee80211_supported_band *sband;
646 struct ieee80211_channel *ch;
647 int i;
648
649 sband = wiphy->bands[IEEE80211_BAND_5GHZ];
650 if (!sband)
651 return;
652
653 for (i = 0; i < sband->n_channels; i++) {
654 ch = &sband->channels[i];
655
656 if (!brcms_is_radar_freq(ch->center_freq))
657 continue;
658
659 /*
660 * All channels in this range should be passive and have
661 * DFS enabled.
662 */
663 if (!(ch->flags & IEEE80211_CHAN_DISABLED))
664 ch->flags |= IEEE80211_CHAN_RADAR |
665 IEEE80211_CHAN_NO_IBSS |
666 IEEE80211_CHAN_PASSIVE_SCAN;
1459 } 667 }
1460#ifdef SUPPORT_40MHZ 668}
1461 /* 669
1462 * We know we are now checking a 40MHZ channel, so we should 670static void
1463 * only be here for NPHYS 671brcms_reg_apply_beaconing_flags(struct wiphy *wiphy,
1464 */ 672 enum nl80211_reg_initiator initiator)
1465 if (BRCMS_ISNPHY(wlc->band) || BRCMS_ISSSLPNPHY(wlc->band)) { 673{
1466 u8 upper_sideband = 0, idx; 674 struct ieee80211_supported_band *sband;
1467 u8 num_ch20_entries = 675 struct ieee80211_channel *ch;
1468 sizeof(chan20_info) / sizeof(struct chan20_info); 676 const struct ieee80211_reg_rule *rule;
1469 677 int band, i, ret;
1470 if (!VALID_40CHANSPEC_IN_BAND(wlc, chspec_bandunit(chspec))) 678
1471 return false; 679 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
1472 680 sband = wiphy->bands[band];
1473 if (dualband) { 681 if (!sband)
1474 if (!brcms_c_valid_channel20_db(wlc->cmi, 682 continue;
1475 lower_20_sb(channel)) || 683
1476 !brcms_c_valid_channel20_db(wlc->cmi, 684 for (i = 0; i < sband->n_channels; i++) {
1477 upper_20_sb(channel))) 685 ch = &sband->channels[i];
1478 return false; 686
1479 } else { 687 if (ch->flags &
1480 if (!brcms_c_valid_channel20(wlc->cmi, 688 (IEEE80211_CHAN_DISABLED | IEEE80211_CHAN_RADAR))
1481 lower_20_sb(channel)) || 689 continue;
1482 !brcms_c_valid_channel20(wlc->cmi, 690
1483 upper_20_sb(channel))) 691 if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) {
1484 return false; 692 ret = freq_reg_info(wiphy, ch->center_freq,
693 0, &rule);
694 if (ret)
695 continue;
696
697 if (!(rule->flags & NL80211_RRF_NO_IBSS))
698 ch->flags &= ~IEEE80211_CHAN_NO_IBSS;
699 if (!(rule->flags & NL80211_RRF_PASSIVE_SCAN))
700 ch->flags &=
701 ~IEEE80211_CHAN_PASSIVE_SCAN;
702 } else if (ch->beacon_found) {
703 ch->flags &= ~(IEEE80211_CHAN_NO_IBSS |
704 IEEE80211_CHAN_PASSIVE_SCAN);
705 }
1485 } 706 }
707 }
708}
1486 709
1487 /* find the lower sideband info in the sideband array */ 710static int brcms_reg_notifier(struct wiphy *wiphy,
1488 for (idx = 0; idx < num_ch20_entries; idx++) { 711 struct regulatory_request *request)
1489 if (chan20_info[idx].sb == lower_20_sb(channel)) 712{
1490 upper_sideband = chan20_info[idx].adj_sbs; 713 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
714 struct brcms_info *wl = hw->priv;
715 struct brcms_c_info *wlc = wl->wlc;
716 struct ieee80211_supported_band *sband;
717 struct ieee80211_channel *ch;
718 int band, i;
719 bool ch_found = false;
720
721 brcms_reg_apply_radar_flags(wiphy);
722
723 if (request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE)
724 brcms_reg_apply_beaconing_flags(wiphy, request->initiator);
725
726 /* Disable radio if all channels disallowed by regulatory */
727 for (band = 0; !ch_found && band < IEEE80211_NUM_BANDS; band++) {
728 sband = wiphy->bands[band];
729 if (!sband)
730 continue;
731
732 for (i = 0; !ch_found && i < sband->n_channels; i++) {
733 ch = &sband->channels[i];
734
735 if (!(ch->flags & IEEE80211_CHAN_DISABLED))
736 ch_found = true;
1491 } 737 }
1492 /* check that the lower sideband allows an upper sideband */
1493 if ((upper_sideband & (CH_UPPER_SB | CH_EWA_VALID)) ==
1494 (CH_UPPER_SB | CH_EWA_VALID))
1495 return true;
1496 return false;
1497 } 738 }
1498#endif /* 40 MHZ */
1499 739
1500 return false; 740 if (ch_found) {
741 mboolclr(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE);
742 } else {
743 mboolset(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE);
744 wiphy_err(wlc->wiphy, "wl%d: %s: no valid channel for \"%s\"\n",
745 wlc->pub->unit, __func__, request->alpha2);
746 }
747
748 if (wlc->pub->_nbands > 1 || wlc->band->bandtype == BRCM_BAND_2G)
749 wlc_phy_chanspec_ch14_widefilter_set(wlc->band->pi,
750 brcms_c_japan_ccode(request->alpha2));
751
752 return 0;
1501} 753}
1502 754
1503bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm, u16 chspec) 755void brcms_c_regd_init(struct brcms_c_info *wlc)
1504{ 756{
1505 return brcms_c_valid_chanspec_ext(wlc_cm, chspec, true); 757 struct wiphy *wiphy = wlc->wiphy;
758 const struct brcms_regd *regd = wlc->cmi->world_regd;
759 struct ieee80211_supported_band *sband;
760 struct ieee80211_channel *ch;
761 struct brcms_chanvec sup_chan;
762 struct brcms_band *band;
763 int band_idx, i;
764
765 /* Disable any channels not supported by the phy */
766 for (band_idx = 0; band_idx < wlc->pub->_nbands; band_idx++) {
767 band = wlc->bandstate[band_idx];
768
769 wlc_phy_chanspec_band_validch(band->pi, band->bandtype,
770 &sup_chan);
771
772 if (band_idx == BAND_2G_INDEX)
773 sband = wiphy->bands[IEEE80211_BAND_2GHZ];
774 else
775 sband = wiphy->bands[IEEE80211_BAND_5GHZ];
776
777 for (i = 0; i < sband->n_channels; i++) {
778 ch = &sband->channels[i];
779 if (!isset(sup_chan.vec, ch->hw_value))
780 ch->flags |= IEEE80211_CHAN_DISABLED;
781 }
782 }
783
784 wlc->wiphy->reg_notifier = brcms_reg_notifier;
785 wlc->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
786 WIPHY_FLAG_STRICT_REGULATORY;
787 wiphy_apply_custom_regulatory(wlc->wiphy, regd->regdomain);
788 brcms_reg_apply_beaconing_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER);
1506} 789}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.h b/drivers/net/wireless/brcm80211/brcmsmac/channel.h
index 808cb4fbfbe7..006483a0abe6 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.h
@@ -37,9 +37,6 @@ brcms_c_channel_mgr_attach(struct brcms_c_info *wlc);
37 37
38extern void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm); 38extern void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm);
39 39
40extern u8 brcms_c_channel_locale_flags_in_band(struct brcms_cm_info *wlc_cm,
41 uint bandunit);
42
43extern bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm, 40extern bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm,
44 u16 chspec); 41 u16 chspec);
45 42
@@ -49,5 +46,6 @@ extern void brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm,
49extern void brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, 46extern void brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm,
50 u16 chanspec, 47 u16 chanspec,
51 u8 local_constraint_qdbm); 48 u8 local_constraint_qdbm);
49extern void brcms_c_regd_init(struct brcms_c_info *wlc);
52 50
53#endif /* _WLC_CHANNEL_H */ 51#endif /* _WLC_CHANNEL_H */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
index 11054ae9d4f6..5e53305bd9a9 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
@@ -573,6 +573,7 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
573 struct dma_info *di; 573 struct dma_info *di;
574 u8 rev = core->id.rev; 574 u8 rev = core->id.rev;
575 uint size; 575 uint size;
576 struct si_info *sii = container_of(sih, struct si_info, pub);
576 577
577 /* allocate private info structure */ 578 /* allocate private info structure */
578 di = kzalloc(sizeof(struct dma_info), GFP_ATOMIC); 579 di = kzalloc(sizeof(struct dma_info), GFP_ATOMIC);
@@ -633,16 +634,20 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
633 */ 634 */
634 di->ddoffsetlow = 0; 635 di->ddoffsetlow = 0;
635 di->dataoffsetlow = 0; 636 di->dataoffsetlow = 0;
636 /* add offset for pcie with DMA64 bus */ 637 /* for pci bus, add offset */
637 di->ddoffsetlow = 0; 638 if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI) {
638 di->ddoffsethigh = SI_PCIE_DMA_H32; 639 /* add offset for pcie with DMA64 bus */
640 di->ddoffsetlow = 0;
641 di->ddoffsethigh = SI_PCIE_DMA_H32;
642 }
639 di->dataoffsetlow = di->ddoffsetlow; 643 di->dataoffsetlow = di->ddoffsetlow;
640 di->dataoffsethigh = di->ddoffsethigh; 644 di->dataoffsethigh = di->ddoffsethigh;
645
641 /* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */ 646 /* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
642 if ((core->id.id == SDIOD_CORE_ID) 647 if ((core->id.id == BCMA_CORE_SDIO_DEV)
643 && ((rev > 0) && (rev <= 2))) 648 && ((rev > 0) && (rev <= 2)))
644 di->addrext = false; 649 di->addrext = false;
645 else if ((core->id.id == I2S_CORE_ID) && 650 else if ((core->id.id == BCMA_CORE_I2S) &&
646 ((rev == 0) || (rev == 1))) 651 ((rev == 0) || (rev == 1)))
647 di->addrext = false; 652 di->addrext = false;
648 else 653 else
@@ -1433,7 +1438,7 @@ void dma_walk_packets(struct dma_pub *dmah, void (*callback_fnc)
1433 struct ieee80211_tx_info *tx_info; 1438 struct ieee80211_tx_info *tx_info;
1434 1439
1435 while (i != end) { 1440 while (i != end) {
1436 skb = (struct sk_buff *)di->txp[i]; 1441 skb = di->txp[i];
1437 if (skb != NULL) { 1442 if (skb != NULL) {
1438 tx_info = (struct ieee80211_tx_info *)skb->cb; 1443 tx_info = (struct ieee80211_tx_info *)skb->cb;
1439 (callback_fnc)(tx_info, arg_a); 1444 (callback_fnc)(tx_info, arg_a);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 50f92a0b7c41..2b57f57a7927 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -319,8 +319,7 @@ static void brcms_ops_stop(struct ieee80211_hw *hw)
319 return; 319 return;
320 320
321 spin_lock_bh(&wl->lock); 321 spin_lock_bh(&wl->lock);
322 status = brcms_c_chipmatch(wl->wlc->hw->vendorid, 322 status = brcms_c_chipmatch(wl->wlc->hw->d11core);
323 wl->wlc->hw->deviceid);
324 spin_unlock_bh(&wl->lock); 323 spin_unlock_bh(&wl->lock);
325 if (!status) { 324 if (!status) {
326 wiphy_err(wl->wiphy, 325 wiphy_err(wl->wiphy,
@@ -721,14 +720,6 @@ static const struct ieee80211_ops brcms_ops = {
721 .flush = brcms_ops_flush, 720 .flush = brcms_ops_flush,
722}; 721};
723 722
724/*
725 * is called in brcms_bcma_probe() context, therefore no locking required.
726 */
727static int brcms_set_hint(struct brcms_info *wl, char *abbrev)
728{
729 return regulatory_hint(wl->pub->ieee_hw->wiphy, abbrev);
730}
731
732void brcms_dpc(unsigned long data) 723void brcms_dpc(unsigned long data)
733{ 724{
734 struct brcms_info *wl; 725 struct brcms_info *wl;
@@ -1058,6 +1049,8 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
1058 goto fail; 1049 goto fail;
1059 } 1050 }
1060 1051
1052 brcms_c_regd_init(wl->wlc);
1053
1061 memcpy(perm, &wl->pub->cur_etheraddr, ETH_ALEN); 1054 memcpy(perm, &wl->pub->cur_etheraddr, ETH_ALEN);
1062 if (WARN_ON(!is_valid_ether_addr(perm))) 1055 if (WARN_ON(!is_valid_ether_addr(perm)))
1063 goto fail; 1056 goto fail;
@@ -1068,9 +1061,9 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
1068 wiphy_err(wl->wiphy, "%s: ieee80211_register_hw failed, status" 1061 wiphy_err(wl->wiphy, "%s: ieee80211_register_hw failed, status"
1069 "%d\n", __func__, err); 1062 "%d\n", __func__, err);
1070 1063
1071 if (wl->pub->srom_ccode[0] && brcms_set_hint(wl, wl->pub->srom_ccode)) 1064 if (wl->pub->srom_ccode[0] &&
1072 wiphy_err(wl->wiphy, "%s: regulatory_hint failed, status %d\n", 1065 regulatory_hint(wl->wiphy, wl->pub->srom_ccode))
1073 __func__, err); 1066 wiphy_err(wl->wiphy, "%s: regulatory hint failed\n", __func__);
1074 1067
1075 n_adapters_found++; 1068 n_adapters_found++;
1076 return wl; 1069 return wl;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 19db4052c44c..cb73f2250b11 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -18,6 +18,7 @@
18 18
19#include <linux/pci_ids.h> 19#include <linux/pci_ids.h>
20#include <linux/if_ether.h> 20#include <linux/if_ether.h>
21#include <net/cfg80211.h>
21#include <net/mac80211.h> 22#include <net/mac80211.h>
22#include <brcm_hw_ids.h> 23#include <brcm_hw_ids.h>
23#include <aiutils.h> 24#include <aiutils.h>
@@ -268,7 +269,7 @@ struct brcms_c_bit_desc {
268 */ 269 */
269 270
270/* Starting corerev for the fifo size table */ 271/* Starting corerev for the fifo size table */
271#define XMTFIFOTBL_STARTREV 20 272#define XMTFIFOTBL_STARTREV 17
272 273
273struct d11init { 274struct d11init {
274 __le16 addr; 275 __le16 addr;
@@ -332,6 +333,12 @@ const u8 wlc_prio2prec_map[] = {
332}; 333};
333 334
334static const u16 xmtfifo_sz[][NFIFO] = { 335static const u16 xmtfifo_sz[][NFIFO] = {
336 /* corerev 17: 5120, 49152, 49152, 5376, 4352, 1280 */
337 {20, 192, 192, 21, 17, 5},
338 /* corerev 18: */
339 {0, 0, 0, 0, 0, 0},
340 /* corerev 19: */
341 {0, 0, 0, 0, 0, 0},
335 /* corerev 20: 5120, 49152, 49152, 5376, 4352, 1280 */ 342 /* corerev 20: 5120, 49152, 49152, 5376, 4352, 1280 */
336 {20, 192, 192, 21, 17, 5}, 343 {20, 192, 192, 21, 17, 5},
337 /* corerev 21: 2304, 14848, 5632, 3584, 3584, 1280 */ 344 /* corerev 21: 2304, 14848, 5632, 3584, 3584, 1280 */
@@ -342,6 +349,14 @@ static const u16 xmtfifo_sz[][NFIFO] = {
342 {20, 192, 192, 21, 17, 5}, 349 {20, 192, 192, 21, 17, 5},
343 /* corerev 24: 2304, 14848, 5632, 3584, 3584, 1280 */ 350 /* corerev 24: 2304, 14848, 5632, 3584, 3584, 1280 */
344 {9, 58, 22, 14, 14, 5}, 351 {9, 58, 22, 14, 14, 5},
352 /* corerev 25: */
353 {0, 0, 0, 0, 0, 0},
354 /* corerev 26: */
355 {0, 0, 0, 0, 0, 0},
356 /* corerev 27: */
357 {0, 0, 0, 0, 0, 0},
358 /* corerev 28: 2304, 14848, 5632, 3584, 3584, 1280 */
359 {9, 58, 22, 14, 14, 5},
345}; 360};
346 361
347#ifdef DEBUG 362#ifdef DEBUG
@@ -1941,7 +1956,8 @@ static bool brcms_b_radio_read_hwdisabled(struct brcms_hardware *wlc_hw)
1941 * accesses phyreg throughput mac. This can be skipped since 1956 * accesses phyreg throughput mac. This can be skipped since
1942 * only mac reg is accessed below 1957 * only mac reg is accessed below
1943 */ 1958 */
1944 flags |= SICF_PCLKE; 1959 if (D11REV_GE(wlc_hw->corerev, 18))
1960 flags |= SICF_PCLKE;
1945 1961
1946 /* 1962 /*
1947 * TODO: test suspend/resume 1963 * TODO: test suspend/resume
@@ -2022,7 +2038,8 @@ void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
2022 * phyreg throughput mac, AND phy_reset is skipped at early stage when 2038 * phyreg throughput mac, AND phy_reset is skipped at early stage when
2023 * band->pi is invalid. need to enable PHY CLK 2039 * band->pi is invalid. need to enable PHY CLK
2024 */ 2040 */
2025 flags |= SICF_PCLKE; 2041 if (D11REV_GE(wlc_hw->corerev, 18))
2042 flags |= SICF_PCLKE;
2026 2043
2027 /* 2044 /*
2028 * reset the core 2045 * reset the core
@@ -2125,8 +2142,8 @@ void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode)
2125{ 2142{
2126 struct bcma_device *core = wlc_hw->d11core; 2143 struct bcma_device *core = wlc_hw->d11core;
2127 2144
2128 if ((ai_get_chip_id(wlc_hw->sih) == BCM43224_CHIP_ID) || 2145 if ((ai_get_chip_id(wlc_hw->sih) == BCMA_CHIP_ID_BCM43224) ||
2129 (ai_get_chip_id(wlc_hw->sih) == BCM43225_CHIP_ID)) { 2146 (ai_get_chip_id(wlc_hw->sih) == BCMA_CHIP_ID_BCM43225)) {
2130 if (spurmode == WL_SPURAVOID_ON2) { /* 126Mhz */ 2147 if (spurmode == WL_SPURAVOID_ON2) { /* 126Mhz */
2131 bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x2082); 2148 bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x2082);
2132 bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8); 2149 bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8);
@@ -2790,7 +2807,7 @@ void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on)
2790 tmp = 0; 2807 tmp = 0;
2791 2808
2792 if (on) { 2809 if (on) {
2793 if ((ai_get_chip_id(wlc_hw->sih) == BCM4313_CHIP_ID)) { 2810 if ((ai_get_chip_id(wlc_hw->sih) == BCMA_CHIP_ID_BCM4313)) {
2794 bcma_set32(core, D11REGOFFS(clk_ctl_st), 2811 bcma_set32(core, D11REGOFFS(clk_ctl_st),
2795 CCS_ERSRC_REQ_HT | 2812 CCS_ERSRC_REQ_HT |
2796 CCS_ERSRC_REQ_D11PLL | 2813 CCS_ERSRC_REQ_D11PLL |
@@ -3139,20 +3156,6 @@ void brcms_c_reset(struct brcms_c_info *wlc)
3139 brcms_b_reset(wlc->hw); 3156 brcms_b_reset(wlc->hw);
3140} 3157}
3141 3158
3142/* Return the channel the driver should initialize during brcms_c_init.
3143 * the channel may have to be changed from the currently configured channel
3144 * if other configurations are in conflict (bandlocked, 11n mode disabled,
3145 * invalid channel for current country, etc.)
3146 */
3147static u16 brcms_c_init_chanspec(struct brcms_c_info *wlc)
3148{
3149 u16 chanspec =
3150 1 | WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE |
3151 WL_CHANSPEC_BAND_2G;
3152
3153 return chanspec;
3154}
3155
3156void brcms_c_init_scb(struct scb *scb) 3159void brcms_c_init_scb(struct scb *scb)
3157{ 3160{
3158 int i; 3161 int i;
@@ -4231,9 +4234,8 @@ static void brcms_c_radio_timer(void *arg)
4231} 4234}
4232 4235
4233/* common low-level watchdog code */ 4236/* common low-level watchdog code */
4234static void brcms_b_watchdog(void *arg) 4237static void brcms_b_watchdog(struct brcms_c_info *wlc)
4235{ 4238{
4236 struct brcms_c_info *wlc = (struct brcms_c_info *) arg;
4237 struct brcms_hardware *wlc_hw = wlc->hw; 4239 struct brcms_hardware *wlc_hw = wlc->hw;
4238 4240
4239 BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit); 4241 BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
@@ -4254,10 +4256,8 @@ static void brcms_b_watchdog(void *arg)
4254} 4256}
4255 4257
4256/* common watchdog code */ 4258/* common watchdog code */
4257static void brcms_c_watchdog(void *arg) 4259static void brcms_c_watchdog(struct brcms_c_info *wlc)
4258{ 4260{
4259 struct brcms_c_info *wlc = (struct brcms_c_info *) arg;
4260
4261 BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit); 4261 BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
4262 4262
4263 if (!wlc->pub->up) 4263 if (!wlc->pub->up)
@@ -4297,7 +4297,9 @@ static void brcms_c_watchdog(void *arg)
4297 4297
4298static void brcms_c_watchdog_by_timer(void *arg) 4298static void brcms_c_watchdog_by_timer(void *arg)
4299{ 4299{
4300 brcms_c_watchdog(arg); 4300 struct brcms_c_info *wlc = (struct brcms_c_info *) arg;
4301
4302 brcms_c_watchdog(wlc);
4301} 4303}
4302 4304
4303static bool brcms_c_timers_init(struct brcms_c_info *wlc, int unit) 4305static bool brcms_c_timers_init(struct brcms_c_info *wlc, int unit)
@@ -4467,11 +4469,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
4467 } 4469 }
4468 4470
4469 /* verify again the device is supported */ 4471 /* verify again the device is supported */
4470 if (core->bus->hosttype == BCMA_HOSTTYPE_PCI && 4472 if (!brcms_c_chipmatch(core)) {
4471 !brcms_c_chipmatch(pcidev->vendor, pcidev->device)) { 4473 wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported device\n",
4472 wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported " 4474 unit);
4473 "vendor/device (0x%x/0x%x)\n",
4474 unit, pcidev->vendor, pcidev->device);
4475 err = 12; 4475 err = 12;
4476 goto fail; 4476 goto fail;
4477 } 4477 }
@@ -4541,7 +4541,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
4541 else 4541 else
4542 wlc_hw->_nbands = 1; 4542 wlc_hw->_nbands = 1;
4543 4543
4544 if ((ai_get_chip_id(wlc_hw->sih) == BCM43225_CHIP_ID)) 4544 if ((ai_get_chip_id(wlc_hw->sih) == BCMA_CHIP_ID_BCM43225))
4545 wlc_hw->_nbands = 1; 4545 wlc_hw->_nbands = 1;
4546 4546
4547 /* BMAC_NOTE: remove init of pub values when brcms_c_attach() 4547 /* BMAC_NOTE: remove init of pub values when brcms_c_attach()
@@ -4608,8 +4608,12 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
4608 wlc_hw->machwcap_backup = wlc_hw->machwcap; 4608 wlc_hw->machwcap_backup = wlc_hw->machwcap;
4609 4609
4610 /* init tx fifo size */ 4610 /* init tx fifo size */
4611 WARN_ON((wlc_hw->corerev - XMTFIFOTBL_STARTREV) < 0 ||
4612 (wlc_hw->corerev - XMTFIFOTBL_STARTREV) >
4613 ARRAY_SIZE(xmtfifo_sz));
4611 wlc_hw->xmtfifo_sz = 4614 wlc_hw->xmtfifo_sz =
4612 xmtfifo_sz[(wlc_hw->corerev - XMTFIFOTBL_STARTREV)]; 4615 xmtfifo_sz[(wlc_hw->corerev - XMTFIFOTBL_STARTREV)];
4616 WARN_ON(!wlc_hw->xmtfifo_sz[0]);
4613 4617
4614 /* Get a phy for this band */ 4618 /* Get a phy for this band */
4615 wlc_hw->band->pi = 4619 wlc_hw->band->pi =
@@ -5049,7 +5053,7 @@ static void brcms_b_hw_up(struct brcms_hardware *wlc_hw)
5049 wlc_hw->wlc->pub->hw_up = true; 5053 wlc_hw->wlc->pub->hw_up = true;
5050 5054
5051 if ((wlc_hw->boardflags & BFL_FEM) 5055 if ((wlc_hw->boardflags & BFL_FEM)
5052 && (ai_get_chip_id(wlc_hw->sih) == BCM4313_CHIP_ID)) { 5056 && (ai_get_chip_id(wlc_hw->sih) == BCMA_CHIP_ID_BCM4313)) {
5053 if (! 5057 if (!
5054 (wlc_hw->boardrev >= 0x1250 5058 (wlc_hw->boardrev >= 0x1250
5055 && (wlc_hw->boardflags & BFL_FEM_BT))) 5059 && (wlc_hw->boardflags & BFL_FEM_BT)))
@@ -5129,6 +5133,8 @@ static void brcms_c_wme_retries_write(struct brcms_c_info *wlc)
5129/* make interface operational */ 5133/* make interface operational */
5130int brcms_c_up(struct brcms_c_info *wlc) 5134int brcms_c_up(struct brcms_c_info *wlc)
5131{ 5135{
5136 struct ieee80211_channel *ch;
5137
5132 BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit); 5138 BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
5133 5139
5134 /* HW is turned off so don't try to access it */ 5140 /* HW is turned off so don't try to access it */
@@ -5141,7 +5147,7 @@ int brcms_c_up(struct brcms_c_info *wlc)
5141 } 5147 }
5142 5148
5143 if ((wlc->pub->boardflags & BFL_FEM) 5149 if ((wlc->pub->boardflags & BFL_FEM)
5144 && (ai_get_chip_id(wlc->hw->sih) == BCM4313_CHIP_ID)) { 5150 && (ai_get_chip_id(wlc->hw->sih) == BCMA_CHIP_ID_BCM4313)) {
5145 if (wlc->pub->boardrev >= 0x1250 5151 if (wlc->pub->boardrev >= 0x1250
5146 && (wlc->pub->boardflags & BFL_FEM_BT)) 5152 && (wlc->pub->boardflags & BFL_FEM_BT))
5147 brcms_b_mhf(wlc->hw, MHF5, MHF5_4313_GPIOCTRL, 5153 brcms_b_mhf(wlc->hw, MHF5, MHF5_4313_GPIOCTRL,
@@ -5195,8 +5201,9 @@ int brcms_c_up(struct brcms_c_info *wlc)
5195 wlc->pub->up = true; 5201 wlc->pub->up = true;
5196 5202
5197 if (wlc->bandinit_pending) { 5203 if (wlc->bandinit_pending) {
5204 ch = wlc->pub->ieee_hw->conf.channel;
5198 brcms_c_suspend_mac_and_wait(wlc); 5205 brcms_c_suspend_mac_and_wait(wlc);
5199 brcms_c_set_chanspec(wlc, wlc->default_bss->chanspec); 5206 brcms_c_set_chanspec(wlc, ch20mhz_chspec(ch->hw_value));
5200 wlc->bandinit_pending = false; 5207 wlc->bandinit_pending = false;
5201 brcms_c_enable_mac(wlc); 5208 brcms_c_enable_mac(wlc);
5202 } 5209 }
@@ -5397,11 +5404,6 @@ int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config)
5397 else 5404 else
5398 return -EINVAL; 5405 return -EINVAL;
5399 5406
5400 /* Legacy or bust when no OFDM is supported by regulatory */
5401 if ((brcms_c_channel_locale_flags_in_band(wlc->cmi, band->bandunit) &
5402 BRCMS_NO_OFDM) && (gmode != GMODE_LEGACY_B))
5403 return -EINVAL;
5404
5405 /* update configuration value */ 5407 /* update configuration value */
5406 if (config) 5408 if (config)
5407 brcms_c_protection_upd(wlc, BRCMS_PROT_G_USER, gmode); 5409 brcms_c_protection_upd(wlc, BRCMS_PROT_G_USER, gmode);
@@ -5782,8 +5784,12 @@ void brcms_c_print_txstatus(struct tx_status *txs)
5782 (txs->ackphyrxsh & PRXS1_SQ_MASK) >> PRXS1_SQ_SHIFT); 5784 (txs->ackphyrxsh & PRXS1_SQ_MASK) >> PRXS1_SQ_SHIFT);
5783} 5785}
5784 5786
5785bool brcms_c_chipmatch(u16 vendor, u16 device) 5787static bool brcms_c_chipmatch_pci(struct bcma_device *core)
5786{ 5788{
5789 struct pci_dev *pcidev = core->bus->host_pci;
5790 u16 vendor = pcidev->vendor;
5791 u16 device = pcidev->device;
5792
5787 if (vendor != PCI_VENDOR_ID_BROADCOM) { 5793 if (vendor != PCI_VENDOR_ID_BROADCOM) {
5788 pr_err("unknown vendor id %04x\n", vendor); 5794 pr_err("unknown vendor id %04x\n", vendor);
5789 return false; 5795 return false;
@@ -5802,6 +5808,30 @@ bool brcms_c_chipmatch(u16 vendor, u16 device)
5802 return false; 5808 return false;
5803} 5809}
5804 5810
5811static bool brcms_c_chipmatch_soc(struct bcma_device *core)
5812{
5813 struct bcma_chipinfo *chipinfo = &core->bus->chipinfo;
5814
5815 if (chipinfo->id == BCMA_CHIP_ID_BCM4716)
5816 return true;
5817
5818 pr_err("unknown chip id %04x\n", chipinfo->id);
5819 return false;
5820}
5821
5822bool brcms_c_chipmatch(struct bcma_device *core)
5823{
5824 switch (core->bus->hosttype) {
5825 case BCMA_HOSTTYPE_PCI:
5826 return brcms_c_chipmatch_pci(core);
5827 case BCMA_HOSTTYPE_SOC:
5828 return brcms_c_chipmatch_soc(core);
5829 default:
5830 pr_err("unknown host type: %i\n", core->bus->hosttype);
5831 return false;
5832 }
5833}
5834
5805#if defined(DEBUG) 5835#if defined(DEBUG)
5806void brcms_c_print_txdesc(struct d11txh *txh) 5836void brcms_c_print_txdesc(struct d11txh *txh)
5807{ 5837{
@@ -8201,19 +8231,12 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
8201void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx) 8231void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
8202{ 8232{
8203 struct bcma_device *core = wlc->hw->d11core; 8233 struct bcma_device *core = wlc->hw->d11core;
8234 struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.channel;
8204 u16 chanspec; 8235 u16 chanspec;
8205 8236
8206 BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit); 8237 BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
8207 8238
8208 /* 8239 chanspec = ch20mhz_chspec(ch->hw_value);
8209 * This will happen if a big-hammer was executed. In
8210 * that case, we want to go back to the channel that
8211 * we were on and not new channel
8212 */
8213 if (wlc->pub->associated)
8214 chanspec = wlc->home_chanspec;
8215 else
8216 chanspec = brcms_c_init_chanspec(wlc);
8217 8240
8218 brcms_b_init(wlc->hw, chanspec); 8241 brcms_b_init(wlc->hw, chanspec);
8219 8242
@@ -8318,7 +8341,7 @@ brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit,
8318 struct brcms_pub *pub; 8341 struct brcms_pub *pub;
8319 8342
8320 /* allocate struct brcms_c_info state and its substructures */ 8343 /* allocate struct brcms_c_info state and its substructures */
8321 wlc = (struct brcms_c_info *) brcms_c_attach_malloc(unit, &err, 0); 8344 wlc = brcms_c_attach_malloc(unit, &err, 0);
8322 if (wlc == NULL) 8345 if (wlc == NULL)
8323 goto fail; 8346 goto fail;
8324 wlc->wiphy = wl->wiphy; 8347 wlc->wiphy = wl->wiphy;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
index 264f8c4c703d..91937c5025ce 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
@@ -198,6 +198,8 @@ u16 read_radio_reg(struct brcms_phy *pi, u16 addr)
198 198
199void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val) 199void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val)
200{ 200{
201 struct si_info *sii = container_of(pi->sh->sih, struct si_info, pub);
202
201 if ((D11REV_GE(pi->sh->corerev, 24)) || 203 if ((D11REV_GE(pi->sh->corerev, 24)) ||
202 (D11REV_IS(pi->sh->corerev, 22) 204 (D11REV_IS(pi->sh->corerev, 22)
203 && (pi->pubpi.phy_type != PHY_TYPE_SSN))) { 205 && (pi->pubpi.phy_type != PHY_TYPE_SSN))) {
@@ -209,7 +211,8 @@ void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val)
209 bcma_write16(pi->d11core, D11REGOFFS(phy4wdatalo), val); 211 bcma_write16(pi->d11core, D11REGOFFS(phy4wdatalo), val);
210 } 212 }
211 213
212 if (++pi->phy_wreg >= pi->phy_wreg_limit) { 214 if ((sii->icbus->hosttype == BCMA_HOSTTYPE_PCI) &&
215 (++pi->phy_wreg >= pi->phy_wreg_limit)) {
213 (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol)); 216 (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
214 pi->phy_wreg = 0; 217 pi->phy_wreg = 0;
215 } 218 }
@@ -292,10 +295,13 @@ void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val)
292 bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr); 295 bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
293 bcma_write16(pi->d11core, D11REGOFFS(phyregdata), val); 296 bcma_write16(pi->d11core, D11REGOFFS(phyregdata), val);
294 if (addr == 0x72) 297 if (addr == 0x72)
295 (void)bcma_read16(pi->d11core, D11REGOFFS(phyversion)); 298 (void)bcma_read16(pi->d11core, D11REGOFFS(phyregdata));
296#else 299#else
300 struct si_info *sii = container_of(pi->sh->sih, struct si_info, pub);
301
297 bcma_write32(pi->d11core, D11REGOFFS(phyregaddr), addr | (val << 16)); 302 bcma_write32(pi->d11core, D11REGOFFS(phyregaddr), addr | (val << 16));
298 if (++pi->phy_wreg >= pi->phy_wreg_limit) { 303 if ((sii->icbus->hosttype == BCMA_HOSTTYPE_PCI) &&
304 (++pi->phy_wreg >= pi->phy_wreg_limit)) {
299 pi->phy_wreg = 0; 305 pi->phy_wreg = 0;
300 (void)bcma_read16(pi->d11core, D11REGOFFS(phyversion)); 306 (void)bcma_read16(pi->d11core, D11REGOFFS(phyversion));
301 } 307 }
@@ -837,7 +843,7 @@ wlc_phy_table_addr(struct brcms_phy *pi, uint tbl_id, uint tbl_offset,
837 pi->tbl_data_hi = tblDataHi; 843 pi->tbl_data_hi = tblDataHi;
838 pi->tbl_data_lo = tblDataLo; 844 pi->tbl_data_lo = tblDataLo;
839 845
840 if (pi->sh->chip == BCM43224_CHIP_ID && 846 if (pi->sh->chip == BCMA_CHIP_ID_BCM43224 &&
841 pi->sh->chiprev == 1) { 847 pi->sh->chiprev == 1) {
842 pi->tbl_addr = tblAddr; 848 pi->tbl_addr = tblAddr;
843 pi->tbl_save_id = tbl_id; 849 pi->tbl_save_id = tbl_id;
@@ -847,7 +853,7 @@ wlc_phy_table_addr(struct brcms_phy *pi, uint tbl_id, uint tbl_offset,
847 853
848void wlc_phy_table_data_write(struct brcms_phy *pi, uint width, u32 val) 854void wlc_phy_table_data_write(struct brcms_phy *pi, uint width, u32 val)
849{ 855{
850 if ((pi->sh->chip == BCM43224_CHIP_ID) && 856 if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224) &&
851 (pi->sh->chiprev == 1) && 857 (pi->sh->chiprev == 1) &&
852 (pi->tbl_save_id == NPHY_TBL_ID_ANTSWCTRLLUT)) { 858 (pi->tbl_save_id == NPHY_TBL_ID_ANTSWCTRLLUT)) {
853 read_phy_reg(pi, pi->tbl_data_lo); 859 read_phy_reg(pi, pi->tbl_data_lo);
@@ -881,7 +887,7 @@ wlc_phy_write_table(struct brcms_phy *pi, const struct phytbl_info *ptbl_info,
881 887
882 for (idx = 0; idx < ptbl_info->tbl_len; idx++) { 888 for (idx = 0; idx < ptbl_info->tbl_len; idx++) {
883 889
884 if ((pi->sh->chip == BCM43224_CHIP_ID) && 890 if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224) &&
885 (pi->sh->chiprev == 1) && 891 (pi->sh->chiprev == 1) &&
886 (tbl_id == NPHY_TBL_ID_ANTSWCTRLLUT)) { 892 (tbl_id == NPHY_TBL_ID_ANTSWCTRLLUT)) {
887 read_phy_reg(pi, tblDataLo); 893 read_phy_reg(pi, tblDataLo);
@@ -918,7 +924,7 @@ wlc_phy_read_table(struct brcms_phy *pi, const struct phytbl_info *ptbl_info,
918 924
919 for (idx = 0; idx < ptbl_info->tbl_len; idx++) { 925 for (idx = 0; idx < ptbl_info->tbl_len; idx++) {
920 926
921 if ((pi->sh->chip == BCM43224_CHIP_ID) && 927 if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224) &&
922 (pi->sh->chiprev == 1)) { 928 (pi->sh->chiprev == 1)) {
923 (void)read_phy_reg(pi, tblDataLo); 929 (void)read_phy_reg(pi, tblDataLo);
924 930
@@ -2894,7 +2900,7 @@ const u8 *wlc_phy_get_ofdm_rate_lookup(void)
2894 2900
2895void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode) 2901void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode)
2896{ 2902{
2897 if ((pi->sh->chip == BCM4313_CHIP_ID) && 2903 if ((pi->sh->chip == BCMA_CHIP_ID_BCM4313) &&
2898 (pi->sh->boardflags & BFL_FEM)) { 2904 (pi->sh->boardflags & BFL_FEM)) {
2899 if (mode) { 2905 if (mode) {
2900 u16 txant = 0; 2906 u16 txant = 0;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
index 13b261517cce..65db9b7458dc 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
@@ -14358,7 +14358,7 @@ void wlc_phy_nphy_tkip_rifs_war(struct brcms_phy *pi, u8 rifs)
14358 14358
14359 wlc_phy_write_txmacreg_nphy(pi, holdoff, delay); 14359 wlc_phy_write_txmacreg_nphy(pi, holdoff, delay);
14360 14360
14361 if (pi && pi->sh && (pi->sh->_rifs_phy != rifs)) 14361 if (pi->sh && (pi->sh->_rifs_phy != rifs))
14362 pi->sh->_rifs_phy = rifs; 14362 pi->sh->_rifs_phy = rifs;
14363} 14363}
14364 14364
@@ -17893,6 +17893,8 @@ static u32 *wlc_phy_get_ipa_gaintbl_nphy(struct brcms_phy *pi)
17893 nphy_tpc_txgain_ipa_2g_2057rev7; 17893 nphy_tpc_txgain_ipa_2g_2057rev7;
17894 } else if (NREV_IS(pi->pubpi.phy_rev, 6)) { 17894 } else if (NREV_IS(pi->pubpi.phy_rev, 6)) {
17895 tx_pwrctrl_tbl = nphy_tpc_txgain_ipa_rev6; 17895 tx_pwrctrl_tbl = nphy_tpc_txgain_ipa_rev6;
17896 if (pi->sh->chip == BCMA_CHIP_ID_BCM47162)
17897 tx_pwrctrl_tbl = nphy_tpc_txgain_ipa_rev5;
17896 } else if (NREV_IS(pi->pubpi.phy_rev, 5)) { 17898 } else if (NREV_IS(pi->pubpi.phy_rev, 5)) {
17897 tx_pwrctrl_tbl = nphy_tpc_txgain_ipa_rev5; 17899 tx_pwrctrl_tbl = nphy_tpc_txgain_ipa_rev5;
17898 } else { 17900 } else {
@@ -19254,8 +19256,14 @@ static void wlc_phy_spurwar_nphy(struct brcms_phy *pi)
19254 case 38: 19256 case 38:
19255 case 102: 19257 case 102:
19256 case 118: 19258 case 118:
19257 nphy_adj_tone_id_buf[0] = 0; 19259 if ((pi->sh->chip == BCMA_CHIP_ID_BCM4716) &&
19258 nphy_adj_noise_var_buf[0] = 0x0; 19260 (pi->sh->chippkg == BCMA_PKG_ID_BCM4717)) {
19261 nphy_adj_tone_id_buf[0] = 32;
19262 nphy_adj_noise_var_buf[0] = 0x21f;
19263 } else {
19264 nphy_adj_tone_id_buf[0] = 0;
19265 nphy_adj_noise_var_buf[0] = 0x0;
19266 }
19259 break; 19267 break;
19260 case 134: 19268 case 134:
19261 nphy_adj_tone_id_buf[0] = 32; 19269 nphy_adj_tone_id_buf[0] = 32;
@@ -19309,8 +19317,8 @@ void wlc_phy_init_nphy(struct brcms_phy *pi)
19309 pi->measure_hold |= PHY_HOLD_FOR_NOT_ASSOC; 19317 pi->measure_hold |= PHY_HOLD_FOR_NOT_ASSOC;
19310 19318
19311 if ((ISNPHY(pi)) && (NREV_GE(pi->pubpi.phy_rev, 5)) && 19319 if ((ISNPHY(pi)) && (NREV_GE(pi->pubpi.phy_rev, 5)) &&
19312 ((pi->sh->chippkg == BCM4717_PKG_ID) || 19320 ((pi->sh->chippkg == BCMA_PKG_ID_BCM4717) ||
19313 (pi->sh->chippkg == BCM4718_PKG_ID))) { 19321 (pi->sh->chippkg == BCMA_PKG_ID_BCM4718))) {
19314 if ((pi->sh->boardflags & BFL_EXTLNA) && 19322 if ((pi->sh->boardflags & BFL_EXTLNA) &&
19315 (CHSPEC_IS2G(pi->radio_chanspec))) 19323 (CHSPEC_IS2G(pi->radio_chanspec)))
19316 ai_cc_reg(pi->sh->sih, 19324 ai_cc_reg(pi->sh->sih,
@@ -19318,6 +19326,10 @@ void wlc_phy_init_nphy(struct brcms_phy *pi)
19318 0x40, 0x40); 19326 0x40, 0x40);
19319 } 19327 }
19320 19328
19329 if ((!PHY_IPA(pi)) && (pi->sh->chip == BCMA_CHIP_ID_BCM5357))
19330 si_pmu_chipcontrol(pi->sh->sih, 1, CCTRL5357_EXTPA,
19331 CCTRL5357_EXTPA);
19332
19321 if ((pi->nphy_gband_spurwar2_en) && CHSPEC_IS2G(pi->radio_chanspec) && 19333 if ((pi->nphy_gband_spurwar2_en) && CHSPEC_IS2G(pi->radio_chanspec) &&
19322 CHSPEC_IS40(pi->radio_chanspec)) { 19334 CHSPEC_IS40(pi->radio_chanspec)) {
19323 19335
@@ -20695,12 +20707,22 @@ wlc_phy_chanspec_radio2056_setup(struct brcms_phy *pi,
20695 write_radio_reg(pi, RADIO_2056_SYN_PLL_LOOPFILTER2 | 20707 write_radio_reg(pi, RADIO_2056_SYN_PLL_LOOPFILTER2 |
20696 RADIO_2056_SYN, 0x1f); 20708 RADIO_2056_SYN, 0x1f);
20697 20709
20698 write_radio_reg(pi, 20710 if ((pi->sh->chip == BCMA_CHIP_ID_BCM4716) ||
20699 RADIO_2056_SYN_PLL_LOOPFILTER4 | 20711 (pi->sh->chip == BCMA_CHIP_ID_BCM47162)) {
20700 RADIO_2056_SYN, 0xb); 20712 write_radio_reg(pi,
20701 write_radio_reg(pi, 20713 RADIO_2056_SYN_PLL_LOOPFILTER4 |
20702 RADIO_2056_SYN_PLL_CP2 | 20714 RADIO_2056_SYN, 0x14);
20703 RADIO_2056_SYN, 0x14); 20715 write_radio_reg(pi,
20716 RADIO_2056_SYN_PLL_CP2 |
20717 RADIO_2056_SYN, 0x00);
20718 } else {
20719 write_radio_reg(pi,
20720 RADIO_2056_SYN_PLL_LOOPFILTER4 |
20721 RADIO_2056_SYN, 0xb);
20722 write_radio_reg(pi,
20723 RADIO_2056_SYN_PLL_CP2 |
20724 RADIO_2056_SYN, 0x14);
20725 }
20704 } 20726 }
20705 } 20727 }
20706 20728
@@ -20747,24 +20769,30 @@ wlc_phy_chanspec_radio2056_setup(struct brcms_phy *pi,
20747 WRITE_RADIO_REG2(pi, RADIO_2056, TX, core, 20769 WRITE_RADIO_REG2(pi, RADIO_2056, TX, core,
20748 PADG_IDAC, 0xcc); 20770 PADG_IDAC, 0xcc);
20749 20771
20750 bias = 0x25; 20772 if ((pi->sh->chip == BCMA_CHIP_ID_BCM4716) ||
20751 cascbias = 0x20; 20773 (pi->sh->chip == BCMA_CHIP_ID_BCM47162)) {
20774 bias = 0x40;
20775 cascbias = 0x45;
20776 pag_boost_tune = 0x5;
20777 pgag_boost_tune = 0x33;
20778 padg_boost_tune = 0x77;
20779 mixg_boost_tune = 0x55;
20780 } else {
20781 bias = 0x25;
20782 cascbias = 0x20;
20752 20783
20753 if ((pi->sh->chip == 20784 if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224 ||
20754 BCM43224_CHIP_ID) 20785 pi->sh->chip == BCMA_CHIP_ID_BCM43225) &&
20755 || (pi->sh->chip == 20786 pi->sh->chippkg == BCMA_PKG_ID_BCM43224_FAB_SMIC) {
20756 BCM43225_CHIP_ID)) {
20757 if (pi->sh->chippkg ==
20758 BCM43224_FAB_SMIC) {
20759 bias = 0x2a; 20787 bias = 0x2a;
20760 cascbias = 0x38; 20788 cascbias = 0x38;
20761 } 20789 }
20762 }
20763 20790
20764 pag_boost_tune = 0x4; 20791 pag_boost_tune = 0x4;
20765 pgag_boost_tune = 0x03; 20792 pgag_boost_tune = 0x03;
20766 padg_boost_tune = 0x77; 20793 padg_boost_tune = 0x77;
20767 mixg_boost_tune = 0x65; 20794 mixg_boost_tune = 0x65;
20795 }
20768 20796
20769 WRITE_RADIO_REG2(pi, RADIO_2056, TX, core, 20797 WRITE_RADIO_REG2(pi, RADIO_2056, TX, core,
20770 INTPAG_IMAIN_STAT, bias); 20798 INTPAG_IMAIN_STAT, bias);
@@ -20863,11 +20891,10 @@ wlc_phy_chanspec_radio2056_setup(struct brcms_phy *pi,
20863 20891
20864 cascbias = 0x30; 20892 cascbias = 0x30;
20865 20893
20866 if ((pi->sh->chip == BCM43224_CHIP_ID) || 20894 if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224 ||
20867 (pi->sh->chip == BCM43225_CHIP_ID)) { 20895 pi->sh->chip == BCMA_CHIP_ID_BCM43225) &&
20868 if (pi->sh->chippkg == BCM43224_FAB_SMIC) 20896 pi->sh->chippkg == BCMA_PKG_ID_BCM43224_FAB_SMIC)
20869 cascbias = 0x35; 20897 cascbias = 0x35;
20870 }
20871 20898
20872 pabias = (pi->phy_pabias == 0) ? 0x30 : pi->phy_pabias; 20899 pabias = (pi->phy_pabias == 0) ? 0x30 : pi->phy_pabias;
20873 20900
@@ -21106,6 +21133,7 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, u16 chanspec,
21106 const struct nphy_sfo_cfg *ci) 21133 const struct nphy_sfo_cfg *ci)
21107{ 21134{
21108 u16 val; 21135 u16 val;
21136 struct si_info *sii = container_of(pi->sh->sih, struct si_info, pub);
21109 21137
21110 val = read_phy_reg(pi, 0x09) & NPHY_BandControl_currentBand; 21138 val = read_phy_reg(pi, 0x09) & NPHY_BandControl_currentBand;
21111 if (CHSPEC_IS5G(chanspec) && !val) { 21139 if (CHSPEC_IS5G(chanspec) && !val) {
@@ -21178,22 +21206,32 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, u16 chanspec,
21178 } else if (NREV_GE(pi->pubpi.phy_rev, 7)) { 21206 } else if (NREV_GE(pi->pubpi.phy_rev, 7)) {
21179 if (val == 54) 21207 if (val == 54)
21180 spuravoid = 1; 21208 spuravoid = 1;
21181 } else { 21209 } else if (pi->nphy_aband_spurwar_en &&
21182 if (pi->nphy_aband_spurwar_en && 21210 ((val == 38) || (val == 102) || (val == 118))) {
21183 ((val == 38) || (val == 102) 21211 if ((pi->sh->chip == BCMA_CHIP_ID_BCM4716)
21184 || (val == 118))) 21212 && (pi->sh->chippkg == BCMA_PKG_ID_BCM4717)) {
21213 spuravoid = 0;
21214 } else {
21185 spuravoid = 1; 21215 spuravoid = 1;
21216 }
21186 } 21217 }
21187 21218
21188 if (pi->phy_spuravoid == SPURAVOID_FORCEON) 21219 if (pi->phy_spuravoid == SPURAVOID_FORCEON)
21189 spuravoid = 1; 21220 spuravoid = 1;
21190 21221
21191 wlapi_bmac_core_phypll_ctl(pi->sh->physhim, false); 21222 if ((pi->sh->chip == BCMA_CHIP_ID_BCM4716) ||
21192 si_pmu_spuravoid_pllupdate(pi->sh->sih, spuravoid); 21223 (pi->sh->chip == BCMA_CHIP_ID_BCM43225)) {
21193 wlapi_bmac_core_phypll_ctl(pi->sh->physhim, true); 21224 bcma_pmu_spuravoid_pllupdate(&sii->icbus->drv_cc,
21225 spuravoid);
21226 } else {
21227 wlapi_bmac_core_phypll_ctl(pi->sh->physhim, false);
21228 bcma_pmu_spuravoid_pllupdate(&sii->icbus->drv_cc,
21229 spuravoid);
21230 wlapi_bmac_core_phypll_ctl(pi->sh->physhim, true);
21231 }
21194 21232
21195 if ((pi->sh->chip == BCM43224_CHIP_ID) || 21233 if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224) ||
21196 (pi->sh->chip == BCM43225_CHIP_ID)) { 21234 (pi->sh->chip == BCMA_CHIP_ID_BCM43225)) {
21197 if (spuravoid == 1) { 21235 if (spuravoid == 1) {
21198 bcma_write16(pi->d11core, 21236 bcma_write16(pi->d11core,
21199 D11REGOFFS(tsf_clk_frac_l), 21237 D11REGOFFS(tsf_clk_frac_l),
@@ -21209,7 +21247,9 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, u16 chanspec,
21209 } 21247 }
21210 } 21248 }
21211 21249
21212 wlapi_bmac_core_phypll_reset(pi->sh->physhim); 21250 if (!((pi->sh->chip == BCMA_CHIP_ID_BCM4716) ||
21251 (pi->sh->chip == BCMA_CHIP_ID_BCM47162)))
21252 wlapi_bmac_core_phypll_reset(pi->sh->physhim);
21213 21253
21214 mod_phy_reg(pi, 0x01, (0x1 << 15), 21254 mod_phy_reg(pi, 0x01, (0x1 << 15),
21215 ((spuravoid > 0) ? (0x1 << 15) : 0)); 21255 ((spuravoid > 0) ? (0x1 << 15) : 0));
@@ -22171,9 +22211,15 @@ s16 wlc_phy_tempsense_nphy(struct brcms_phy *pi)
22171 wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 1, 0x03, 16, 22211 wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 1, 0x03, 16,
22172 &auxADC_rssi_ctrlH_save); 22212 &auxADC_rssi_ctrlH_save);
22173 22213
22174 radio_temp[0] = (179 * (radio_temp[1] + radio_temp2[1]) 22214 if (pi->sh->chip == BCMA_CHIP_ID_BCM5357) {
22175 + 82 * (auxADC_Vl) - 28861 + 22215 radio_temp[0] = (193 * (radio_temp[1] + radio_temp2[1])
22176 128) / 256; 22216 + 88 * (auxADC_Vl) - 27111 +
22217 128) / 256;
22218 } else {
22219 radio_temp[0] = (179 * (radio_temp[1] + radio_temp2[1])
22220 + 82 * (auxADC_Vl) - 28861 +
22221 128) / 256;
22222 }
22177 22223
22178 offset = (s16) pi->phy_tempsense_offset; 22224 offset = (s16) pi->phy_tempsense_offset;
22179 22225
@@ -24923,14 +24969,16 @@ wlc_phy_a2_nphy(struct brcms_phy *pi, struct nphy_ipa_txcalgains *txgains,
24923 if (txgains->useindex) { 24969 if (txgains->useindex) {
24924 phy_a4 = 15 - ((txgains->index) >> 3); 24970 phy_a4 = 15 - ((txgains->index) >> 3);
24925 if (CHSPEC_IS2G(pi->radio_chanspec)) { 24971 if (CHSPEC_IS2G(pi->radio_chanspec)) {
24926 if (NREV_GE(pi->pubpi.phy_rev, 6)) 24972 if (NREV_GE(pi->pubpi.phy_rev, 6) &&
24973 pi->sh->chip == BCMA_CHIP_ID_BCM47162) {
24974 phy_a5 = 0x10f7 | (phy_a4 << 8);
24975 } else if (NREV_GE(pi->pubpi.phy_rev, 6)) {
24927 phy_a5 = 0x00f7 | (phy_a4 << 8); 24976 phy_a5 = 0x00f7 | (phy_a4 << 8);
24928 24977 } else if (NREV_IS(pi->pubpi.phy_rev, 5)) {
24929 else
24930 if (NREV_IS(pi->pubpi.phy_rev, 5))
24931 phy_a5 = 0x10f7 | (phy_a4 << 8); 24978 phy_a5 = 0x10f7 | (phy_a4 << 8);
24932 else 24979 } else {
24933 phy_a5 = 0x50f7 | (phy_a4 << 8); 24980 phy_a5 = 0x50f7 | (phy_a4 << 8);
24981 }
24934 } else { 24982 } else {
24935 phy_a5 = 0x70f7 | (phy_a4 << 8); 24983 phy_a5 = 0x70f7 | (phy_a4 << 8);
24936 } 24984 }
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pmu.c b/drivers/net/wireless/brcm80211/brcmsmac/pmu.c
index 4931d29d077b..7e9df566c733 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pmu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pmu.c
@@ -74,16 +74,6 @@
74 * PMU<rev>_PLL<num>_XX where <rev> is PMU corerev and <num> is an arbitrary 74 * PMU<rev>_PLL<num>_XX where <rev> is PMU corerev and <num> is an arbitrary
75 * number to differentiate different PLLs controlled by the same PMU rev. 75 * number to differentiate different PLLs controlled by the same PMU rev.
76 */ 76 */
77/* pllcontrol registers:
78 * ndiv_pwrdn, pwrdn_ch<x>, refcomp_pwrdn, dly_ch<x>,
79 * p1div, p2div, _bypass_sdmod
80 */
81#define PMU1_PLL0_PLLCTL0 0
82#define PMU1_PLL0_PLLCTL1 1
83#define PMU1_PLL0_PLLCTL2 2
84#define PMU1_PLL0_PLLCTL3 3
85#define PMU1_PLL0_PLLCTL4 4
86#define PMU1_PLL0_PLLCTL5 5
87 77
88/* pmu XtalFreqRatio */ 78/* pmu XtalFreqRatio */
89#define PMU_XTALFREQ_REG_ILPCTR_MASK 0x00001FFF 79#define PMU_XTALFREQ_REG_ILPCTR_MASK 0x00001FFF
@@ -108,118 +98,14 @@
108#define RES4313_HT_AVAIL_RSRC 14 98#define RES4313_HT_AVAIL_RSRC 14
109#define RES4313_MACPHY_CLK_AVAIL_RSRC 15 99#define RES4313_MACPHY_CLK_AVAIL_RSRC 15
110 100
111/* Determine min/max rsrc masks. Value 0 leaves hardware at default. */
112static void si_pmu_res_masks(struct si_pub *sih, u32 * pmin, u32 * pmax)
113{
114 u32 min_mask = 0, max_mask = 0;
115 uint rsrcs;
116
117 /* # resources */
118 rsrcs = (ai_get_pmucaps(sih) & PCAP_RC_MASK) >> PCAP_RC_SHIFT;
119
120 /* determine min/max rsrc masks */
121 switch (ai_get_chip_id(sih)) {
122 case BCM43224_CHIP_ID:
123 case BCM43225_CHIP_ID:
124 /* ??? */
125 break;
126
127 case BCM4313_CHIP_ID:
128 min_mask = PMURES_BIT(RES4313_BB_PU_RSRC) |
129 PMURES_BIT(RES4313_XTAL_PU_RSRC) |
130 PMURES_BIT(RES4313_ALP_AVAIL_RSRC) |
131 PMURES_BIT(RES4313_BB_PLL_PWRSW_RSRC);
132 max_mask = 0xffff;
133 break;
134 default:
135 break;
136 }
137
138 *pmin = min_mask;
139 *pmax = max_mask;
140}
141
142void si_pmu_spuravoid_pllupdate(struct si_pub *sih, u8 spuravoid)
143{
144 u32 tmp = 0;
145 struct bcma_device *core;
146
147 /* switch to chipc */
148 core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
149
150 switch (ai_get_chip_id(sih)) {
151 case BCM43224_CHIP_ID:
152 case BCM43225_CHIP_ID:
153 if (spuravoid == 1) {
154 bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
155 PMU1_PLL0_PLLCTL0);
156 bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
157 0x11500010);
158 bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
159 PMU1_PLL0_PLLCTL1);
160 bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
161 0x000C0C06);
162 bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
163 PMU1_PLL0_PLLCTL2);
164 bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
165 0x0F600a08);
166 bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
167 PMU1_PLL0_PLLCTL3);
168 bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
169 0x00000000);
170 bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
171 PMU1_PLL0_PLLCTL4);
172 bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
173 0x2001E920);
174 bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
175 PMU1_PLL0_PLLCTL5);
176 bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
177 0x88888815);
178 } else {
179 bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
180 PMU1_PLL0_PLLCTL0);
181 bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
182 0x11100010);
183 bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
184 PMU1_PLL0_PLLCTL1);
185 bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
186 0x000c0c06);
187 bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
188 PMU1_PLL0_PLLCTL2);
189 bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
190 0x03000a08);
191 bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
192 PMU1_PLL0_PLLCTL3);
193 bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
194 0x00000000);
195 bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
196 PMU1_PLL0_PLLCTL4);
197 bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
198 0x200005c0);
199 bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
200 PMU1_PLL0_PLLCTL5);
201 bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
202 0x88888815);
203 }
204 tmp = 1 << 10;
205 break;
206
207 default:
208 /* bail out */
209 return;
210 }
211
212 bcma_set32(core, CHIPCREGOFFS(pmucontrol), tmp);
213}
214
215u16 si_pmu_fast_pwrup_delay(struct si_pub *sih) 101u16 si_pmu_fast_pwrup_delay(struct si_pub *sih)
216{ 102{
217 uint delay = PMU_MAX_TRANSITION_DLY; 103 uint delay = PMU_MAX_TRANSITION_DLY;
218 104
219 switch (ai_get_chip_id(sih)) { 105 switch (ai_get_chip_id(sih)) {
220 case BCM43224_CHIP_ID: 106 case BCMA_CHIP_ID_BCM43224:
221 case BCM43225_CHIP_ID: 107 case BCMA_CHIP_ID_BCM43225:
222 case BCM4313_CHIP_ID: 108 case BCMA_CHIP_ID_BCM4313:
223 delay = 3700; 109 delay = 3700;
224 break; 110 break;
225 default: 111 default:
@@ -270,9 +156,9 @@ u32 si_pmu_alp_clock(struct si_pub *sih)
270 return clock; 156 return clock;
271 157
272 switch (ai_get_chip_id(sih)) { 158 switch (ai_get_chip_id(sih)) {
273 case BCM43224_CHIP_ID: 159 case BCMA_CHIP_ID_BCM43224:
274 case BCM43225_CHIP_ID: 160 case BCMA_CHIP_ID_BCM43225:
275 case BCM4313_CHIP_ID: 161 case BCMA_CHIP_ID_BCM4313:
276 /* always 20Mhz */ 162 /* always 20Mhz */
277 clock = 20000 * 1000; 163 clock = 20000 * 1000;
278 break; 164 break;
@@ -283,51 +169,9 @@ u32 si_pmu_alp_clock(struct si_pub *sih)
283 return clock; 169 return clock;
284} 170}
285 171
286/* initialize PMU */
287void si_pmu_init(struct si_pub *sih)
288{
289 struct bcma_device *core;
290
291 /* select chipc */
292 core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
293
294 if (ai_get_pmurev(sih) == 1)
295 bcma_mask32(core, CHIPCREGOFFS(pmucontrol),
296 ~PCTL_NOILP_ON_WAIT);
297 else if (ai_get_pmurev(sih) >= 2)
298 bcma_set32(core, CHIPCREGOFFS(pmucontrol), PCTL_NOILP_ON_WAIT);
299}
300
301/* initialize PMU resources */
302void si_pmu_res_init(struct si_pub *sih)
303{
304 struct bcma_device *core;
305 u32 min_mask = 0, max_mask = 0;
306
307 /* select to chipc */
308 core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
309
310 /* Determine min/max rsrc masks */
311 si_pmu_res_masks(sih, &min_mask, &max_mask);
312
313 /* It is required to program max_mask first and then min_mask */
314
315 /* Program max resource mask */
316
317 if (max_mask)
318 bcma_write32(core, CHIPCREGOFFS(max_res_mask), max_mask);
319
320 /* Program min resource mask */
321
322 if (min_mask)
323 bcma_write32(core, CHIPCREGOFFS(min_res_mask), min_mask);
324
325 /* Add some delay; allow resources to come up and settle. */
326 mdelay(2);
327}
328
329u32 si_pmu_measure_alpclk(struct si_pub *sih) 172u32 si_pmu_measure_alpclk(struct si_pub *sih)
330{ 173{
174 struct si_info *sii = container_of(sih, struct si_info, pub);
331 struct bcma_device *core; 175 struct bcma_device *core;
332 u32 alp_khz; 176 u32 alp_khz;
333 177
@@ -335,7 +179,7 @@ u32 si_pmu_measure_alpclk(struct si_pub *sih)
335 return 0; 179 return 0;
336 180
337 /* Remember original core before switch to chipc */ 181 /* Remember original core before switch to chipc */
338 core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0); 182 core = sii->icbus->drv_cc.core;
339 183
340 if (bcma_read32(core, CHIPCREGOFFS(pmustatus)) & PST_EXTLPOAVAIL) { 184 if (bcma_read32(core, CHIPCREGOFFS(pmustatus)) & PST_EXTLPOAVAIL) {
341 u32 ilp_ctr, alp_hz; 185 u32 ilp_ctr, alp_hz;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pmu.h b/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
index 3e39c5e0f9ff..f7cff873578b 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
@@ -26,10 +26,7 @@ extern u32 si_pmu_chipcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
26extern u32 si_pmu_regcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val); 26extern u32 si_pmu_regcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
27extern u32 si_pmu_alp_clock(struct si_pub *sih); 27extern u32 si_pmu_alp_clock(struct si_pub *sih);
28extern void si_pmu_pllupd(struct si_pub *sih); 28extern void si_pmu_pllupd(struct si_pub *sih);
29extern void si_pmu_spuravoid_pllupdate(struct si_pub *sih, u8 spuravoid);
30extern u32 si_pmu_pllcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val); 29extern u32 si_pmu_pllcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
31extern void si_pmu_init(struct si_pub *sih);
32extern void si_pmu_res_init(struct si_pub *sih);
33extern u32 si_pmu_measure_alpclk(struct si_pub *sih); 30extern u32 si_pmu_measure_alpclk(struct si_pub *sih);
34 31
35#endif /* _BRCM_PMU_H_ */ 32#endif /* _BRCM_PMU_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pub.h b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
index aa5d67f8d874..5855f4fd16dc 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pub.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
@@ -311,7 +311,7 @@ extern uint brcms_c_detach(struct brcms_c_info *wlc);
311extern int brcms_c_up(struct brcms_c_info *wlc); 311extern int brcms_c_up(struct brcms_c_info *wlc);
312extern uint brcms_c_down(struct brcms_c_info *wlc); 312extern uint brcms_c_down(struct brcms_c_info *wlc);
313 313
314extern bool brcms_c_chipmatch(u16 vendor, u16 device); 314extern bool brcms_c_chipmatch(struct bcma_device *core);
315extern void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx); 315extern void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx);
316extern void brcms_c_reset(struct brcms_c_info *wlc); 316extern void brcms_c_reset(struct brcms_c_info *wlc);
317 317
diff --git a/drivers/net/wireless/brcm80211/brcmutil/utils.c b/drivers/net/wireless/brcm80211/brcmutil/utils.c
index b45ab34cdfdc..3e6405e06ac0 100644
--- a/drivers/net/wireless/brcm80211/brcmutil/utils.c
+++ b/drivers/net/wireless/brcm80211/brcmutil/utils.c
@@ -43,6 +43,8 @@ EXPORT_SYMBOL(brcmu_pkt_buf_get_skb);
43/* Free the driver packet. Free the tag if present */ 43/* Free the driver packet. Free the tag if present */
44void brcmu_pkt_buf_free_skb(struct sk_buff *skb) 44void brcmu_pkt_buf_free_skb(struct sk_buff *skb)
45{ 45{
46 if (!skb)
47 return;
46 WARN_ON(skb->next); 48 WARN_ON(skb->next);
47 if (skb->destructor) 49 if (skb->destructor)
48 /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if 50 /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index 333193f20e1c..bcc79b4e3267 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -37,5 +37,6 @@
37#define BCM4329_CHIP_ID 0x4329 37#define BCM4329_CHIP_ID 0x4329
38#define BCM4330_CHIP_ID 0x4330 38#define BCM4330_CHIP_ID 0x4330
39#define BCM4331_CHIP_ID 0x4331 39#define BCM4331_CHIP_ID 0x4331
40#define BCM4334_CHIP_ID 0x4334
40 41
41#endif /* _BRCM_HW_IDS_H_ */ 42#endif /* _BRCM_HW_IDS_H_ */
diff --git a/drivers/net/wireless/brcm80211/include/soc.h b/drivers/net/wireless/brcm80211/include/soc.h
index 4e9b7e4827ea..123cfa854a0d 100644
--- a/drivers/net/wireless/brcm80211/include/soc.h
+++ b/drivers/net/wireless/brcm80211/include/soc.h
@@ -19,68 +19,6 @@
19 19
20#define SI_ENUM_BASE 0x18000000 /* Enumeration space base */ 20#define SI_ENUM_BASE 0x18000000 /* Enumeration space base */
21 21
22/* core codes */
23#define NODEV_CORE_ID 0x700 /* Invalid coreid */
24#define CC_CORE_ID 0x800 /* chipcommon core */
25#define ILINE20_CORE_ID 0x801 /* iline20 core */
26#define SRAM_CORE_ID 0x802 /* sram core */
27#define SDRAM_CORE_ID 0x803 /* sdram core */
28#define PCI_CORE_ID 0x804 /* pci core */
29#define MIPS_CORE_ID 0x805 /* mips core */
30#define ENET_CORE_ID 0x806 /* enet mac core */
31#define CODEC_CORE_ID 0x807 /* v90 codec core */
32#define USB_CORE_ID 0x808 /* usb 1.1 host/device core */
33#define ADSL_CORE_ID 0x809 /* ADSL core */
34#define ILINE100_CORE_ID 0x80a /* iline100 core */
35#define IPSEC_CORE_ID 0x80b /* ipsec core */
36#define UTOPIA_CORE_ID 0x80c /* utopia core */
37#define PCMCIA_CORE_ID 0x80d /* pcmcia core */
38#define SOCRAM_CORE_ID 0x80e /* internal memory core */
39#define MEMC_CORE_ID 0x80f /* memc sdram core */
40#define OFDM_CORE_ID 0x810 /* OFDM phy core */
41#define EXTIF_CORE_ID 0x811 /* external interface core */
42#define D11_CORE_ID 0x812 /* 802.11 MAC core */
43#define APHY_CORE_ID 0x813 /* 802.11a phy core */
44#define BPHY_CORE_ID 0x814 /* 802.11b phy core */
45#define GPHY_CORE_ID 0x815 /* 802.11g phy core */
46#define MIPS33_CORE_ID 0x816 /* mips3302 core */
47#define USB11H_CORE_ID 0x817 /* usb 1.1 host core */
48#define USB11D_CORE_ID 0x818 /* usb 1.1 device core */
49#define USB20H_CORE_ID 0x819 /* usb 2.0 host core */
50#define USB20D_CORE_ID 0x81a /* usb 2.0 device core */
51#define SDIOH_CORE_ID 0x81b /* sdio host core */
52#define ROBO_CORE_ID 0x81c /* roboswitch core */
53#define ATA100_CORE_ID 0x81d /* parallel ATA core */
54#define SATAXOR_CORE_ID 0x81e /* serial ATA & XOR DMA core */
55#define GIGETH_CORE_ID 0x81f /* gigabit ethernet core */
56#define PCIE_CORE_ID 0x820 /* pci express core */
57#define NPHY_CORE_ID 0x821 /* 802.11n 2x2 phy core */
58#define SRAMC_CORE_ID 0x822 /* SRAM controller core */
59#define MINIMAC_CORE_ID 0x823 /* MINI MAC/phy core */
60#define ARM11_CORE_ID 0x824 /* ARM 1176 core */
61#define ARM7S_CORE_ID 0x825 /* ARM7tdmi-s core */
62#define LPPHY_CORE_ID 0x826 /* 802.11a/b/g phy core */
63#define PMU_CORE_ID 0x827 /* PMU core */
64#define SSNPHY_CORE_ID 0x828 /* 802.11n single-stream phy core */
65#define SDIOD_CORE_ID 0x829 /* SDIO device core */
66#define ARMCM3_CORE_ID 0x82a /* ARM Cortex M3 core */
67#define HTPHY_CORE_ID 0x82b /* 802.11n 4x4 phy core */
68#define MIPS74K_CORE_ID 0x82c /* mips 74k core */
69#define GMAC_CORE_ID 0x82d /* Gigabit MAC core */
70#define DMEMC_CORE_ID 0x82e /* DDR1/2 memory controller core */
71#define PCIERC_CORE_ID 0x82f /* PCIE Root Complex core */
72#define OCP_CORE_ID 0x830 /* OCP2OCP bridge core */
73#define SC_CORE_ID 0x831 /* shared common core */
74#define AHB_CORE_ID 0x832 /* OCP2AHB bridge core */
75#define SPIH_CORE_ID 0x833 /* SPI host core */
76#define I2S_CORE_ID 0x834 /* I2S core */
77#define DMEMS_CORE_ID 0x835 /* SDR/DDR1 memory controller core */
78#define DEF_SHIM_COMP 0x837 /* SHIM component in ubus/6362 */
79#define OOB_ROUTER_CORE_ID 0x367 /* OOB router core ID */
80#define DEF_AI_COMP 0xfff /* Default component, in ai chips it
81 * maps all unused address ranges
82 */
83
84/* Common core control flags */ 22/* Common core control flags */
85#define SICF_BIST_EN 0x8000 23#define SICF_BIST_EN 0x8000
86#define SICF_PME_EN 0x4000 24#define SICF_PME_EN 0x4000
diff --git a/drivers/net/wireless/hostap/hostap_proc.c b/drivers/net/wireless/hostap/hostap_proc.c
index 75ef8f04aabe..dc447c1b5abe 100644
--- a/drivers/net/wireless/hostap/hostap_proc.c
+++ b/drivers/net/wireless/hostap/hostap_proc.c
@@ -58,8 +58,7 @@ static int prism2_stats_proc_read(char *page, char **start, off_t off,
58{ 58{
59 char *p = page; 59 char *p = page;
60 local_info_t *local = (local_info_t *) data; 60 local_info_t *local = (local_info_t *) data;
61 struct comm_tallies_sums *sums = (struct comm_tallies_sums *) 61 struct comm_tallies_sums *sums = &local->comm_tallies;
62 &local->comm_tallies;
63 62
64 if (off != 0) { 63 if (off != 0) {
65 *eof = 1; 64 *eof = 1;
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 0036737fe8e3..0df459147394 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -2701,6 +2701,20 @@ static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2701 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6); 2701 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2702} 2702}
2703 2703
2704static void ipw_read_eeprom(struct ipw_priv *priv)
2705{
2706 int i;
2707 __le16 *eeprom = (__le16 *) priv->eeprom;
2708
2709 IPW_DEBUG_TRACE(">>\n");
2710
2711 /* read entire contents of eeprom into private buffer */
2712 for (i = 0; i < 128; i++)
2713 eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
2714
2715 IPW_DEBUG_TRACE("<<\n");
2716}
2717
2704/* 2718/*
2705 * Either the device driver (i.e. the host) or the firmware can 2719 * Either the device driver (i.e. the host) or the firmware can
2706 * load eeprom data into the designated region in SRAM. If neither 2720 * load eeprom data into the designated region in SRAM. If neither
@@ -2712,14 +2726,9 @@ static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2712static void ipw_eeprom_init_sram(struct ipw_priv *priv) 2726static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2713{ 2727{
2714 int i; 2728 int i;
2715 __le16 *eeprom = (__le16 *) priv->eeprom;
2716 2729
2717 IPW_DEBUG_TRACE(">>\n"); 2730 IPW_DEBUG_TRACE(">>\n");
2718 2731
2719 /* read entire contents of eeprom into private buffer */
2720 for (i = 0; i < 128; i++)
2721 eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
2722
2723 /* 2732 /*
2724 If the data looks correct, then copy it to our private 2733 If the data looks correct, then copy it to our private
2725 copy. Otherwise let the firmware know to perform the operation 2734 copy. Otherwise let the firmware know to perform the operation
@@ -3643,8 +3652,10 @@ static int ipw_load(struct ipw_priv *priv)
3643 /* ack fw init done interrupt */ 3652 /* ack fw init done interrupt */
3644 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE); 3653 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3645 3654
3646 /* read eeprom data and initialize the eeprom region of sram */ 3655 /* read eeprom data */
3647 priv->eeprom_delay = 1; 3656 priv->eeprom_delay = 1;
3657 ipw_read_eeprom(priv);
3658 /* initialize the eeprom region of sram */
3648 ipw_eeprom_init_sram(priv); 3659 ipw_eeprom_init_sram(priv);
3649 3660
3650 /* enable interrupts */ 3661 /* enable interrupts */
@@ -7069,9 +7080,7 @@ static int ipw_qos_activate(struct ipw_priv *priv,
7069 } 7080 }
7070 7081
7071 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n"); 7082 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
7072 err = ipw_send_qos_params_command(priv, 7083 err = ipw_send_qos_params_command(priv, &qos_parameters[0]);
7073 (struct libipw_qos_parameters *)
7074 &(qos_parameters[0]));
7075 if (err) 7084 if (err)
7076 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n"); 7085 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
7077 7086
diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c
index 4b10157d8686..d4fd29ad90dc 100644
--- a/drivers/net/wireless/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/3945-rs.c
@@ -946,7 +946,7 @@ il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
946 case IEEE80211_BAND_5GHZ: 946 case IEEE80211_BAND_5GHZ:
947 rs_sta->expected_tpt = il3945_expected_tpt_a; 947 rs_sta->expected_tpt = il3945_expected_tpt_a;
948 break; 948 break;
949 case IEEE80211_NUM_BANDS: 949 default:
950 BUG(); 950 BUG();
951 break; 951 break;
952 } 952 }
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index ff5d689e13f3..34f61a0581a2 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -5724,7 +5724,8 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
5724 BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC); 5724 BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
5725 5725
5726 hw->wiphy->flags |= 5726 hw->wiphy->flags |=
5727 WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS; 5727 WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS |
5728 WIPHY_FLAG_IBSS_RSN;
5728 5729
5729 /* 5730 /*
5730 * For now, disable PS by default because it affects 5731 * For now, disable PS by default because it affects
@@ -5873,6 +5874,16 @@ il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
5873 return -EOPNOTSUPP; 5874 return -EOPNOTSUPP;
5874 } 5875 }
5875 5876
5877 /*
5878 * To support IBSS RSN, don't program group keys in IBSS, the
5879 * hardware will then not attempt to decrypt the frames.
5880 */
5881 if (vif->type == NL80211_IFTYPE_ADHOC &&
5882 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
5883 D_MAC80211("leave - ad-hoc group key\n");
5884 return -EOPNOTSUPP;
5885 }
5886
5876 sta_id = il_sta_id_or_broadcast(il, sta); 5887 sta_id = il_sta_id_or_broadcast(il, sta);
5877 if (sta_id == IL_INVALID_STATION) 5888 if (sta_id == IL_INVALID_STATION)
5878 return -EINVAL; 5889 return -EINVAL;
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index 5d4807c2b56d..0f8a7703eea3 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -4717,10 +4717,11 @@ il_check_stuck_queue(struct il_priv *il, int cnt)
4717 struct il_tx_queue *txq = &il->txq[cnt]; 4717 struct il_tx_queue *txq = &il->txq[cnt];
4718 struct il_queue *q = &txq->q; 4718 struct il_queue *q = &txq->q;
4719 unsigned long timeout; 4719 unsigned long timeout;
4720 unsigned long now = jiffies;
4720 int ret; 4721 int ret;
4721 4722
4722 if (q->read_ptr == q->write_ptr) { 4723 if (q->read_ptr == q->write_ptr) {
4723 txq->time_stamp = jiffies; 4724 txq->time_stamp = now;
4724 return 0; 4725 return 0;
4725 } 4726 }
4726 4727
@@ -4728,9 +4729,9 @@ il_check_stuck_queue(struct il_priv *il, int cnt)
4728 txq->time_stamp + 4729 txq->time_stamp +
4729 msecs_to_jiffies(il->cfg->wd_timeout); 4730 msecs_to_jiffies(il->cfg->wd_timeout);
4730 4731
4731 if (time_after(jiffies, timeout)) { 4732 if (time_after(now, timeout)) {
4732 IL_ERR("Queue %d stuck for %u ms.\n", q->id, 4733 IL_ERR("Queue %d stuck for %u ms.\n", q->id,
4733 il->cfg->wd_timeout); 4734 jiffies_to_msecs(now - txq->time_stamp));
4734 ret = il_force_reset(il, false); 4735 ret = il_force_reset(il, false);
4735 return (ret == -EAGAIN) ? 0 : 1; 4736 return (ret == -EAGAIN) ? 0 : 1;
4736 } 4737 }
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 2463c0626438..727fbb5db9da 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -6,6 +6,7 @@ config IWLWIFI
6 select LEDS_CLASS 6 select LEDS_CLASS
7 select LEDS_TRIGGERS 7 select LEDS_TRIGGERS
8 select MAC80211_LEDS 8 select MAC80211_LEDS
9 select IWLDVM
9 ---help--- 10 ---help---
10 Select to build the driver supporting the: 11 Select to build the driver supporting the:
11 12
@@ -41,6 +42,10 @@ config IWLWIFI
41 say M here and read <file:Documentation/kbuild/modules.txt>. The 42 say M here and read <file:Documentation/kbuild/modules.txt>. The
42 module will be called iwlwifi. 43 module will be called iwlwifi.
43 44
45config IWLDVM
46 tristate "Intel Wireless WiFi"
47 depends on IWLWIFI
48
44menu "Debugging Options" 49menu "Debugging Options"
45 depends on IWLWIFI 50 depends on IWLWIFI
46 51
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index d615eacbf050..170ec330d2a9 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -1,27 +1,19 @@
1# WIFI 1# common
2obj-$(CONFIG_IWLWIFI) += iwlwifi.o 2obj-$(CONFIG_IWLWIFI) += iwlwifi.o
3iwlwifi-objs := iwl-agn.o iwl-agn-rs.o iwl-mac80211.o 3iwlwifi-objs += iwl-io.o
4iwlwifi-objs += iwl-ucode.o iwl-agn-tx.o iwl-debug.o
5iwlwifi-objs += iwl-agn-lib.o iwl-agn-calib.o iwl-io.o
6iwlwifi-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-rx.o
7
8iwlwifi-objs += iwl-eeprom.o iwl-power.o
9iwlwifi-objs += iwl-scan.o iwl-led.o
10iwlwifi-objs += iwl-agn-rxon.o iwl-agn-devices.o
11iwlwifi-objs += iwl-5000.o
12iwlwifi-objs += iwl-6000.o
13iwlwifi-objs += iwl-1000.o
14iwlwifi-objs += iwl-2000.o
15iwlwifi-objs += iwl-pci.o
16iwlwifi-objs += iwl-drv.o 4iwlwifi-objs += iwl-drv.o
5iwlwifi-objs += iwl-debug.o
17iwlwifi-objs += iwl-notif-wait.o 6iwlwifi-objs += iwl-notif-wait.o
18iwlwifi-objs += iwl-trans-pcie.o iwl-trans-pcie-rx.o iwl-trans-pcie-tx.o 7iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
19 8iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
9iwlwifi-objs += pcie/1000.o pcie/2000.o pcie/5000.o pcie/6000.o
20 10
21iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
22iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o 11iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
23iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-testmode.o 12iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-test.o
24 13
25CFLAGS_iwl-devtrace.o := -I$(src) 14ccflags-y += -D__CHECK_ENDIAN__ -I$(src)
26 15
27ccflags-y += -D__CHECK_ENDIAN__ 16
17obj-$(CONFIG_IWLDVM) += dvm/
18
19CFLAGS_iwl-devtrace.o := -I$(src)
diff --git a/drivers/net/wireless/iwlwifi/dvm/Makefile b/drivers/net/wireless/iwlwifi/dvm/Makefile
new file mode 100644
index 000000000000..5ff76b204141
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/dvm/Makefile
@@ -0,0 +1,13 @@
1# DVM
2obj-$(CONFIG_IWLDVM) += iwldvm.o
3iwldvm-objs += main.o rs.o mac80211.o ucode.o tx.o
4iwldvm-objs += lib.o calib.o tt.o sta.o rx.o
5
6iwldvm-objs += power.o
7iwldvm-objs += scan.o led.o
8iwldvm-objs += rxon.o devices.o
9
10iwldvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
11iwldvm-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += testmode.o
12
13ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index 79c0fe06f4db..9bb16bdf6d26 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -63,9 +63,10 @@
63#ifndef __iwl_agn_h__ 63#ifndef __iwl_agn_h__
64#define __iwl_agn_h__ 64#define __iwl_agn_h__
65 65
66#include "iwl-dev.h"
67#include "iwl-config.h" 66#include "iwl-config.h"
68 67
68#include "dev.h"
69
69/* The first 11 queues (0-10) are used otherwise */ 70/* The first 11 queues (0-10) are used otherwise */
70#define IWLAGN_FIRST_AMPDU_QUEUE 11 71#define IWLAGN_FIRST_AMPDU_QUEUE 11
71 72
@@ -91,7 +92,6 @@ extern struct iwl_lib_ops iwl6030_lib;
91#define STATUS_CT_KILL 1 92#define STATUS_CT_KILL 1
92#define STATUS_ALIVE 2 93#define STATUS_ALIVE 2
93#define STATUS_READY 3 94#define STATUS_READY 3
94#define STATUS_GEO_CONFIGURED 4
95#define STATUS_EXIT_PENDING 5 95#define STATUS_EXIT_PENDING 5
96#define STATUS_STATISTICS 6 96#define STATUS_STATISTICS 6
97#define STATUS_SCANNING 7 97#define STATUS_SCANNING 7
@@ -101,6 +101,7 @@ extern struct iwl_lib_ops iwl6030_lib;
101#define STATUS_CHANNEL_SWITCH_PENDING 11 101#define STATUS_CHANNEL_SWITCH_PENDING 11
102#define STATUS_SCAN_COMPLETE 12 102#define STATUS_SCAN_COMPLETE 12
103#define STATUS_POWER_PMI 13 103#define STATUS_POWER_PMI 13
104#define STATUS_SCAN_ROC_EXPIRED 14
104 105
105struct iwl_ucode_capabilities; 106struct iwl_ucode_capabilities;
106 107
@@ -255,6 +256,10 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
255 enum iwl_scan_type scan_type, 256 enum iwl_scan_type scan_type,
256 enum ieee80211_band band); 257 enum ieee80211_band band);
257 258
259void iwl_scan_roc_expired(struct iwl_priv *priv);
260void iwl_scan_offchannel_skb(struct iwl_priv *priv);
261void iwl_scan_offchannel_skb_status(struct iwl_priv *priv);
262
258/* For faster active scanning, scan will move to the next channel if fewer than 263/* For faster active scanning, scan will move to the next channel if fewer than
259 * PLCP_QUIET_THRESH packets are heard on this channel within 264 * PLCP_QUIET_THRESH packets are heard on this channel within
260 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell 265 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
@@ -264,7 +269,7 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
264#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */ 269#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
265#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */ 270#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
266 271
267#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7) 272#define IWL_SCAN_CHECK_WATCHDOG (HZ * 15)
268 273
269 274
270/* bt coex */ 275/* bt coex */
@@ -390,8 +395,10 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
390} 395}
391 396
392extern int iwl_alive_start(struct iwl_priv *priv); 397extern int iwl_alive_start(struct iwl_priv *priv);
393/* svtool */ 398
399/* testmode support */
394#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE 400#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
401
395extern int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, 402extern int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data,
396 int len); 403 int len);
397extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, 404extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw,
@@ -399,13 +406,16 @@ extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw,
399 struct netlink_callback *cb, 406 struct netlink_callback *cb,
400 void *data, int len); 407 void *data, int len);
401extern void iwl_testmode_init(struct iwl_priv *priv); 408extern void iwl_testmode_init(struct iwl_priv *priv);
402extern void iwl_testmode_cleanup(struct iwl_priv *priv); 409extern void iwl_testmode_free(struct iwl_priv *priv);
410
403#else 411#else
412
404static inline 413static inline
405int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len) 414int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
406{ 415{
407 return -ENOSYS; 416 return -ENOSYS;
408} 417}
418
409static inline 419static inline
410int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb, 420int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
411 struct netlink_callback *cb, 421 struct netlink_callback *cb,
@@ -413,12 +423,12 @@ int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
413{ 423{
414 return -ENOSYS; 424 return -ENOSYS;
415} 425}
416static inline 426
417void iwl_testmode_init(struct iwl_priv *priv) 427static inline void iwl_testmode_init(struct iwl_priv *priv)
418{ 428{
419} 429}
420static inline 430
421void iwl_testmode_cleanup(struct iwl_priv *priv) 431static inline void iwl_testmode_free(struct iwl_priv *priv)
422{ 432{
423} 433}
424#endif 434#endif
@@ -437,10 +447,8 @@ static inline void iwl_print_rx_config_cmd(struct iwl_priv *priv,
437 447
438static inline int iwl_is_ready(struct iwl_priv *priv) 448static inline int iwl_is_ready(struct iwl_priv *priv)
439{ 449{
440 /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are 450 /* The adapter is 'ready' if READY EXIT_PENDING is not set */
441 * set but EXIT_PENDING is not */
442 return test_bit(STATUS_READY, &priv->status) && 451 return test_bit(STATUS_READY, &priv->status) &&
443 test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
444 !test_bit(STATUS_EXIT_PENDING, &priv->status); 452 !test_bit(STATUS_EXIT_PENDING, &priv->status);
445} 453}
446 454
@@ -518,85 +526,4 @@ static inline const char *iwl_dvm_get_cmd_string(u8 cmd)
518 return s; 526 return s;
519 return "UNKNOWN"; 527 return "UNKNOWN";
520} 528}
521
522/* API method exported for mvm hybrid state */
523void iwl_setup_deferred_work(struct iwl_priv *priv);
524int iwl_send_wimax_coex(struct iwl_priv *priv);
525int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type);
526void iwl_option_config(struct iwl_priv *priv);
527void iwl_set_hw_params(struct iwl_priv *priv);
528void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags);
529int iwl_init_drv(struct iwl_priv *priv);
530void iwl_uninit_drv(struct iwl_priv *priv);
531void iwl_send_bt_config(struct iwl_priv *priv);
532void iwl_rf_kill_ct_config(struct iwl_priv *priv);
533int iwl_setup_interface(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
534void iwl_teardown_interface(struct iwl_priv *priv,
535 struct ieee80211_vif *vif,
536 bool mode_change);
537int iwl_full_rxon_required(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
538void iwlagn_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
539void iwlagn_check_needed_chains(struct iwl_priv *priv,
540 struct iwl_rxon_context *ctx,
541 struct ieee80211_bss_conf *bss_conf);
542void iwlagn_chain_noise_reset(struct iwl_priv *priv);
543int iwlagn_update_beacon(struct iwl_priv *priv,
544 struct ieee80211_vif *vif);
545void iwl_tt_handler(struct iwl_priv *priv);
546void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode);
547void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, int queue);
548void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state);
549void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb);
550void iwl_nic_error(struct iwl_op_mode *op_mode);
551void iwl_cmd_queue_full(struct iwl_op_mode *op_mode);
552void iwl_nic_config(struct iwl_op_mode *op_mode);
553int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
554 struct ieee80211_sta *sta, bool set);
555void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
556 enum ieee80211_rssi_event rssi_event);
557int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw);
558int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw);
559void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop);
560void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, int queue);
561void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
562 struct ieee80211_channel_switch *ch_switch);
563int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
564 struct ieee80211_vif *vif,
565 struct ieee80211_sta *sta,
566 enum ieee80211_sta_state old_state,
567 enum ieee80211_sta_state new_state);
568int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
569 struct ieee80211_vif *vif,
570 enum ieee80211_ampdu_mlme_action action,
571 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
572 u8 buf_size);
573int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
574 struct ieee80211_vif *vif,
575 struct cfg80211_scan_request *req);
576void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
577 struct ieee80211_vif *vif,
578 enum sta_notify_cmd cmd,
579 struct ieee80211_sta *sta);
580void iwlagn_configure_filter(struct ieee80211_hw *hw,
581 unsigned int changed_flags,
582 unsigned int *total_flags,
583 u64 multicast);
584int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
585 struct ieee80211_vif *vif, u16 queue,
586 const struct ieee80211_tx_queue_params *params);
587void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
588 struct ieee80211_vif *vif,
589 struct cfg80211_gtk_rekey_data *data);
590void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
591 struct ieee80211_vif *vif,
592 struct ieee80211_key_conf *keyconf,
593 struct ieee80211_sta *sta,
594 u32 iv32, u16 *phase1key);
595int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
596 struct ieee80211_vif *vif,
597 struct ieee80211_sta *sta,
598 struct ieee80211_key_conf *key);
599void iwlagn_mac_stop(struct ieee80211_hw *hw);
600void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
601int iwlagn_mac_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
602#endif /* __iwl_agn_h__ */ 529#endif /* __iwl_agn_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/dvm/calib.c
index 95f27f1a423b..f2dd671d7dc8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/calib.c
@@ -63,10 +63,11 @@
63#include <linux/slab.h> 63#include <linux/slab.h>
64#include <net/mac80211.h> 64#include <net/mac80211.h>
65 65
66#include "iwl-dev.h"
67#include "iwl-agn-calib.h"
68#include "iwl-trans.h" 66#include "iwl-trans.h"
69#include "iwl-agn.h" 67
68#include "dev.h"
69#include "calib.h"
70#include "agn.h"
70 71
71/***************************************************************************** 72/*****************************************************************************
72 * INIT calibrations framework 73 * INIT calibrations framework
@@ -832,14 +833,14 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
832 * To be safe, simply mask out any chains that we know 833 * To be safe, simply mask out any chains that we know
833 * are not on the device. 834 * are not on the device.
834 */ 835 */
835 active_chains &= priv->hw_params.valid_rx_ant; 836 active_chains &= priv->eeprom_data->valid_rx_ant;
836 837
837 num_tx_chains = 0; 838 num_tx_chains = 0;
838 for (i = 0; i < NUM_RX_CHAINS; i++) { 839 for (i = 0; i < NUM_RX_CHAINS; i++) {
839 /* loops on all the bits of 840 /* loops on all the bits of
840 * priv->hw_setting.valid_tx_ant */ 841 * priv->hw_setting.valid_tx_ant */
841 u8 ant_msk = (1 << i); 842 u8 ant_msk = (1 << i);
842 if (!(priv->hw_params.valid_tx_ant & ant_msk)) 843 if (!(priv->eeprom_data->valid_tx_ant & ant_msk))
843 continue; 844 continue;
844 845
845 num_tx_chains++; 846 num_tx_chains++;
@@ -853,7 +854,7 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
853 * connect the first valid tx chain 854 * connect the first valid tx chain
854 */ 855 */
855 first_chain = 856 first_chain =
856 find_first_chain(priv->hw_params.valid_tx_ant); 857 find_first_chain(priv->eeprom_data->valid_tx_ant);
857 data->disconn_array[first_chain] = 0; 858 data->disconn_array[first_chain] = 0;
858 active_chains |= BIT(first_chain); 859 active_chains |= BIT(first_chain);
859 IWL_DEBUG_CALIB(priv, 860 IWL_DEBUG_CALIB(priv,
@@ -863,13 +864,13 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
863 } 864 }
864 } 865 }
865 866
866 if (active_chains != priv->hw_params.valid_rx_ant && 867 if (active_chains != priv->eeprom_data->valid_rx_ant &&
867 active_chains != priv->chain_noise_data.active_chains) 868 active_chains != priv->chain_noise_data.active_chains)
868 IWL_DEBUG_CALIB(priv, 869 IWL_DEBUG_CALIB(priv,
869 "Detected that not all antennas are connected! " 870 "Detected that not all antennas are connected! "
870 "Connected: %#x, valid: %#x.\n", 871 "Connected: %#x, valid: %#x.\n",
871 active_chains, 872 active_chains,
872 priv->hw_params.valid_rx_ant); 873 priv->eeprom_data->valid_rx_ant);
873 874
874 /* Save for use within RXON, TX, SCAN commands, etc. */ 875 /* Save for use within RXON, TX, SCAN commands, etc. */
875 data->active_chains = active_chains; 876 data->active_chains = active_chains;
@@ -1054,7 +1055,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
1054 priv->cfg->bt_params->advanced_bt_coexist) { 1055 priv->cfg->bt_params->advanced_bt_coexist) {
1055 /* Disable disconnected antenna algorithm for advanced 1056 /* Disable disconnected antenna algorithm for advanced
1056 bt coex, assuming valid antennas are connected */ 1057 bt coex, assuming valid antennas are connected */
1057 data->active_chains = priv->hw_params.valid_rx_ant; 1058 data->active_chains = priv->eeprom_data->valid_rx_ant;
1058 for (i = 0; i < NUM_RX_CHAINS; i++) 1059 for (i = 0; i < NUM_RX_CHAINS; i++)
1059 if (!(data->active_chains & (1<<i))) 1060 if (!(data->active_chains & (1<<i)))
1060 data->disconn_array[i] = 1; 1061 data->disconn_array[i] = 1;
@@ -1083,8 +1084,9 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
1083 IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n", 1084 IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n",
1084 min_average_noise, min_average_noise_antenna_i); 1085 min_average_noise, min_average_noise_antenna_i);
1085 1086
1086 iwlagn_gain_computation(priv, average_noise, 1087 iwlagn_gain_computation(
1087 find_first_chain(priv->hw_params.valid_rx_ant)); 1088 priv, average_noise,
1089 find_first_chain(priv->eeprom_data->valid_rx_ant));
1088 1090
1089 /* Some power changes may have been made during the calibration. 1091 /* Some power changes may have been made during the calibration.
1090 * Update and commit the RXON 1092 * Update and commit the RXON
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h b/drivers/net/wireless/iwlwifi/dvm/calib.h
index dbe13787f272..2349f393cc42 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
+++ b/drivers/net/wireless/iwlwifi/dvm/calib.h
@@ -62,8 +62,8 @@
62#ifndef __iwl_calib_h__ 62#ifndef __iwl_calib_h__
63#define __iwl_calib_h__ 63#define __iwl_calib_h__
64 64
65#include "iwl-dev.h" 65#include "dev.h"
66#include "iwl-commands.h" 66#include "commands.h"
67 67
68void iwl_chain_noise_calibration(struct iwl_priv *priv); 68void iwl_chain_noise_calibration(struct iwl_priv *priv);
69void iwl_sensitivity_calibration(struct iwl_priv *priv); 69void iwl_sensitivity_calibration(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/dvm/commands.h
index 9af6a239b384..97bea16f3592 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/dvm/commands.h
@@ -61,9 +61,9 @@
61 * 61 *
62 *****************************************************************************/ 62 *****************************************************************************/
63/* 63/*
64 * Please use this file (iwl-commands.h) only for uCode API definitions. 64 * Please use this file (commands.h) only for uCode API definitions.
65 * Please use iwl-xxxx-hw.h for hardware-related definitions. 65 * Please use iwl-xxxx-hw.h for hardware-related definitions.
66 * Please use iwl-dev.h for driver implementation definitions. 66 * Please use dev.h for driver implementation definitions.
67 */ 67 */
68 68
69#ifndef __iwl_commands_h__ 69#ifndef __iwl_commands_h__
@@ -190,6 +190,44 @@ enum {
190 REPLY_MAX = 0xff 190 REPLY_MAX = 0xff
191}; 191};
192 192
193/*
194 * Minimum number of queues. MAX_NUM is defined in hw specific files.
195 * Set the minimum to accommodate
196 * - 4 standard TX queues
197 * - the command queue
198 * - 4 PAN TX queues
199 * - the PAN multicast queue, and
200 * - the AUX (TX during scan dwell) queue.
201 */
202#define IWL_MIN_NUM_QUEUES 11
203
204/*
205 * Command queue depends on iPAN support.
206 */
207#define IWL_DEFAULT_CMD_QUEUE_NUM 4
208#define IWL_IPAN_CMD_QUEUE_NUM 9
209
210#define IWL_TX_FIFO_BK 0 /* shared */
211#define IWL_TX_FIFO_BE 1
212#define IWL_TX_FIFO_VI 2 /* shared */
213#define IWL_TX_FIFO_VO 3
214#define IWL_TX_FIFO_BK_IPAN IWL_TX_FIFO_BK
215#define IWL_TX_FIFO_BE_IPAN 4
216#define IWL_TX_FIFO_VI_IPAN IWL_TX_FIFO_VI
217#define IWL_TX_FIFO_VO_IPAN 5
218/* re-uses the VO FIFO, uCode will properly flush/schedule */
219#define IWL_TX_FIFO_AUX 5
220#define IWL_TX_FIFO_UNUSED 255
221
222#define IWLAGN_CMD_FIFO_NUM 7
223
224/*
225 * This queue number is required for proper operation
226 * because the ucode will stop/start the scheduler as
227 * required.
228 */
229#define IWL_IPAN_MCAST_QUEUE 8
230
193/****************************************************************************** 231/******************************************************************************
194 * (0) 232 * (0)
195 * Commonly used structures and definitions: 233 * Commonly used structures and definitions:
@@ -197,9 +235,6 @@ enum {
197 * 235 *
198 *****************************************************************************/ 236 *****************************************************************************/
199 237
200/* iwl_cmd_header flags value */
201#define IWL_CMD_FAILED_MSK 0x40
202
203/** 238/**
204 * iwlagn rate_n_flags bit fields 239 * iwlagn rate_n_flags bit fields
205 * 240 *
@@ -758,8 +793,6 @@ struct iwl_qosparam_cmd {
758#define IWLAGN_BROADCAST_ID 15 793#define IWLAGN_BROADCAST_ID 15
759#define IWLAGN_STATION_COUNT 16 794#define IWLAGN_STATION_COUNT 16
760 795
761#define IWL_INVALID_STATION 255
762#define IWL_MAX_TID_COUNT 8
763#define IWL_TID_NON_QOS IWL_MAX_TID_COUNT 796#define IWL_TID_NON_QOS IWL_MAX_TID_COUNT
764 797
765#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2) 798#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index 7f97dec8534d..46782f1102ac 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -30,16 +30,12 @@
30#include <linux/kernel.h> 30#include <linux/kernel.h>
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/debugfs.h> 32#include <linux/debugfs.h>
33
34#include <linux/ieee80211.h> 33#include <linux/ieee80211.h>
35#include <net/mac80211.h> 34#include <net/mac80211.h>
36
37
38#include "iwl-dev.h"
39#include "iwl-debug.h" 35#include "iwl-debug.h"
40#include "iwl-io.h" 36#include "iwl-io.h"
41#include "iwl-agn.h" 37#include "dev.h"
42#include "iwl-modparams.h" 38#include "agn.h"
43 39
44/* create and remove of files */ 40/* create and remove of files */
45#define DEBUGFS_ADD_FILE(name, parent, mode) do { \ 41#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
@@ -87,7 +83,7 @@ static ssize_t iwl_dbgfs_##name##_write(struct file *file, \
87#define DEBUGFS_READ_FILE_OPS(name) \ 83#define DEBUGFS_READ_FILE_OPS(name) \
88 DEBUGFS_READ_FUNC(name); \ 84 DEBUGFS_READ_FUNC(name); \
89static const struct file_operations iwl_dbgfs_##name##_ops = { \ 85static const struct file_operations iwl_dbgfs_##name##_ops = { \
90 .read = iwl_dbgfs_##name##_read, \ 86 .read = iwl_dbgfs_##name##_read, \
91 .open = simple_open, \ 87 .open = simple_open, \
92 .llseek = generic_file_llseek, \ 88 .llseek = generic_file_llseek, \
93}; 89};
@@ -307,13 +303,13 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
307 const u8 *ptr; 303 const u8 *ptr;
308 char *buf; 304 char *buf;
309 u16 eeprom_ver; 305 u16 eeprom_ver;
310 size_t eeprom_len = priv->cfg->base_params->eeprom_size; 306 size_t eeprom_len = priv->eeprom_blob_size;
311 buf_size = 4 * eeprom_len + 256; 307 buf_size = 4 * eeprom_len + 256;
312 308
313 if (eeprom_len % 16) 309 if (eeprom_len % 16)
314 return -ENODATA; 310 return -ENODATA;
315 311
316 ptr = priv->eeprom; 312 ptr = priv->eeprom_blob;
317 if (!ptr) 313 if (!ptr)
318 return -ENOMEM; 314 return -ENOMEM;
319 315
@@ -322,11 +318,9 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
322 if (!buf) 318 if (!buf)
323 return -ENOMEM; 319 return -ENOMEM;
324 320
325 eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION); 321 eeprom_ver = priv->eeprom_data->eeprom_version;
326 pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s, " 322 pos += scnprintf(buf + pos, buf_size - pos,
327 "version: 0x%x\n", 323 "NVM version: 0x%x\n", eeprom_ver);
328 (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
329 ? "OTP" : "EEPROM", eeprom_ver);
330 for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) { 324 for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
331 pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs); 325 pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
332 hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos, 326 hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
@@ -351,9 +345,6 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
351 char *buf; 345 char *buf;
352 ssize_t ret; 346 ssize_t ret;
353 347
354 if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status))
355 return -EAGAIN;
356
357 buf = kzalloc(bufsz, GFP_KERNEL); 348 buf = kzalloc(bufsz, GFP_KERNEL);
358 if (!buf) 349 if (!buf)
359 return -ENOMEM; 350 return -ENOMEM;
@@ -426,8 +417,6 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
426 test_bit(STATUS_ALIVE, &priv->status)); 417 test_bit(STATUS_ALIVE, &priv->status));
427 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n", 418 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
428 test_bit(STATUS_READY, &priv->status)); 419 test_bit(STATUS_READY, &priv->status));
429 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n",
430 test_bit(STATUS_GEO_CONFIGURED, &priv->status));
431 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n", 420 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
432 test_bit(STATUS_EXIT_PENDING, &priv->status)); 421 test_bit(STATUS_EXIT_PENDING, &priv->status));
433 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n", 422 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
@@ -1341,17 +1330,17 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
1341 if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) { 1330 if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) {
1342 pos += scnprintf(buf + pos, bufsz - pos, 1331 pos += scnprintf(buf + pos, bufsz - pos,
1343 "tx power: (1/2 dB step)\n"); 1332 "tx power: (1/2 dB step)\n");
1344 if ((priv->hw_params.valid_tx_ant & ANT_A) && 1333 if ((priv->eeprom_data->valid_tx_ant & ANT_A) &&
1345 tx->tx_power.ant_a) 1334 tx->tx_power.ant_a)
1346 pos += scnprintf(buf + pos, bufsz - pos, 1335 pos += scnprintf(buf + pos, bufsz - pos,
1347 fmt_hex, "antenna A:", 1336 fmt_hex, "antenna A:",
1348 tx->tx_power.ant_a); 1337 tx->tx_power.ant_a);
1349 if ((priv->hw_params.valid_tx_ant & ANT_B) && 1338 if ((priv->eeprom_data->valid_tx_ant & ANT_B) &&
1350 tx->tx_power.ant_b) 1339 tx->tx_power.ant_b)
1351 pos += scnprintf(buf + pos, bufsz - pos, 1340 pos += scnprintf(buf + pos, bufsz - pos,
1352 fmt_hex, "antenna B:", 1341 fmt_hex, "antenna B:",
1353 tx->tx_power.ant_b); 1342 tx->tx_power.ant_b);
1354 if ((priv->hw_params.valid_tx_ant & ANT_C) && 1343 if ((priv->eeprom_data->valid_tx_ant & ANT_C) &&
1355 tx->tx_power.ant_c) 1344 tx->tx_power.ant_c)
1356 pos += scnprintf(buf + pos, bufsz - pos, 1345 pos += scnprintf(buf + pos, bufsz - pos,
1357 fmt_hex, "antenna C:", 1346 fmt_hex, "antenna C:",
@@ -2266,6 +2255,10 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
2266 char buf[8]; 2255 char buf[8];
2267 int buf_size; 2256 int buf_size;
2268 2257
2258 /* check that the interface is up */
2259 if (!iwl_is_ready(priv))
2260 return -EAGAIN;
2261
2269 memset(buf, 0, sizeof(buf)); 2262 memset(buf, 0, sizeof(buf));
2270 buf_size = min(count, sizeof(buf) - 1); 2263 buf_size = min(count, sizeof(buf) - 1);
2271 if (copy_from_user(buf, user_buf, buf_size)) 2264 if (copy_from_user(buf, user_buf, buf_size))
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index 70062379d0ec..054f728f6266 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -24,8 +24,8 @@
24 * 24 *
25 *****************************************************************************/ 25 *****************************************************************************/
26/* 26/*
27 * Please use this file (iwl-dev.h) for driver implementation definitions. 27 * Please use this file (dev.h) for driver implementation definitions.
28 * Please use iwl-commands.h for uCode API definitions. 28 * Please use commands.h for uCode API definitions.
29 */ 29 */
30 30
31#ifndef __iwl_dev_h__ 31#ifndef __iwl_dev_h__
@@ -39,17 +39,20 @@
39#include <linux/mutex.h> 39#include <linux/mutex.h>
40 40
41#include "iwl-fw.h" 41#include "iwl-fw.h"
42#include "iwl-eeprom.h" 42#include "iwl-eeprom-parse.h"
43#include "iwl-csr.h" 43#include "iwl-csr.h"
44#include "iwl-debug.h" 44#include "iwl-debug.h"
45#include "iwl-agn-hw.h" 45#include "iwl-agn-hw.h"
46#include "iwl-led.h"
47#include "iwl-power.h"
48#include "iwl-agn-rs.h"
49#include "iwl-agn-tt.h"
50#include "iwl-trans.h"
51#include "iwl-op-mode.h" 46#include "iwl-op-mode.h"
52#include "iwl-notif-wait.h" 47#include "iwl-notif-wait.h"
48#include "iwl-trans.h"
49
50#include "led.h"
51#include "power.h"
52#include "rs.h"
53#include "tt.h"
54
55#include "iwl-test.h"
53 56
54/* CT-KILL constants */ 57/* CT-KILL constants */
55#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */ 58#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
@@ -87,49 +90,6 @@
87 90
88#define IWL_NUM_SCAN_RATES (2) 91#define IWL_NUM_SCAN_RATES (2)
89 92
90/*
91 * One for each channel, holds all channel setup data
92 * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
93 * with one another!
94 */
95struct iwl_channel_info {
96 struct iwl_eeprom_channel eeprom; /* EEPROM regulatory limit */
97 struct iwl_eeprom_channel ht40_eeprom; /* EEPROM regulatory limit for
98 * HT40 channel */
99
100 u8 channel; /* channel number */
101 u8 flags; /* flags copied from EEPROM */
102 s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
103 s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) limit */
104 s8 min_power; /* always 0 */
105 s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */
106
107 u8 group_index; /* 0-4, maps channel to group1/2/3/4/5 */
108 u8 band_index; /* 0-4, maps channel to band1/2/3/4/5 */
109 enum ieee80211_band band;
110
111 /* HT40 channel info */
112 s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
113 u8 ht40_flags; /* flags copied from EEPROM */
114 u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */
115};
116
117/*
118 * Minimum number of queues. MAX_NUM is defined in hw specific files.
119 * Set the minimum to accommodate
120 * - 4 standard TX queues
121 * - the command queue
122 * - 4 PAN TX queues
123 * - the PAN multicast queue, and
124 * - the AUX (TX during scan dwell) queue.
125 */
126#define IWL_MIN_NUM_QUEUES 11
127
128/*
129 * Command queue depends on iPAN support.
130 */
131#define IWL_DEFAULT_CMD_QUEUE_NUM 4
132#define IWL_IPAN_CMD_QUEUE_NUM 9
133 93
134#define IEEE80211_DATA_LEN 2304 94#define IEEE80211_DATA_LEN 2304
135#define IEEE80211_4ADDR_LEN 30 95#define IEEE80211_4ADDR_LEN 30
@@ -153,29 +113,6 @@ union iwl_ht_rate_supp {
153 }; 113 };
154}; 114};
155 115
156#define CFG_HT_RX_AMPDU_FACTOR_8K (0x0)
157#define CFG_HT_RX_AMPDU_FACTOR_16K (0x1)
158#define CFG_HT_RX_AMPDU_FACTOR_32K (0x2)
159#define CFG_HT_RX_AMPDU_FACTOR_64K (0x3)
160#define CFG_HT_RX_AMPDU_FACTOR_DEF CFG_HT_RX_AMPDU_FACTOR_64K
161#define CFG_HT_RX_AMPDU_FACTOR_MAX CFG_HT_RX_AMPDU_FACTOR_64K
162#define CFG_HT_RX_AMPDU_FACTOR_MIN CFG_HT_RX_AMPDU_FACTOR_8K
163
164/*
165 * Maximal MPDU density for TX aggregation
166 * 4 - 2us density
167 * 5 - 4us density
168 * 6 - 8us density
169 * 7 - 16us density
170 */
171#define CFG_HT_MPDU_DENSITY_2USEC (0x4)
172#define CFG_HT_MPDU_DENSITY_4USEC (0x5)
173#define CFG_HT_MPDU_DENSITY_8USEC (0x6)
174#define CFG_HT_MPDU_DENSITY_16USEC (0x7)
175#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
176#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
177#define CFG_HT_MPDU_DENSITY_MIN (0x1)
178
179struct iwl_ht_config { 116struct iwl_ht_config {
180 bool single_chain_sufficient; 117 bool single_chain_sufficient;
181 enum ieee80211_smps_mode smps; /* current smps mode */ 118 enum ieee80211_smps_mode smps; /* current smps mode */
@@ -445,23 +382,6 @@ enum {
445 MEASUREMENT_ACTIVE = (1 << 1), 382 MEASUREMENT_ACTIVE = (1 << 1),
446}; 383};
447 384
448enum iwl_nvm_type {
449 NVM_DEVICE_TYPE_EEPROM = 0,
450 NVM_DEVICE_TYPE_OTP,
451};
452
453/*
454 * Two types of OTP memory access modes
455 * IWL_OTP_ACCESS_ABSOLUTE - absolute address mode,
456 * based on physical memory addressing
457 * IWL_OTP_ACCESS_RELATIVE - relative address mode,
458 * based on logical memory addressing
459 */
460enum iwl_access_mode {
461 IWL_OTP_ACCESS_ABSOLUTE,
462 IWL_OTP_ACCESS_RELATIVE,
463};
464
465/* reply_tx_statistics (for _agn devices) */ 385/* reply_tx_statistics (for _agn devices) */
466struct reply_tx_error_statistics { 386struct reply_tx_error_statistics {
467 u32 pp_delay; 387 u32 pp_delay;
@@ -632,10 +552,6 @@ enum iwl_scan_type {
632 * 552 *
633 * @tx_chains_num: Number of TX chains 553 * @tx_chains_num: Number of TX chains
634 * @rx_chains_num: Number of RX chains 554 * @rx_chains_num: Number of RX chains
635 * @valid_tx_ant: usable antennas for TX
636 * @valid_rx_ant: usable antennas for RX
637 * @ht40_channel: is 40MHz width possible: BIT(IEEE80211_BAND_XXX)
638 * @sku: sku read from EEPROM
639 * @ct_kill_threshold: temperature threshold - in hw dependent unit 555 * @ct_kill_threshold: temperature threshold - in hw dependent unit
640 * @ct_kill_exit_threshold: when to reeable the device - in hw dependent unit 556 * @ct_kill_exit_threshold: when to reeable the device - in hw dependent unit
641 * relevant for 1000, 6000 and up 557 * relevant for 1000, 6000 and up
@@ -645,11 +561,7 @@ enum iwl_scan_type {
645struct iwl_hw_params { 561struct iwl_hw_params {
646 u8 tx_chains_num; 562 u8 tx_chains_num;
647 u8 rx_chains_num; 563 u8 rx_chains_num;
648 u8 valid_tx_ant;
649 u8 valid_rx_ant;
650 u8 ht40_channel;
651 bool use_rts_for_aggregation; 564 bool use_rts_for_aggregation;
652 u16 sku;
653 u32 ct_kill_threshold; 565 u32 ct_kill_threshold;
654 u32 ct_kill_exit_threshold; 566 u32 ct_kill_exit_threshold;
655 567
@@ -664,31 +576,10 @@ struct iwl_lib_ops {
664 /* device specific configuration */ 576 /* device specific configuration */
665 void (*nic_config)(struct iwl_priv *priv); 577 void (*nic_config)(struct iwl_priv *priv);
666 578
667 /* eeprom operations (as defined in iwl-eeprom.h) */
668 struct iwl_eeprom_ops eeprom_ops;
669
670 /* temperature */ 579 /* temperature */
671 void (*temperature)(struct iwl_priv *priv); 580 void (*temperature)(struct iwl_priv *priv);
672}; 581};
673 582
674#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
675struct iwl_testmode_trace {
676 u32 buff_size;
677 u32 total_size;
678 u32 num_chunks;
679 u8 *cpu_addr;
680 u8 *trace_addr;
681 dma_addr_t dma_addr;
682 bool trace_enabled;
683};
684struct iwl_testmode_mem {
685 u32 buff_size;
686 u32 num_chunks;
687 u8 *buff_addr;
688 bool read_in_progress;
689};
690#endif
691
692struct iwl_wipan_noa_data { 583struct iwl_wipan_noa_data {
693 struct rcu_head rcu_head; 584 struct rcu_head rcu_head;
694 u32 length; 585 u32 length;
@@ -735,8 +626,6 @@ struct iwl_priv {
735 626
736 /* ieee device used by generic ieee processing code */ 627 /* ieee device used by generic ieee processing code */
737 struct ieee80211_hw *hw; 628 struct ieee80211_hw *hw;
738 struct ieee80211_channel *ieee_channels;
739 struct ieee80211_rate *ieee_rates;
740 629
741 struct list_head calib_results; 630 struct list_head calib_results;
742 631
@@ -747,16 +636,12 @@ struct iwl_priv {
747 enum ieee80211_band band; 636 enum ieee80211_band band;
748 u8 valid_contexts; 637 u8 valid_contexts;
749 638
750 void (*pre_rx_handler)(struct iwl_priv *priv,
751 struct iwl_rx_cmd_buffer *rxb);
752 int (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv, 639 int (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
753 struct iwl_rx_cmd_buffer *rxb, 640 struct iwl_rx_cmd_buffer *rxb,
754 struct iwl_device_cmd *cmd); 641 struct iwl_device_cmd *cmd);
755 642
756 struct iwl_notif_wait_data notif_wait; 643 struct iwl_notif_wait_data notif_wait;
757 644
758 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
759
760 /* spectrum measurement report caching */ 645 /* spectrum measurement report caching */
761 struct iwl_spectrum_notification measure_report; 646 struct iwl_spectrum_notification measure_report;
762 u8 measurement_status; 647 u8 measurement_status;
@@ -787,11 +672,6 @@ struct iwl_priv {
787 bool ucode_loaded; 672 bool ucode_loaded;
788 bool init_ucode_run; /* Don't run init uCode again */ 673 bool init_ucode_run; /* Don't run init uCode again */
789 674
790 /* we allocate array of iwl_channel_info for NIC's valid channels.
791 * Access via channel # using indirect index array */
792 struct iwl_channel_info *channel_info; /* channel info array */
793 u8 channel_count; /* # of channels */
794
795 u8 plcp_delta_threshold; 675 u8 plcp_delta_threshold;
796 676
797 /* thermal calibration */ 677 /* thermal calibration */
@@ -846,6 +726,7 @@ struct iwl_priv {
846 struct iwl_station_entry stations[IWLAGN_STATION_COUNT]; 726 struct iwl_station_entry stations[IWLAGN_STATION_COUNT];
847 unsigned long ucode_key_table; 727 unsigned long ucode_key_table;
848 struct iwl_tid_data tid_data[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT]; 728 struct iwl_tid_data tid_data[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];
729 atomic_t num_aux_in_flight;
849 730
850 u8 mac80211_registered; 731 u8 mac80211_registered;
851 732
@@ -950,10 +831,8 @@ struct iwl_priv {
950 831
951 struct delayed_work scan_check; 832 struct delayed_work scan_check;
952 833
953 /* TX Power */ 834 /* TX Power settings */
954 s8 tx_power_user_lmt; 835 s8 tx_power_user_lmt;
955 s8 tx_power_device_lmt;
956 s8 tx_power_lmt_in_half_dbm; /* max tx power in half-dBm format */
957 s8 tx_power_next; 836 s8 tx_power_next;
958 837
959#ifdef CONFIG_IWLWIFI_DEBUGFS 838#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -964,9 +843,10 @@ struct iwl_priv {
964 void *wowlan_sram; 843 void *wowlan_sram;
965#endif /* CONFIG_IWLWIFI_DEBUGFS */ 844#endif /* CONFIG_IWLWIFI_DEBUGFS */
966 845
967 /* eeprom -- this is in the card's little endian byte order */ 846 struct iwl_eeprom_data *eeprom_data;
968 u8 *eeprom; 847 /* eeprom blob for debugfs/testmode */
969 enum iwl_nvm_type nvm_device_type; 848 u8 *eeprom_blob;
849 size_t eeprom_blob_size;
970 850
971 struct work_struct txpower_work; 851 struct work_struct txpower_work;
972 u32 calib_disabled; 852 u32 calib_disabled;
@@ -979,9 +859,9 @@ struct iwl_priv {
979 struct led_classdev led; 859 struct led_classdev led;
980 unsigned long blink_on, blink_off; 860 unsigned long blink_on, blink_off;
981 bool led_registered; 861 bool led_registered;
862
982#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE 863#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
983 struct iwl_testmode_trace testmode_trace; 864 struct iwl_test tst;
984 struct iwl_testmode_mem testmode_mem;
985 u32 tm_fixed_rate; 865 u32 tm_fixed_rate;
986#endif 866#endif
987 867
@@ -1001,8 +881,6 @@ struct iwl_priv {
1001 enum iwl_ucode_type cur_ucode; 881 enum iwl_ucode_type cur_ucode;
1002}; /*iwl_priv */ 882}; /*iwl_priv */
1003 883
1004extern struct kmem_cache *iwl_tx_cmd_pool;
1005
1006static inline struct iwl_rxon_context * 884static inline struct iwl_rxon_context *
1007iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif) 885iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif)
1008{ 886{
@@ -1036,36 +914,4 @@ static inline int iwl_is_any_associated(struct iwl_priv *priv)
1036 return false; 914 return false;
1037} 915}
1038 916
1039static inline int is_channel_valid(const struct iwl_channel_info *ch_info)
1040{
1041 if (ch_info == NULL)
1042 return 0;
1043 return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
1044}
1045
1046static inline int is_channel_radar(const struct iwl_channel_info *ch_info)
1047{
1048 return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
1049}
1050
1051static inline u8 is_channel_a_band(const struct iwl_channel_info *ch_info)
1052{
1053 return ch_info->band == IEEE80211_BAND_5GHZ;
1054}
1055
1056static inline u8 is_channel_bg_band(const struct iwl_channel_info *ch_info)
1057{
1058 return ch_info->band == IEEE80211_BAND_2GHZ;
1059}
1060
1061static inline int is_channel_passive(const struct iwl_channel_info *ch)
1062{
1063 return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
1064}
1065
1066static inline int is_channel_ibss(const struct iwl_channel_info *ch)
1067{
1068 return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0;
1069}
1070
1071#endif /* __iwl_dev_h__ */ 917#endif /* __iwl_dev_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-devices.c b/drivers/net/wireless/iwlwifi/dvm/devices.c
index 48533b3a0f9a..349c205d5f62 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-devices.c
+++ b/drivers/net/wireless/iwlwifi/dvm/devices.c
@@ -27,11 +27,14 @@
27/* 27/*
28 * DVM device-specific data & functions 28 * DVM device-specific data & functions
29 */ 29 */
30#include "iwl-agn.h"
31#include "iwl-dev.h"
32#include "iwl-commands.h"
33#include "iwl-io.h" 30#include "iwl-io.h"
34#include "iwl-prph.h" 31#include "iwl-prph.h"
32#include "iwl-eeprom-parse.h"
33
34#include "agn.h"
35#include "dev.h"
36#include "commands.h"
37
35 38
36/* 39/*
37 * 1000 series 40 * 1000 series
@@ -58,11 +61,6 @@ static void iwl1000_set_ct_threshold(struct iwl_priv *priv)
58/* NIC configuration for 1000 series */ 61/* NIC configuration for 1000 series */
59static void iwl1000_nic_config(struct iwl_priv *priv) 62static void iwl1000_nic_config(struct iwl_priv *priv)
60{ 63{
61 /* set CSR_HW_CONFIG_REG for uCode use */
62 iwl_set_bit(priv->trans, CSR_HW_IF_CONFIG_REG,
63 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
64 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
65
66 /* Setting digital SVR for 1000 card to 1.32V */ 64 /* Setting digital SVR for 1000 card to 1.32V */
67 /* locking is acquired in iwl_set_bits_mask_prph() function */ 65 /* locking is acquired in iwl_set_bits_mask_prph() function */
68 iwl_set_bits_mask_prph(priv->trans, APMG_DIGITAL_SVR_REG, 66 iwl_set_bits_mask_prph(priv->trans, APMG_DIGITAL_SVR_REG,
@@ -170,16 +168,6 @@ static const struct iwl_sensitivity_ranges iwl1000_sensitivity = {
170 168
171static void iwl1000_hw_set_hw_params(struct iwl_priv *priv) 169static void iwl1000_hw_set_hw_params(struct iwl_priv *priv)
172{ 170{
173 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ);
174
175 priv->hw_params.tx_chains_num =
176 num_of_ant(priv->hw_params.valid_tx_ant);
177 if (priv->cfg->rx_with_siso_diversity)
178 priv->hw_params.rx_chains_num = 1;
179 else
180 priv->hw_params.rx_chains_num =
181 num_of_ant(priv->hw_params.valid_rx_ant);
182
183 iwl1000_set_ct_threshold(priv); 171 iwl1000_set_ct_threshold(priv);
184 172
185 /* Set initial sensitivity parameters */ 173 /* Set initial sensitivity parameters */
@@ -189,17 +177,6 @@ static void iwl1000_hw_set_hw_params(struct iwl_priv *priv)
189struct iwl_lib_ops iwl1000_lib = { 177struct iwl_lib_ops iwl1000_lib = {
190 .set_hw_params = iwl1000_hw_set_hw_params, 178 .set_hw_params = iwl1000_hw_set_hw_params,
191 .nic_config = iwl1000_nic_config, 179 .nic_config = iwl1000_nic_config,
192 .eeprom_ops = {
193 .regulatory_bands = {
194 EEPROM_REG_BAND_1_CHANNELS,
195 EEPROM_REG_BAND_2_CHANNELS,
196 EEPROM_REG_BAND_3_CHANNELS,
197 EEPROM_REG_BAND_4_CHANNELS,
198 EEPROM_REG_BAND_5_CHANNELS,
199 EEPROM_REG_BAND_24_HT40_CHANNELS,
200 EEPROM_REGULATORY_BAND_NO_HT40,
201 },
202 },
203 .temperature = iwlagn_temperature, 180 .temperature = iwlagn_temperature,
204}; 181};
205 182
@@ -219,8 +196,6 @@ static void iwl2000_set_ct_threshold(struct iwl_priv *priv)
219/* NIC configuration for 2000 series */ 196/* NIC configuration for 2000 series */
220static void iwl2000_nic_config(struct iwl_priv *priv) 197static void iwl2000_nic_config(struct iwl_priv *priv)
221{ 198{
222 iwl_rf_config(priv);
223
224 iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG, 199 iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
225 CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER); 200 CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
226} 201}
@@ -251,16 +226,6 @@ static const struct iwl_sensitivity_ranges iwl2000_sensitivity = {
251 226
252static void iwl2000_hw_set_hw_params(struct iwl_priv *priv) 227static void iwl2000_hw_set_hw_params(struct iwl_priv *priv)
253{ 228{
254 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ);
255
256 priv->hw_params.tx_chains_num =
257 num_of_ant(priv->hw_params.valid_tx_ant);
258 if (priv->cfg->rx_with_siso_diversity)
259 priv->hw_params.rx_chains_num = 1;
260 else
261 priv->hw_params.rx_chains_num =
262 num_of_ant(priv->hw_params.valid_rx_ant);
263
264 iwl2000_set_ct_threshold(priv); 229 iwl2000_set_ct_threshold(priv);
265 230
266 /* Set initial sensitivity parameters */ 231 /* Set initial sensitivity parameters */
@@ -270,36 +235,12 @@ static void iwl2000_hw_set_hw_params(struct iwl_priv *priv)
270struct iwl_lib_ops iwl2000_lib = { 235struct iwl_lib_ops iwl2000_lib = {
271 .set_hw_params = iwl2000_hw_set_hw_params, 236 .set_hw_params = iwl2000_hw_set_hw_params,
272 .nic_config = iwl2000_nic_config, 237 .nic_config = iwl2000_nic_config,
273 .eeprom_ops = {
274 .regulatory_bands = {
275 EEPROM_REG_BAND_1_CHANNELS,
276 EEPROM_REG_BAND_2_CHANNELS,
277 EEPROM_REG_BAND_3_CHANNELS,
278 EEPROM_REG_BAND_4_CHANNELS,
279 EEPROM_REG_BAND_5_CHANNELS,
280 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
281 EEPROM_REGULATORY_BAND_NO_HT40,
282 },
283 .enhanced_txpower = true,
284 },
285 .temperature = iwlagn_temperature, 238 .temperature = iwlagn_temperature,
286}; 239};
287 240
288struct iwl_lib_ops iwl2030_lib = { 241struct iwl_lib_ops iwl2030_lib = {
289 .set_hw_params = iwl2000_hw_set_hw_params, 242 .set_hw_params = iwl2000_hw_set_hw_params,
290 .nic_config = iwl2000_nic_config, 243 .nic_config = iwl2000_nic_config,
291 .eeprom_ops = {
292 .regulatory_bands = {
293 EEPROM_REG_BAND_1_CHANNELS,
294 EEPROM_REG_BAND_2_CHANNELS,
295 EEPROM_REG_BAND_3_CHANNELS,
296 EEPROM_REG_BAND_4_CHANNELS,
297 EEPROM_REG_BAND_5_CHANNELS,
298 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
299 EEPROM_REGULATORY_BAND_NO_HT40,
300 },
301 .enhanced_txpower = true,
302 },
303 .temperature = iwlagn_temperature, 244 .temperature = iwlagn_temperature,
304}; 245};
305 246
@@ -309,19 +250,6 @@ struct iwl_lib_ops iwl2030_lib = {
309 */ 250 */
310 251
311/* NIC configuration for 5000 series */ 252/* NIC configuration for 5000 series */
312static void iwl5000_nic_config(struct iwl_priv *priv)
313{
314 iwl_rf_config(priv);
315
316 /* W/A : NIC is stuck in a reset state after Early PCIe power off
317 * (PCIe power is lost before PERST# is asserted),
318 * causing ME FW to lose ownership and not being able to obtain it back.
319 */
320 iwl_set_bits_mask_prph(priv->trans, APMG_PS_CTRL_REG,
321 APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
322 ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
323}
324
325static const struct iwl_sensitivity_ranges iwl5000_sensitivity = { 253static const struct iwl_sensitivity_ranges iwl5000_sensitivity = {
326 .min_nrg_cck = 100, 254 .min_nrg_cck = 100,
327 .auto_corr_min_ofdm = 90, 255 .auto_corr_min_ofdm = 90,
@@ -376,11 +304,9 @@ static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
376static s32 iwl_temp_calib_to_offset(struct iwl_priv *priv) 304static s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
377{ 305{
378 u16 temperature, voltage; 306 u16 temperature, voltage;
379 __le16 *temp_calib = (__le16 *)iwl_eeprom_query_addr(priv,
380 EEPROM_KELVIN_TEMPERATURE);
381 307
382 temperature = le16_to_cpu(temp_calib[0]); 308 temperature = le16_to_cpu(priv->eeprom_data->kelvin_temperature);
383 voltage = le16_to_cpu(temp_calib[1]); 309 voltage = le16_to_cpu(priv->eeprom_data->kelvin_voltage);
384 310
385 /* offset = temp - volt / coeff */ 311 /* offset = temp - volt / coeff */
386 return (s32)(temperature - 312 return (s32)(temperature -
@@ -404,14 +330,6 @@ static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
404 330
405static void iwl5000_hw_set_hw_params(struct iwl_priv *priv) 331static void iwl5000_hw_set_hw_params(struct iwl_priv *priv)
406{ 332{
407 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
408 BIT(IEEE80211_BAND_5GHZ);
409
410 priv->hw_params.tx_chains_num =
411 num_of_ant(priv->hw_params.valid_tx_ant);
412 priv->hw_params.rx_chains_num =
413 num_of_ant(priv->hw_params.valid_rx_ant);
414
415 iwl5000_set_ct_threshold(priv); 333 iwl5000_set_ct_threshold(priv);
416 334
417 /* Set initial sensitivity parameters */ 335 /* Set initial sensitivity parameters */
@@ -420,14 +338,6 @@ static void iwl5000_hw_set_hw_params(struct iwl_priv *priv)
420 338
421static void iwl5150_hw_set_hw_params(struct iwl_priv *priv) 339static void iwl5150_hw_set_hw_params(struct iwl_priv *priv)
422{ 340{
423 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
424 BIT(IEEE80211_BAND_5GHZ);
425
426 priv->hw_params.tx_chains_num =
427 num_of_ant(priv->hw_params.valid_tx_ant);
428 priv->hw_params.rx_chains_num =
429 num_of_ant(priv->hw_params.valid_rx_ant);
430
431 iwl5150_set_ct_threshold(priv); 341 iwl5150_set_ct_threshold(priv);
432 342
433 /* Set initial sensitivity parameters */ 343 /* Set initial sensitivity parameters */
@@ -455,7 +365,6 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
455 */ 365 */
456 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 366 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
457 struct iwl5000_channel_switch_cmd cmd; 367 struct iwl5000_channel_switch_cmd cmd;
458 const struct iwl_channel_info *ch_info;
459 u32 switch_time_in_usec, ucode_switch_time; 368 u32 switch_time_in_usec, ucode_switch_time;
460 u16 ch; 369 u16 ch;
461 u32 tsf_low; 370 u32 tsf_low;
@@ -505,14 +414,7 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
505 } 414 }
506 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n", 415 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
507 cmd.switch_time); 416 cmd.switch_time);
508 ch_info = iwl_get_channel_info(priv, priv->band, ch); 417 cmd.expect_beacon = ch_switch->channel->flags & IEEE80211_CHAN_RADAR;
509 if (ch_info)
510 cmd.expect_beacon = is_channel_radar(ch_info);
511 else {
512 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
513 ctx->active.channel, ch);
514 return -EFAULT;
515 }
516 418
517 return iwl_dvm_send_cmd(priv, &hcmd); 419 return iwl_dvm_send_cmd(priv, &hcmd);
518} 420}
@@ -520,36 +422,12 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
520struct iwl_lib_ops iwl5000_lib = { 422struct iwl_lib_ops iwl5000_lib = {
521 .set_hw_params = iwl5000_hw_set_hw_params, 423 .set_hw_params = iwl5000_hw_set_hw_params,
522 .set_channel_switch = iwl5000_hw_channel_switch, 424 .set_channel_switch = iwl5000_hw_channel_switch,
523 .nic_config = iwl5000_nic_config,
524 .eeprom_ops = {
525 .regulatory_bands = {
526 EEPROM_REG_BAND_1_CHANNELS,
527 EEPROM_REG_BAND_2_CHANNELS,
528 EEPROM_REG_BAND_3_CHANNELS,
529 EEPROM_REG_BAND_4_CHANNELS,
530 EEPROM_REG_BAND_5_CHANNELS,
531 EEPROM_REG_BAND_24_HT40_CHANNELS,
532 EEPROM_REG_BAND_52_HT40_CHANNELS
533 },
534 },
535 .temperature = iwlagn_temperature, 425 .temperature = iwlagn_temperature,
536}; 426};
537 427
538struct iwl_lib_ops iwl5150_lib = { 428struct iwl_lib_ops iwl5150_lib = {
539 .set_hw_params = iwl5150_hw_set_hw_params, 429 .set_hw_params = iwl5150_hw_set_hw_params,
540 .set_channel_switch = iwl5000_hw_channel_switch, 430 .set_channel_switch = iwl5000_hw_channel_switch,
541 .nic_config = iwl5000_nic_config,
542 .eeprom_ops = {
543 .regulatory_bands = {
544 EEPROM_REG_BAND_1_CHANNELS,
545 EEPROM_REG_BAND_2_CHANNELS,
546 EEPROM_REG_BAND_3_CHANNELS,
547 EEPROM_REG_BAND_4_CHANNELS,
548 EEPROM_REG_BAND_5_CHANNELS,
549 EEPROM_REG_BAND_24_HT40_CHANNELS,
550 EEPROM_REG_BAND_52_HT40_CHANNELS
551 },
552 },
553 .temperature = iwl5150_temperature, 431 .temperature = iwl5150_temperature,
554}; 432};
555 433
@@ -570,8 +448,6 @@ static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
570/* NIC configuration for 6000 series */ 448/* NIC configuration for 6000 series */
571static void iwl6000_nic_config(struct iwl_priv *priv) 449static void iwl6000_nic_config(struct iwl_priv *priv)
572{ 450{
573 iwl_rf_config(priv);
574
575 switch (priv->cfg->device_family) { 451 switch (priv->cfg->device_family) {
576 case IWL_DEVICE_FAMILY_6005: 452 case IWL_DEVICE_FAMILY_6005:
577 case IWL_DEVICE_FAMILY_6030: 453 case IWL_DEVICE_FAMILY_6030:
@@ -584,13 +460,13 @@ static void iwl6000_nic_config(struct iwl_priv *priv)
584 break; 460 break;
585 case IWL_DEVICE_FAMILY_6050: 461 case IWL_DEVICE_FAMILY_6050:
586 /* Indicate calibration version to uCode. */ 462 /* Indicate calibration version to uCode. */
587 if (iwl_eeprom_calib_version(priv) >= 6) 463 if (priv->eeprom_data->calib_version >= 6)
588 iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG, 464 iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
589 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6); 465 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
590 break; 466 break;
591 case IWL_DEVICE_FAMILY_6150: 467 case IWL_DEVICE_FAMILY_6150:
592 /* Indicate calibration version to uCode. */ 468 /* Indicate calibration version to uCode. */
593 if (iwl_eeprom_calib_version(priv) >= 6) 469 if (priv->eeprom_data->calib_version >= 6)
594 iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG, 470 iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
595 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6); 471 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
596 iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG, 472 iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
@@ -627,17 +503,6 @@ static const struct iwl_sensitivity_ranges iwl6000_sensitivity = {
627 503
628static void iwl6000_hw_set_hw_params(struct iwl_priv *priv) 504static void iwl6000_hw_set_hw_params(struct iwl_priv *priv)
629{ 505{
630 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
631 BIT(IEEE80211_BAND_5GHZ);
632
633 priv->hw_params.tx_chains_num =
634 num_of_ant(priv->hw_params.valid_tx_ant);
635 if (priv->cfg->rx_with_siso_diversity)
636 priv->hw_params.rx_chains_num = 1;
637 else
638 priv->hw_params.rx_chains_num =
639 num_of_ant(priv->hw_params.valid_rx_ant);
640
641 iwl6000_set_ct_threshold(priv); 506 iwl6000_set_ct_threshold(priv);
642 507
643 /* Set initial sensitivity parameters */ 508 /* Set initial sensitivity parameters */
@@ -654,7 +519,6 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
654 */ 519 */
655 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 520 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
656 struct iwl6000_channel_switch_cmd cmd; 521 struct iwl6000_channel_switch_cmd cmd;
657 const struct iwl_channel_info *ch_info;
658 u32 switch_time_in_usec, ucode_switch_time; 522 u32 switch_time_in_usec, ucode_switch_time;
659 u16 ch; 523 u16 ch;
660 u32 tsf_low; 524 u32 tsf_low;
@@ -704,14 +568,7 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
704 } 568 }
705 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n", 569 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
706 cmd.switch_time); 570 cmd.switch_time);
707 ch_info = iwl_get_channel_info(priv, priv->band, ch); 571 cmd.expect_beacon = ch_switch->channel->flags & IEEE80211_CHAN_RADAR;
708 if (ch_info)
709 cmd.expect_beacon = is_channel_radar(ch_info);
710 else {
711 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
712 ctx->active.channel, ch);
713 return -EFAULT;
714 }
715 572
716 return iwl_dvm_send_cmd(priv, &hcmd); 573 return iwl_dvm_send_cmd(priv, &hcmd);
717} 574}
@@ -720,18 +577,6 @@ struct iwl_lib_ops iwl6000_lib = {
720 .set_hw_params = iwl6000_hw_set_hw_params, 577 .set_hw_params = iwl6000_hw_set_hw_params,
721 .set_channel_switch = iwl6000_hw_channel_switch, 578 .set_channel_switch = iwl6000_hw_channel_switch,
722 .nic_config = iwl6000_nic_config, 579 .nic_config = iwl6000_nic_config,
723 .eeprom_ops = {
724 .regulatory_bands = {
725 EEPROM_REG_BAND_1_CHANNELS,
726 EEPROM_REG_BAND_2_CHANNELS,
727 EEPROM_REG_BAND_3_CHANNELS,
728 EEPROM_REG_BAND_4_CHANNELS,
729 EEPROM_REG_BAND_5_CHANNELS,
730 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
731 EEPROM_REG_BAND_52_HT40_CHANNELS
732 },
733 .enhanced_txpower = true,
734 },
735 .temperature = iwlagn_temperature, 580 .temperature = iwlagn_temperature,
736}; 581};
737 582
@@ -739,17 +584,5 @@ struct iwl_lib_ops iwl6030_lib = {
739 .set_hw_params = iwl6000_hw_set_hw_params, 584 .set_hw_params = iwl6000_hw_set_hw_params,
740 .set_channel_switch = iwl6000_hw_channel_switch, 585 .set_channel_switch = iwl6000_hw_channel_switch,
741 .nic_config = iwl6000_nic_config, 586 .nic_config = iwl6000_nic_config,
742 .eeprom_ops = {
743 .regulatory_bands = {
744 EEPROM_REG_BAND_1_CHANNELS,
745 EEPROM_REG_BAND_2_CHANNELS,
746 EEPROM_REG_BAND_3_CHANNELS,
747 EEPROM_REG_BAND_4_CHANNELS,
748 EEPROM_REG_BAND_5_CHANNELS,
749 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
750 EEPROM_REG_BAND_52_HT40_CHANNELS
751 },
752 .enhanced_txpower = true,
753 },
754 .temperature = iwlagn_temperature, 587 .temperature = iwlagn_temperature,
755}; 588};
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/dvm/led.c
index 47000419f916..bf479f709091 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/dvm/led.c
@@ -34,12 +34,11 @@
34#include <net/mac80211.h> 34#include <net/mac80211.h>
35#include <linux/etherdevice.h> 35#include <linux/etherdevice.h>
36#include <asm/unaligned.h> 36#include <asm/unaligned.h>
37
38#include "iwl-dev.h"
39#include "iwl-agn.h"
40#include "iwl-io.h" 37#include "iwl-io.h"
41#include "iwl-trans.h" 38#include "iwl-trans.h"
42#include "iwl-modparams.h" 39#include "iwl-modparams.h"
40#include "dev.h"
41#include "agn.h"
43 42
44/* Throughput OFF time(ms) ON time (ms) 43/* Throughput OFF time(ms) ON time (ms)
45 * >300 25 25 44 * >300 25 25
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/dvm/led.h
index b02a853103d3..b02a853103d3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.h
+++ b/drivers/net/wireless/iwlwifi/dvm/led.h
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
index e55ec6c8a920..207ae91a83aa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -33,13 +33,14 @@
33#include <linux/sched.h> 33#include <linux/sched.h>
34#include <net/mac80211.h> 34#include <net/mac80211.h>
35 35
36#include "iwl-dev.h"
37#include "iwl-io.h" 36#include "iwl-io.h"
38#include "iwl-agn-hw.h" 37#include "iwl-agn-hw.h"
39#include "iwl-agn.h"
40#include "iwl-trans.h" 38#include "iwl-trans.h"
41#include "iwl-modparams.h" 39#include "iwl-modparams.h"
42 40
41#include "dev.h"
42#include "agn.h"
43
43int iwlagn_hw_valid_rtc_data_addr(u32 addr) 44int iwlagn_hw_valid_rtc_data_addr(u32 addr)
44{ 45{
45 return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) && 46 return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) &&
@@ -58,8 +59,7 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
58 /* half dBm need to multiply */ 59 /* half dBm need to multiply */
59 tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt); 60 tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);
60 61
61 if (priv->tx_power_lmt_in_half_dbm && 62 if (tx_power_cmd.global_lmt > priv->eeprom_data->max_tx_pwr_half_dbm) {
62 priv->tx_power_lmt_in_half_dbm < tx_power_cmd.global_lmt) {
63 /* 63 /*
64 * For the newer devices which using enhanced/extend tx power 64 * For the newer devices which using enhanced/extend tx power
65 * table in EEPROM, the format is in half dBm. driver need to 65 * table in EEPROM, the format is in half dBm. driver need to
@@ -71,7 +71,8 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
71 * "tx_power_user_lmt" is higher than EEPROM value (in 71 * "tx_power_user_lmt" is higher than EEPROM value (in
72 * half-dBm format), lower the tx power based on EEPROM 72 * half-dBm format), lower the tx power based on EEPROM
73 */ 73 */
74 tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm; 74 tx_power_cmd.global_lmt =
75 priv->eeprom_data->max_tx_pwr_half_dbm;
75 } 76 }
76 tx_power_cmd.flags = IWLAGN_TX_POWER_NO_CLOSED; 77 tx_power_cmd.flags = IWLAGN_TX_POWER_NO_CLOSED;
77 tx_power_cmd.srv_chan_lmt = IWLAGN_TX_POWER_AUTO; 78 tx_power_cmd.srv_chan_lmt = IWLAGN_TX_POWER_AUTO;
@@ -159,7 +160,7 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
159 IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK | 160 IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK |
160 IWL_PAN_SCD_MULTICAST_MSK; 161 IWL_PAN_SCD_MULTICAST_MSK;
161 162
162 if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE) 163 if (priv->eeprom_data->sku & EEPROM_SKU_CAP_11N_ENABLE)
163 flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK; 164 flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK;
164 165
165 IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n", 166 IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n",
@@ -617,6 +618,11 @@ static bool iwlagn_fill_txpower_mode(struct iwl_priv *priv,
617 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 618 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
618 int ave_rssi; 619 int ave_rssi;
619 620
621 if (!ctx->vif || (ctx->vif->type != NL80211_IFTYPE_STATION)) {
622 IWL_DEBUG_INFO(priv, "BSS ctx not active or not in sta mode\n");
623 return false;
624 }
625
620 ave_rssi = ieee80211_ave_rssi(ctx->vif); 626 ave_rssi = ieee80211_ave_rssi(ctx->vif);
621 if (!ave_rssi) { 627 if (!ave_rssi) {
622 /* no rssi data, no changes to reduce tx power */ 628 /* no rssi data, no changes to reduce tx power */
@@ -818,7 +824,7 @@ void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
818 if (priv->chain_noise_data.active_chains) 824 if (priv->chain_noise_data.active_chains)
819 active_chains = priv->chain_noise_data.active_chains; 825 active_chains = priv->chain_noise_data.active_chains;
820 else 826 else
821 active_chains = priv->hw_params.valid_rx_ant; 827 active_chains = priv->eeprom_data->valid_rx_ant;
822 828
823 if (priv->cfg->bt_params && 829 if (priv->cfg->bt_params &&
824 priv->cfg->bt_params->advanced_bt_coexist && 830 priv->cfg->bt_params->advanced_bt_coexist &&
@@ -1259,7 +1265,7 @@ int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1259 * the mutex, this ensures we don't try to send two 1265 * the mutex, this ensures we don't try to send two
1260 * (or more) synchronous commands at a time. 1266 * (or more) synchronous commands at a time.
1261 */ 1267 */
1262 if (cmd->flags & CMD_SYNC) 1268 if (!(cmd->flags & CMD_ASYNC))
1263 lockdep_assert_held(&priv->mutex); 1269 lockdep_assert_held(&priv->mutex);
1264 1270
1265 if (priv->ucode_owner == IWL_OWNERSHIP_TM && 1271 if (priv->ucode_owner == IWL_OWNERSHIP_TM &&
diff --git a/drivers/net/wireless/iwlwifi/iwl-mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 013680332f07..a5f7bce96325 100644
--- a/drivers/net/wireless/iwlwifi/iwl-mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -38,19 +38,20 @@
38#include <linux/etherdevice.h> 38#include <linux/etherdevice.h>
39#include <linux/if_arp.h> 39#include <linux/if_arp.h>
40 40
41#include <net/ieee80211_radiotap.h>
41#include <net/mac80211.h> 42#include <net/mac80211.h>
42 43
43#include <asm/div64.h> 44#include <asm/div64.h>
44 45
45#include "iwl-eeprom.h"
46#include "iwl-dev.h"
47#include "iwl-io.h" 46#include "iwl-io.h"
48#include "iwl-agn-calib.h"
49#include "iwl-agn.h"
50#include "iwl-trans.h" 47#include "iwl-trans.h"
51#include "iwl-op-mode.h" 48#include "iwl-op-mode.h"
52#include "iwl-modparams.h" 49#include "iwl-modparams.h"
53 50
51#include "dev.h"
52#include "calib.h"
53#include "agn.h"
54
54/***************************************************************************** 55/*****************************************************************************
55 * 56 *
56 * mac80211 entry point functions 57 * mac80211 entry point functions
@@ -154,6 +155,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
154 IEEE80211_HW_SCAN_WHILE_IDLE; 155 IEEE80211_HW_SCAN_WHILE_IDLE;
155 156
156 hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE; 157 hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE;
158 hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FMT;
157 159
158 /* 160 /*
159 * Including the following line will crash some AP's. This 161 * Including the following line will crash some AP's. This
@@ -162,7 +164,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
162 hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF; 164 hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
163 */ 165 */
164 166
165 if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE) 167 if (priv->eeprom_data->sku & EEPROM_SKU_CAP_11N_ENABLE)
166 hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | 168 hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
167 IEEE80211_HW_SUPPORTS_STATIC_SMPS; 169 IEEE80211_HW_SUPPORTS_STATIC_SMPS;
168 170
@@ -237,12 +239,12 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
237 239
238 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; 240 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
239 241
240 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels) 242 if (priv->eeprom_data->bands[IEEE80211_BAND_2GHZ].n_channels)
241 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = 243 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
242 &priv->bands[IEEE80211_BAND_2GHZ]; 244 &priv->eeprom_data->bands[IEEE80211_BAND_2GHZ];
243 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels) 245 if (priv->eeprom_data->bands[IEEE80211_BAND_5GHZ].n_channels)
244 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = 246 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
245 &priv->bands[IEEE80211_BAND_5GHZ]; 247 &priv->eeprom_data->bands[IEEE80211_BAND_5GHZ];
246 248
247 hw->wiphy->hw_version = priv->trans->hw_id; 249 hw->wiphy->hw_version = priv->trans->hw_id;
248 250
@@ -341,7 +343,7 @@ static int iwlagn_mac_start(struct ieee80211_hw *hw)
341 return 0; 343 return 0;
342} 344}
343 345
344void iwlagn_mac_stop(struct ieee80211_hw *hw) 346static void iwlagn_mac_stop(struct ieee80211_hw *hw)
345{ 347{
346 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 348 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
347 349
@@ -369,9 +371,9 @@ void iwlagn_mac_stop(struct ieee80211_hw *hw)
369 IWL_DEBUG_MAC80211(priv, "leave\n"); 371 IWL_DEBUG_MAC80211(priv, "leave\n");
370} 372}
371 373
372void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw, 374static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
373 struct ieee80211_vif *vif, 375 struct ieee80211_vif *vif,
374 struct cfg80211_gtk_rekey_data *data) 376 struct cfg80211_gtk_rekey_data *data)
375{ 377{
376 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 378 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
377 379
@@ -397,7 +399,8 @@ void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
397 399
398#ifdef CONFIG_PM_SLEEP 400#ifdef CONFIG_PM_SLEEP
399 401
400int iwlagn_mac_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) 402static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
403 struct cfg80211_wowlan *wowlan)
401{ 404{
402 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 405 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
403 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 406 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
@@ -420,8 +423,6 @@ int iwlagn_mac_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
420 if (ret) 423 if (ret)
421 goto error; 424 goto error;
422 425
423 device_set_wakeup_enable(priv->trans->dev, true);
424
425 iwl_trans_wowlan_suspend(priv->trans); 426 iwl_trans_wowlan_suspend(priv->trans);
426 427
427 goto out; 428 goto out;
@@ -475,7 +476,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
475 } 476 }
476 477
477 if (priv->wowlan_sram) 478 if (priv->wowlan_sram)
478 _iwl_read_targ_mem_words( 479 _iwl_read_targ_mem_dwords(
479 priv->trans, 0x800000, 480 priv->trans, 0x800000,
480 priv->wowlan_sram, 481 priv->wowlan_sram,
481 img->sec[IWL_UCODE_SECTION_DATA].len / 4); 482 img->sec[IWL_UCODE_SECTION_DATA].len / 4);
@@ -488,8 +489,6 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
488 489
489 priv->wowlan = false; 490 priv->wowlan = false;
490 491
491 device_set_wakeup_enable(priv->trans->dev, false);
492
493 iwlagn_prepare_restart(priv); 492 iwlagn_prepare_restart(priv);
494 493
495 memset((void *)&ctx->active, 0, sizeof(ctx->active)); 494 memset((void *)&ctx->active, 0, sizeof(ctx->active));
@@ -504,9 +503,15 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
504 return 1; 503 return 1;
505} 504}
506 505
506static void iwlagn_mac_set_wakeup(struct ieee80211_hw *hw, bool enabled)
507{
508 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
509
510 device_set_wakeup_enable(priv->trans->dev, enabled);
511}
507#endif 512#endif
508 513
509void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 514static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
510{ 515{
511 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 516 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
512 517
@@ -517,21 +522,21 @@ void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
517 dev_kfree_skb_any(skb); 522 dev_kfree_skb_any(skb);
518} 523}
519 524
520void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw, 525static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
521 struct ieee80211_vif *vif, 526 struct ieee80211_vif *vif,
522 struct ieee80211_key_conf *keyconf, 527 struct ieee80211_key_conf *keyconf,
523 struct ieee80211_sta *sta, 528 struct ieee80211_sta *sta,
524 u32 iv32, u16 *phase1key) 529 u32 iv32, u16 *phase1key)
525{ 530{
526 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 531 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
527 532
528 iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key); 533 iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key);
529} 534}
530 535
531int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 536static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
532 struct ieee80211_vif *vif, 537 struct ieee80211_vif *vif,
533 struct ieee80211_sta *sta, 538 struct ieee80211_sta *sta,
534 struct ieee80211_key_conf *key) 539 struct ieee80211_key_conf *key)
535{ 540{
536 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 541 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
537 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; 542 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
@@ -631,11 +636,11 @@ int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
631 return ret; 636 return ret;
632} 637}
633 638
634int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, 639static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
635 struct ieee80211_vif *vif, 640 struct ieee80211_vif *vif,
636 enum ieee80211_ampdu_mlme_action action, 641 enum ieee80211_ampdu_mlme_action action,
637 struct ieee80211_sta *sta, u16 tid, u16 *ssn, 642 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
638 u8 buf_size) 643 u8 buf_size)
639{ 644{
640 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 645 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
641 int ret = -EINVAL; 646 int ret = -EINVAL;
@@ -644,7 +649,7 @@ int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
644 IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n", 649 IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
645 sta->addr, tid); 650 sta->addr, tid);
646 651
647 if (!(priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE)) 652 if (!(priv->eeprom_data->sku & EEPROM_SKU_CAP_11N_ENABLE))
648 return -EACCES; 653 return -EACCES;
649 654
650 IWL_DEBUG_MAC80211(priv, "enter\n"); 655 IWL_DEBUG_MAC80211(priv, "enter\n");
@@ -662,7 +667,7 @@ int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
662 ret = iwl_sta_rx_agg_stop(priv, sta, tid); 667 ret = iwl_sta_rx_agg_stop(priv, sta, tid);
663 break; 668 break;
664 case IEEE80211_AMPDU_TX_START: 669 case IEEE80211_AMPDU_TX_START:
665 if (!priv->trans->ops->tx_agg_setup) 670 if (!priv->trans->ops->txq_enable)
666 break; 671 break;
667 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) 672 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
668 break; 673 break;
@@ -757,11 +762,11 @@ static int iwlagn_mac_sta_remove(struct ieee80211_hw *hw,
757 return ret; 762 return ret;
758} 763}
759 764
760int iwlagn_mac_sta_state(struct ieee80211_hw *hw, 765static int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
761 struct ieee80211_vif *vif, 766 struct ieee80211_vif *vif,
762 struct ieee80211_sta *sta, 767 struct ieee80211_sta *sta,
763 enum ieee80211_sta_state old_state, 768 enum ieee80211_sta_state old_state,
764 enum ieee80211_sta_state new_state) 769 enum ieee80211_sta_state new_state)
765{ 770{
766 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 771 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
767 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; 772 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
@@ -852,11 +857,10 @@ int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
852 return ret; 857 return ret;
853} 858}
854 859
855void iwlagn_mac_channel_switch(struct ieee80211_hw *hw, 860static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
856 struct ieee80211_channel_switch *ch_switch) 861 struct ieee80211_channel_switch *ch_switch)
857{ 862{
858 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 863 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
859 const struct iwl_channel_info *ch_info;
860 struct ieee80211_conf *conf = &hw->conf; 864 struct ieee80211_conf *conf = &hw->conf;
861 struct ieee80211_channel *channel = ch_switch->channel; 865 struct ieee80211_channel *channel = ch_switch->channel;
862 struct iwl_ht_config *ht_conf = &priv->current_ht_config; 866 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
@@ -893,12 +897,6 @@ void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
893 if (le16_to_cpu(ctx->active.channel) == ch) 897 if (le16_to_cpu(ctx->active.channel) == ch)
894 goto out; 898 goto out;
895 899
896 ch_info = iwl_get_channel_info(priv, channel->band, ch);
897 if (!is_channel_valid(ch_info)) {
898 IWL_DEBUG_MAC80211(priv, "invalid channel\n");
899 goto out;
900 }
901
902 priv->current_ht_config.smps = conf->smps_mode; 900 priv->current_ht_config.smps = conf->smps_mode;
903 901
904 /* Configure HT40 channels */ 902 /* Configure HT40 channels */
@@ -947,10 +945,10 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
947 ieee80211_chswitch_done(ctx->vif, is_success); 945 ieee80211_chswitch_done(ctx->vif, is_success);
948} 946}
949 947
950void iwlagn_configure_filter(struct ieee80211_hw *hw, 948static void iwlagn_configure_filter(struct ieee80211_hw *hw,
951 unsigned int changed_flags, 949 unsigned int changed_flags,
952 unsigned int *total_flags, 950 unsigned int *total_flags,
953 u64 multicast) 951 u64 multicast)
954{ 952{
955 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 953 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
956 __le32 filter_or = 0, filter_nand = 0; 954 __le32 filter_or = 0, filter_nand = 0;
@@ -997,7 +995,7 @@ void iwlagn_configure_filter(struct ieee80211_hw *hw,
997 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; 995 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
998} 996}
999 997
1000void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop) 998static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
1001{ 999{
1002 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1000 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1003 1001
@@ -1050,8 +1048,18 @@ static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
1050 mutex_lock(&priv->mutex); 1048 mutex_lock(&priv->mutex);
1051 1049
1052 if (test_bit(STATUS_SCAN_HW, &priv->status)) { 1050 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
1053 err = -EBUSY; 1051 /* mac80211 should not scan while ROC or ROC while scanning */
1054 goto out; 1052 if (WARN_ON_ONCE(priv->scan_type != IWL_SCAN_RADIO_RESET)) {
1053 err = -EBUSY;
1054 goto out;
1055 }
1056
1057 iwl_scan_cancel_timeout(priv, 100);
1058
1059 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
1060 err = -EBUSY;
1061 goto out;
1062 }
1055 } 1063 }
1056 1064
1057 priv->hw_roc_channel = channel; 1065 priv->hw_roc_channel = channel;
@@ -1124,7 +1132,7 @@ static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
1124 return err; 1132 return err;
1125} 1133}
1126 1134
1127int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw) 1135static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
1128{ 1136{
1129 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1137 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1130 1138
@@ -1141,8 +1149,8 @@ int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
1141 return 0; 1149 return 0;
1142} 1150}
1143 1151
1144void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw, 1152static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
1145 enum ieee80211_rssi_event rssi_event) 1153 enum ieee80211_rssi_event rssi_event)
1146{ 1154{
1147 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1155 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1148 1156
@@ -1166,8 +1174,8 @@ void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
1166 IWL_DEBUG_MAC80211(priv, "leave\n"); 1174 IWL_DEBUG_MAC80211(priv, "leave\n");
1167} 1175}
1168 1176
1169int iwlagn_mac_set_tim(struct ieee80211_hw *hw, 1177static int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
1170 struct ieee80211_sta *sta, bool set) 1178 struct ieee80211_sta *sta, bool set)
1171{ 1179{
1172 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1180 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1173 1181
@@ -1176,9 +1184,9 @@ int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
1176 return 0; 1184 return 0;
1177} 1185}
1178 1186
1179int iwlagn_mac_conf_tx(struct ieee80211_hw *hw, 1187static int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
1180 struct ieee80211_vif *vif, u16 queue, 1188 struct ieee80211_vif *vif, u16 queue,
1181 const struct ieee80211_tx_queue_params *params) 1189 const struct ieee80211_tx_queue_params *params)
1182{ 1190{
1183 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1191 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1184 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; 1192 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
@@ -1220,7 +1228,7 @@ int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
1220 return 0; 1228 return 0;
1221} 1229}
1222 1230
1223int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw) 1231static int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw)
1224{ 1232{
1225 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1233 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1226 1234
@@ -1236,7 +1244,8 @@ static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1236 return iwlagn_commit_rxon(priv, ctx); 1244 return iwlagn_commit_rxon(priv, ctx);
1237} 1245}
1238 1246
1239int iwl_setup_interface(struct iwl_priv *priv, struct iwl_rxon_context *ctx) 1247static int iwl_setup_interface(struct iwl_priv *priv,
1248 struct iwl_rxon_context *ctx)
1240{ 1249{
1241 struct ieee80211_vif *vif = ctx->vif; 1250 struct ieee80211_vif *vif = ctx->vif;
1242 int err, ac; 1251 int err, ac;
@@ -1356,9 +1365,9 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
1356 return err; 1365 return err;
1357} 1366}
1358 1367
1359void iwl_teardown_interface(struct iwl_priv *priv, 1368static void iwl_teardown_interface(struct iwl_priv *priv,
1360 struct ieee80211_vif *vif, 1369 struct ieee80211_vif *vif,
1361 bool mode_change) 1370 bool mode_change)
1362{ 1371{
1363 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); 1372 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
1364 1373
@@ -1414,13 +1423,11 @@ static void iwlagn_mac_remove_interface(struct ieee80211_hw *hw,
1414} 1423}
1415 1424
1416static int iwlagn_mac_change_interface(struct ieee80211_hw *hw, 1425static int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
1417 struct ieee80211_vif *vif, 1426 struct ieee80211_vif *vif,
1418 enum nl80211_iftype newtype, bool newp2p) 1427 enum nl80211_iftype newtype, bool newp2p)
1419{ 1428{
1420 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1429 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1421 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); 1430 struct iwl_rxon_context *ctx, *tmp;
1422 struct iwl_rxon_context *bss_ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1423 struct iwl_rxon_context *tmp;
1424 enum nl80211_iftype newviftype = newtype; 1431 enum nl80211_iftype newviftype = newtype;
1425 u32 interface_modes; 1432 u32 interface_modes;
1426 int err; 1433 int err;
@@ -1431,6 +1438,18 @@ static int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
1431 1438
1432 mutex_lock(&priv->mutex); 1439 mutex_lock(&priv->mutex);
1433 1440
1441 ctx = iwl_rxon_ctx_from_vif(vif);
1442
1443 /*
1444 * To simplify this code, only support changes on the
1445 * BSS context. The PAN context is usually reassigned
1446 * by creating/removing P2P interfaces anyway.
1447 */
1448 if (ctx->ctxid != IWL_RXON_CTX_BSS) {
1449 err = -EBUSY;
1450 goto out;
1451 }
1452
1434 if (!ctx->vif || !iwl_is_ready_rf(priv)) { 1453 if (!ctx->vif || !iwl_is_ready_rf(priv)) {
1435 /* 1454 /*
1436 * Huh? But wait ... this can maybe happen when 1455 * Huh? But wait ... this can maybe happen when
@@ -1440,32 +1459,19 @@ static int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
1440 goto out; 1459 goto out;
1441 } 1460 }
1442 1461
1462 /* Check if the switch is supported in the same context */
1443 interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes; 1463 interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
1444
1445 if (!(interface_modes & BIT(newtype))) { 1464 if (!(interface_modes & BIT(newtype))) {
1446 err = -EBUSY; 1465 err = -EBUSY;
1447 goto out; 1466 goto out;
1448 } 1467 }
1449 1468
1450 /*
1451 * Refuse a change that should be done by moving from the PAN
1452 * context to the BSS context instead, if the BSS context is
1453 * available and can support the new interface type.
1454 */
1455 if (ctx->ctxid == IWL_RXON_CTX_PAN && !bss_ctx->vif &&
1456 (bss_ctx->interface_modes & BIT(newtype) ||
1457 bss_ctx->exclusive_interface_modes & BIT(newtype))) {
1458 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
1459 err = -EBUSY;
1460 goto out;
1461 }
1462
1463 if (ctx->exclusive_interface_modes & BIT(newtype)) { 1469 if (ctx->exclusive_interface_modes & BIT(newtype)) {
1464 for_each_context(priv, tmp) { 1470 for_each_context(priv, tmp) {
1465 if (ctx == tmp) 1471 if (ctx == tmp)
1466 continue; 1472 continue;
1467 1473
1468 if (!tmp->vif) 1474 if (!tmp->is_active)
1469 continue; 1475 continue;
1470 1476
1471 /* 1477 /*
@@ -1499,9 +1505,9 @@ static int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
1499 return err; 1505 return err;
1500} 1506}
1501 1507
1502int iwlagn_mac_hw_scan(struct ieee80211_hw *hw, 1508static int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
1503 struct ieee80211_vif *vif, 1509 struct ieee80211_vif *vif,
1504 struct cfg80211_scan_request *req) 1510 struct cfg80211_scan_request *req)
1505{ 1511{
1506 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1512 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1507 int ret; 1513 int ret;
@@ -1556,10 +1562,10 @@ static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
1556 iwl_send_add_sta(priv, &cmd, CMD_ASYNC); 1562 iwl_send_add_sta(priv, &cmd, CMD_ASYNC);
1557} 1563}
1558 1564
1559void iwlagn_mac_sta_notify(struct ieee80211_hw *hw, 1565static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
1560 struct ieee80211_vif *vif, 1566 struct ieee80211_vif *vif,
1561 enum sta_notify_cmd cmd, 1567 enum sta_notify_cmd cmd,
1562 struct ieee80211_sta *sta) 1568 struct ieee80211_sta *sta)
1563{ 1569{
1564 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1570 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1565 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; 1571 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
@@ -1596,6 +1602,7 @@ struct ieee80211_ops iwlagn_hw_ops = {
1596#ifdef CONFIG_PM_SLEEP 1602#ifdef CONFIG_PM_SLEEP
1597 .suspend = iwlagn_mac_suspend, 1603 .suspend = iwlagn_mac_suspend,
1598 .resume = iwlagn_mac_resume, 1604 .resume = iwlagn_mac_resume,
1605 .set_wakeup = iwlagn_mac_set_wakeup,
1599#endif 1606#endif
1600 .add_interface = iwlagn_mac_add_interface, 1607 .add_interface = iwlagn_mac_add_interface,
1601 .remove_interface = iwlagn_mac_remove_interface, 1608 .remove_interface = iwlagn_mac_remove_interface,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index ec36e2b020b6..612f05d757db 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -44,15 +44,19 @@
44 44
45#include <asm/div64.h> 45#include <asm/div64.h>
46 46
47#include "iwl-eeprom.h" 47#include "iwl-eeprom-read.h"
48#include "iwl-dev.h" 48#include "iwl-eeprom-parse.h"
49#include "iwl-io.h" 49#include "iwl-io.h"
50#include "iwl-agn-calib.h"
51#include "iwl-agn.h"
52#include "iwl-trans.h" 50#include "iwl-trans.h"
53#include "iwl-op-mode.h" 51#include "iwl-op-mode.h"
54#include "iwl-drv.h" 52#include "iwl-drv.h"
55#include "iwl-modparams.h" 53#include "iwl-modparams.h"
54#include "iwl-prph.h"
55
56#include "dev.h"
57#include "calib.h"
58#include "agn.h"
59
56 60
57/****************************************************************************** 61/******************************************************************************
58 * 62 *
@@ -78,7 +82,8 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION);
78MODULE_VERSION(DRV_VERSION); 82MODULE_VERSION(DRV_VERSION);
79MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); 83MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
80MODULE_LICENSE("GPL"); 84MODULE_LICENSE("GPL");
81MODULE_ALIAS("iwlagn"); 85
86static const struct iwl_op_mode_ops iwl_dvm_ops;
82 87
83void iwl_update_chain_flags(struct iwl_priv *priv) 88void iwl_update_chain_flags(struct iwl_priv *priv)
84{ 89{
@@ -180,7 +185,7 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
180 rate = info->control.rates[0].idx; 185 rate = info->control.rates[0].idx;
181 186
182 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, 187 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
183 priv->hw_params.valid_tx_ant); 188 priv->eeprom_data->valid_tx_ant);
184 rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant); 189 rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
185 190
186 /* In mac80211, rates for 5 GHz start at 0 */ 191 /* In mac80211, rates for 5 GHz start at 0 */
@@ -403,7 +408,7 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
403 408
404 base = priv->device_pointers.log_event_table; 409 base = priv->device_pointers.log_event_table;
405 if (iwlagn_hw_valid_rtc_data_addr(base)) { 410 if (iwlagn_hw_valid_rtc_data_addr(base)) {
406 iwl_read_targ_mem_words(priv->trans, base, &read, sizeof(read)); 411 iwl_read_targ_mem_bytes(priv->trans, base, &read, sizeof(read));
407 capacity = read.capacity; 412 capacity = read.capacity;
408 mode = read.mode; 413 mode = read.mode;
409 num_wraps = read.wrap_counter; 414 num_wraps = read.wrap_counter;
@@ -513,49 +518,6 @@ static void iwl_bg_tx_flush(struct work_struct *work)
513 * queue/FIFO/AC mapping definitions 518 * queue/FIFO/AC mapping definitions
514 */ 519 */
515 520
516#define IWL_TX_FIFO_BK 0 /* shared */
517#define IWL_TX_FIFO_BE 1
518#define IWL_TX_FIFO_VI 2 /* shared */
519#define IWL_TX_FIFO_VO 3
520#define IWL_TX_FIFO_BK_IPAN IWL_TX_FIFO_BK
521#define IWL_TX_FIFO_BE_IPAN 4
522#define IWL_TX_FIFO_VI_IPAN IWL_TX_FIFO_VI
523#define IWL_TX_FIFO_VO_IPAN 5
524/* re-uses the VO FIFO, uCode will properly flush/schedule */
525#define IWL_TX_FIFO_AUX 5
526#define IWL_TX_FIFO_UNUSED -1
527
528#define IWLAGN_CMD_FIFO_NUM 7
529
530/*
531 * This queue number is required for proper operation
532 * because the ucode will stop/start the scheduler as
533 * required.
534 */
535#define IWL_IPAN_MCAST_QUEUE 8
536
537static const u8 iwlagn_default_queue_to_tx_fifo[] = {
538 IWL_TX_FIFO_VO,
539 IWL_TX_FIFO_VI,
540 IWL_TX_FIFO_BE,
541 IWL_TX_FIFO_BK,
542 IWLAGN_CMD_FIFO_NUM,
543};
544
545static const u8 iwlagn_ipan_queue_to_tx_fifo[] = {
546 IWL_TX_FIFO_VO,
547 IWL_TX_FIFO_VI,
548 IWL_TX_FIFO_BE,
549 IWL_TX_FIFO_BK,
550 IWL_TX_FIFO_BK_IPAN,
551 IWL_TX_FIFO_BE_IPAN,
552 IWL_TX_FIFO_VI_IPAN,
553 IWL_TX_FIFO_VO_IPAN,
554 IWL_TX_FIFO_BE_IPAN,
555 IWLAGN_CMD_FIFO_NUM,
556 IWL_TX_FIFO_AUX,
557};
558
559static const u8 iwlagn_bss_ac_to_fifo[] = { 521static const u8 iwlagn_bss_ac_to_fifo[] = {
560 IWL_TX_FIFO_VO, 522 IWL_TX_FIFO_VO,
561 IWL_TX_FIFO_VI, 523 IWL_TX_FIFO_VI,
@@ -578,7 +540,7 @@ static const u8 iwlagn_pan_ac_to_queue[] = {
578 7, 6, 5, 4, 540 7, 6, 5, 4,
579}; 541};
580 542
581void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags) 543static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
582{ 544{
583 int i; 545 int i;
584 546
@@ -645,7 +607,7 @@ void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
645 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); 607 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
646} 608}
647 609
648void iwl_rf_kill_ct_config(struct iwl_priv *priv) 610static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
649{ 611{
650 struct iwl_ct_kill_config cmd; 612 struct iwl_ct_kill_config cmd;
651 struct iwl_ct_kill_throttling_config adv_cmd; 613 struct iwl_ct_kill_throttling_config adv_cmd;
@@ -726,7 +688,7 @@ static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
726 } 688 }
727} 689}
728 690
729void iwl_send_bt_config(struct iwl_priv *priv) 691static void iwl_send_bt_config(struct iwl_priv *priv)
730{ 692{
731 struct iwl_bt_cmd bt_cmd = { 693 struct iwl_bt_cmd bt_cmd = {
732 .lead_time = BT_LEAD_TIME_DEF, 694 .lead_time = BT_LEAD_TIME_DEF,
@@ -814,7 +776,7 @@ int iwl_alive_start(struct iwl_priv *priv)
814 ieee80211_wake_queues(priv->hw); 776 ieee80211_wake_queues(priv->hw);
815 777
816 /* Configure Tx antenna selection based on H/W config */ 778 /* Configure Tx antenna selection based on H/W config */
817 iwlagn_send_tx_ant_config(priv, priv->hw_params.valid_tx_ant); 779 iwlagn_send_tx_ant_config(priv, priv->eeprom_data->valid_tx_ant);
818 780
819 if (iwl_is_associated_ctx(ctx) && !priv->wowlan) { 781 if (iwl_is_associated_ctx(ctx) && !priv->wowlan) {
820 struct iwl_rxon_cmd *active_rxon = 782 struct iwl_rxon_cmd *active_rxon =
@@ -932,11 +894,12 @@ void iwl_down(struct iwl_priv *priv)
932 priv->ucode_loaded = false; 894 priv->ucode_loaded = false;
933 iwl_trans_stop_device(priv->trans); 895 iwl_trans_stop_device(priv->trans);
934 896
897 /* Set num_aux_in_flight must be done after the transport is stopped */
898 atomic_set(&priv->num_aux_in_flight, 0);
899
935 /* Clear out all status bits but a few that are stable across reset */ 900 /* Clear out all status bits but a few that are stable across reset */
936 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) << 901 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
937 STATUS_RF_KILL_HW | 902 STATUS_RF_KILL_HW |
938 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
939 STATUS_GEO_CONFIGURED |
940 test_bit(STATUS_FW_ERROR, &priv->status) << 903 test_bit(STATUS_FW_ERROR, &priv->status) <<
941 STATUS_FW_ERROR | 904 STATUS_FW_ERROR |
942 test_bit(STATUS_EXIT_PENDING, &priv->status) << 905 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
@@ -1078,7 +1041,7 @@ static void iwlagn_disable_roc_work(struct work_struct *work)
1078 * 1041 *
1079 *****************************************************************************/ 1042 *****************************************************************************/
1080 1043
1081void iwl_setup_deferred_work(struct iwl_priv *priv) 1044static void iwl_setup_deferred_work(struct iwl_priv *priv)
1082{ 1045{
1083 priv->workqueue = create_singlethread_workqueue(DRV_NAME); 1046 priv->workqueue = create_singlethread_workqueue(DRV_NAME);
1084 1047
@@ -1123,224 +1086,14 @@ void iwl_cancel_deferred_work(struct iwl_priv *priv)
1123 del_timer_sync(&priv->ucode_trace); 1086 del_timer_sync(&priv->ucode_trace);
1124} 1087}
1125 1088
1126static void iwl_init_hw_rates(struct ieee80211_rate *rates) 1089static int iwl_init_drv(struct iwl_priv *priv)
1127{
1128 int i;
1129
1130 for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
1131 rates[i].bitrate = iwl_rates[i].ieee * 5;
1132 rates[i].hw_value = i; /* Rate scaling will work on indexes */
1133 rates[i].hw_value_short = i;
1134 rates[i].flags = 0;
1135 if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
1136 /*
1137 * If CCK != 1M then set short preamble rate flag.
1138 */
1139 rates[i].flags |=
1140 (iwl_rates[i].plcp == IWL_RATE_1M_PLCP) ?
1141 0 : IEEE80211_RATE_SHORT_PREAMBLE;
1142 }
1143 }
1144}
1145
1146#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
1147#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
1148static void iwl_init_ht_hw_capab(const struct iwl_priv *priv,
1149 struct ieee80211_sta_ht_cap *ht_info,
1150 enum ieee80211_band band)
1151{
1152 u16 max_bit_rate = 0;
1153 u8 rx_chains_num = priv->hw_params.rx_chains_num;
1154 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1155
1156 ht_info->cap = 0;
1157 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
1158
1159 ht_info->ht_supported = true;
1160
1161 if (priv->cfg->ht_params &&
1162 priv->cfg->ht_params->ht_greenfield_support)
1163 ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
1164 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
1165 max_bit_rate = MAX_BIT_RATE_20_MHZ;
1166 if (priv->hw_params.ht40_channel & BIT(band)) {
1167 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
1168 ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
1169 ht_info->mcs.rx_mask[4] = 0x01;
1170 max_bit_rate = MAX_BIT_RATE_40_MHZ;
1171 }
1172
1173 if (iwlwifi_mod_params.amsdu_size_8K)
1174 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
1175
1176 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
1177 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
1178
1179 ht_info->mcs.rx_mask[0] = 0xFF;
1180 if (rx_chains_num >= 2)
1181 ht_info->mcs.rx_mask[1] = 0xFF;
1182 if (rx_chains_num >= 3)
1183 ht_info->mcs.rx_mask[2] = 0xFF;
1184
1185 /* Highest supported Rx data rate */
1186 max_bit_rate *= rx_chains_num;
1187 WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
1188 ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
1189
1190 /* Tx MCS capabilities */
1191 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
1192 if (tx_chains_num != rx_chains_num) {
1193 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
1194 ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
1195 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
1196 }
1197}
1198
1199/**
1200 * iwl_init_geos - Initialize mac80211's geo/channel info based from eeprom
1201 */
1202static int iwl_init_geos(struct iwl_priv *priv)
1203{
1204 struct iwl_channel_info *ch;
1205 struct ieee80211_supported_band *sband;
1206 struct ieee80211_channel *channels;
1207 struct ieee80211_channel *geo_ch;
1208 struct ieee80211_rate *rates;
1209 int i = 0;
1210 s8 max_tx_power = IWLAGN_TX_POWER_TARGET_POWER_MIN;
1211
1212 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
1213 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
1214 IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
1215 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
1216 return 0;
1217 }
1218
1219 channels = kcalloc(priv->channel_count,
1220 sizeof(struct ieee80211_channel), GFP_KERNEL);
1221 if (!channels)
1222 return -ENOMEM;
1223
1224 rates = kcalloc(IWL_RATE_COUNT_LEGACY, sizeof(struct ieee80211_rate),
1225 GFP_KERNEL);
1226 if (!rates) {
1227 kfree(channels);
1228 return -ENOMEM;
1229 }
1230
1231 /* 5.2GHz channels start after the 2.4GHz channels */
1232 sband = &priv->bands[IEEE80211_BAND_5GHZ];
1233 sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
1234 /* just OFDM */
1235 sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
1236 sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
1237
1238 if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE)
1239 iwl_init_ht_hw_capab(priv, &sband->ht_cap,
1240 IEEE80211_BAND_5GHZ);
1241
1242 sband = &priv->bands[IEEE80211_BAND_2GHZ];
1243 sband->channels = channels;
1244 /* OFDM & CCK */
1245 sband->bitrates = rates;
1246 sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
1247
1248 if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE)
1249 iwl_init_ht_hw_capab(priv, &sband->ht_cap,
1250 IEEE80211_BAND_2GHZ);
1251
1252 priv->ieee_channels = channels;
1253 priv->ieee_rates = rates;
1254
1255 for (i = 0; i < priv->channel_count; i++) {
1256 ch = &priv->channel_info[i];
1257
1258 /* FIXME: might be removed if scan is OK */
1259 if (!is_channel_valid(ch))
1260 continue;
1261
1262 sband = &priv->bands[ch->band];
1263
1264 geo_ch = &sband->channels[sband->n_channels++];
1265
1266 geo_ch->center_freq =
1267 ieee80211_channel_to_frequency(ch->channel, ch->band);
1268 geo_ch->max_power = ch->max_power_avg;
1269 geo_ch->max_antenna_gain = 0xff;
1270 geo_ch->hw_value = ch->channel;
1271
1272 if (is_channel_valid(ch)) {
1273 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
1274 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
1275
1276 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
1277 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
1278
1279 if (ch->flags & EEPROM_CHANNEL_RADAR)
1280 geo_ch->flags |= IEEE80211_CHAN_RADAR;
1281
1282 geo_ch->flags |= ch->ht40_extension_channel;
1283
1284 if (ch->max_power_avg > max_tx_power)
1285 max_tx_power = ch->max_power_avg;
1286 } else {
1287 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
1288 }
1289
1290 IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
1291 ch->channel, geo_ch->center_freq,
1292 is_channel_a_band(ch) ? "5.2" : "2.4",
1293 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
1294 "restricted" : "valid",
1295 geo_ch->flags);
1296 }
1297
1298 priv->tx_power_device_lmt = max_tx_power;
1299 priv->tx_power_user_lmt = max_tx_power;
1300 priv->tx_power_next = max_tx_power;
1301
1302 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
1303 priv->hw_params.sku & EEPROM_SKU_CAP_BAND_52GHZ) {
1304 IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
1305 "Please send your %s to maintainer.\n",
1306 priv->trans->hw_id_str);
1307 priv->hw_params.sku &= ~EEPROM_SKU_CAP_BAND_52GHZ;
1308 }
1309
1310 if (iwlwifi_mod_params.disable_5ghz)
1311 priv->bands[IEEE80211_BAND_5GHZ].n_channels = 0;
1312
1313 IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
1314 priv->bands[IEEE80211_BAND_2GHZ].n_channels,
1315 priv->bands[IEEE80211_BAND_5GHZ].n_channels);
1316
1317 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
1318
1319 return 0;
1320}
1321
1322/*
1323 * iwl_free_geos - undo allocations in iwl_init_geos
1324 */
1325static void iwl_free_geos(struct iwl_priv *priv)
1326{
1327 kfree(priv->ieee_channels);
1328 kfree(priv->ieee_rates);
1329 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
1330}
1331
1332int iwl_init_drv(struct iwl_priv *priv)
1333{ 1090{
1334 int ret;
1335
1336 spin_lock_init(&priv->sta_lock); 1091 spin_lock_init(&priv->sta_lock);
1337 1092
1338 mutex_init(&priv->mutex); 1093 mutex_init(&priv->mutex);
1339 1094
1340 INIT_LIST_HEAD(&priv->calib_results); 1095 INIT_LIST_HEAD(&priv->calib_results);
1341 1096
1342 priv->ieee_channels = NULL;
1343 priv->ieee_rates = NULL;
1344 priv->band = IEEE80211_BAND_2GHZ; 1097 priv->band = IEEE80211_BAND_2GHZ;
1345 1098
1346 priv->plcp_delta_threshold = 1099 priv->plcp_delta_threshold =
@@ -1371,31 +1124,11 @@ int iwl_init_drv(struct iwl_priv *priv)
1371 priv->dynamic_frag_thresh = BT_FRAG_THRESHOLD_DEF; 1124 priv->dynamic_frag_thresh = BT_FRAG_THRESHOLD_DEF;
1372 } 1125 }
1373 1126
1374 ret = iwl_init_channel_map(priv);
1375 if (ret) {
1376 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
1377 goto err;
1378 }
1379
1380 ret = iwl_init_geos(priv);
1381 if (ret) {
1382 IWL_ERR(priv, "initializing geos failed: %d\n", ret);
1383 goto err_free_channel_map;
1384 }
1385 iwl_init_hw_rates(priv->ieee_rates);
1386
1387 return 0; 1127 return 0;
1388
1389err_free_channel_map:
1390 iwl_free_channel_map(priv);
1391err:
1392 return ret;
1393} 1128}
1394 1129
1395void iwl_uninit_drv(struct iwl_priv *priv) 1130static void iwl_uninit_drv(struct iwl_priv *priv)
1396{ 1131{
1397 iwl_free_geos(priv);
1398 iwl_free_channel_map(priv);
1399 kfree(priv->scan_cmd); 1132 kfree(priv->scan_cmd);
1400 kfree(priv->beacon_cmd); 1133 kfree(priv->beacon_cmd);
1401 kfree(rcu_dereference_raw(priv->noa_data)); 1134 kfree(rcu_dereference_raw(priv->noa_data));
@@ -1405,15 +1138,12 @@ void iwl_uninit_drv(struct iwl_priv *priv)
1405#endif 1138#endif
1406} 1139}
1407 1140
1408void iwl_set_hw_params(struct iwl_priv *priv) 1141static void iwl_set_hw_params(struct iwl_priv *priv)
1409{ 1142{
1410 if (priv->cfg->ht_params) 1143 if (priv->cfg->ht_params)
1411 priv->hw_params.use_rts_for_aggregation = 1144 priv->hw_params.use_rts_for_aggregation =
1412 priv->cfg->ht_params->use_rts_for_aggregation; 1145 priv->cfg->ht_params->use_rts_for_aggregation;
1413 1146
1414 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
1415 priv->hw_params.sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
1416
1417 /* Device-specific setup */ 1147 /* Device-specific setup */
1418 priv->lib->set_hw_params(priv); 1148 priv->lib->set_hw_params(priv);
1419} 1149}
@@ -1421,7 +1151,7 @@ void iwl_set_hw_params(struct iwl_priv *priv)
1421 1151
1422 1152
1423/* show what optional capabilities we have */ 1153/* show what optional capabilities we have */
1424void iwl_option_config(struct iwl_priv *priv) 1154static void iwl_option_config(struct iwl_priv *priv)
1425{ 1155{
1426#ifdef CONFIG_IWLWIFI_DEBUG 1156#ifdef CONFIG_IWLWIFI_DEBUG
1427 IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUG enabled\n"); 1157 IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUG enabled\n");
@@ -1454,6 +1184,42 @@ void iwl_option_config(struct iwl_priv *priv)
1454#endif 1184#endif
1455} 1185}
1456 1186
1187static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
1188{
1189 u16 radio_cfg;
1190
1191 priv->eeprom_data->sku = priv->eeprom_data->sku;
1192
1193 if (priv->eeprom_data->sku & EEPROM_SKU_CAP_11N_ENABLE &&
1194 !priv->cfg->ht_params) {
1195 IWL_ERR(priv, "Invalid 11n configuration\n");
1196 return -EINVAL;
1197 }
1198
1199 if (!priv->eeprom_data->sku) {
1200 IWL_ERR(priv, "Invalid device sku\n");
1201 return -EINVAL;
1202 }
1203
1204 IWL_INFO(priv, "Device SKU: 0x%X\n", priv->eeprom_data->sku);
1205
1206 radio_cfg = priv->eeprom_data->radio_cfg;
1207
1208 priv->hw_params.tx_chains_num =
1209 num_of_ant(priv->eeprom_data->valid_tx_ant);
1210 if (priv->cfg->rx_with_siso_diversity)
1211 priv->hw_params.rx_chains_num = 1;
1212 else
1213 priv->hw_params.rx_chains_num =
1214 num_of_ant(priv->eeprom_data->valid_rx_ant);
1215
1216 IWL_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n",
1217 priv->eeprom_data->valid_tx_ant,
1218 priv->eeprom_data->valid_rx_ant);
1219
1220 return 0;
1221}
1222
1457static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, 1223static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1458 const struct iwl_cfg *cfg, 1224 const struct iwl_cfg *cfg,
1459 const struct iwl_fw *fw) 1225 const struct iwl_fw *fw)
@@ -1539,8 +1305,12 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1539 trans_cfg.queue_watchdog_timeout = 1305 trans_cfg.queue_watchdog_timeout =
1540 priv->cfg->base_params->wd_timeout; 1306 priv->cfg->base_params->wd_timeout;
1541 else 1307 else
1542 trans_cfg.queue_watchdog_timeout = IWL_WATCHHDOG_DISABLED; 1308 trans_cfg.queue_watchdog_timeout = IWL_WATCHDOG_DISABLED;
1543 trans_cfg.command_names = iwl_dvm_cmd_strings; 1309 trans_cfg.command_names = iwl_dvm_cmd_strings;
1310 trans_cfg.cmd_fifo = IWLAGN_CMD_FIFO_NUM;
1311
1312 WARN_ON(sizeof(priv->transport_queue_stop) * BITS_PER_BYTE <
1313 priv->cfg->base_params->num_of_queues);
1544 1314
1545 ucode_flags = fw->ucode_capa.flags; 1315 ucode_flags = fw->ucode_capa.flags;
1546 1316
@@ -1551,15 +1321,9 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1551 if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) { 1321 if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) {
1552 priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN; 1322 priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
1553 trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM; 1323 trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
1554 trans_cfg.queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
1555 trans_cfg.n_queue_to_fifo =
1556 ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo);
1557 } else { 1324 } else {
1558 priv->sta_key_max_num = STA_KEY_MAX_NUM; 1325 priv->sta_key_max_num = STA_KEY_MAX_NUM;
1559 trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM; 1326 trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
1560 trans_cfg.queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
1561 trans_cfg.n_queue_to_fifo =
1562 ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo);
1563 } 1327 }
1564 1328
1565 /* Configure transport layer */ 1329 /* Configure transport layer */
@@ -1599,25 +1363,33 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1599 goto out_free_hw; 1363 goto out_free_hw;
1600 1364
1601 /* Read the EEPROM */ 1365 /* Read the EEPROM */
1602 if (iwl_eeprom_init(priv, priv->trans->hw_rev)) { 1366 if (iwl_read_eeprom(priv->trans, &priv->eeprom_blob,
1367 &priv->eeprom_blob_size)) {
1603 IWL_ERR(priv, "Unable to init EEPROM\n"); 1368 IWL_ERR(priv, "Unable to init EEPROM\n");
1604 goto out_free_hw; 1369 goto out_free_hw;
1605 } 1370 }
1371
1606 /* Reset chip to save power until we load uCode during "up". */ 1372 /* Reset chip to save power until we load uCode during "up". */
1607 iwl_trans_stop_hw(priv->trans, false); 1373 iwl_trans_stop_hw(priv->trans, false);
1608 1374
1609 if (iwl_eeprom_check_version(priv)) 1375 priv->eeprom_data = iwl_parse_eeprom_data(priv->trans->dev, priv->cfg,
1376 priv->eeprom_blob,
1377 priv->eeprom_blob_size);
1378 if (!priv->eeprom_data)
1379 goto out_free_eeprom_blob;
1380
1381 if (iwl_eeprom_check_version(priv->eeprom_data, priv->trans))
1610 goto out_free_eeprom; 1382 goto out_free_eeprom;
1611 1383
1612 if (iwl_eeprom_init_hw_params(priv)) 1384 if (iwl_eeprom_init_hw_params(priv))
1613 goto out_free_eeprom; 1385 goto out_free_eeprom;
1614 1386
1615 /* extract MAC Address */ 1387 /* extract MAC Address */
1616 iwl_eeprom_get_mac(priv, priv->addresses[0].addr); 1388 memcpy(priv->addresses[0].addr, priv->eeprom_data->hw_addr, ETH_ALEN);
1617 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr); 1389 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
1618 priv->hw->wiphy->addresses = priv->addresses; 1390 priv->hw->wiphy->addresses = priv->addresses;
1619 priv->hw->wiphy->n_addresses = 1; 1391 priv->hw->wiphy->n_addresses = 1;
1620 num_mac = iwl_eeprom_query16(priv, EEPROM_NUM_MAC_ADDRESS); 1392 num_mac = priv->eeprom_data->n_hw_addrs;
1621 if (num_mac > 1) { 1393 if (num_mac > 1) {
1622 memcpy(priv->addresses[1].addr, priv->addresses[0].addr, 1394 memcpy(priv->addresses[1].addr, priv->addresses[0].addr,
1623 ETH_ALEN); 1395 ETH_ALEN);
@@ -1630,7 +1402,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1630 ************************/ 1402 ************************/
1631 iwl_set_hw_params(priv); 1403 iwl_set_hw_params(priv);
1632 1404
1633 if (!(priv->hw_params.sku & EEPROM_SKU_CAP_IPAN_ENABLE)) { 1405 if (!(priv->eeprom_data->sku & EEPROM_SKU_CAP_IPAN_ENABLE)) {
1634 IWL_DEBUG_INFO(priv, "Your EEPROM disabled PAN"); 1406 IWL_DEBUG_INFO(priv, "Your EEPROM disabled PAN");
1635 ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN; 1407 ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
1636 /* 1408 /*
@@ -1640,9 +1412,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1640 ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P; 1412 ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P;
1641 priv->sta_key_max_num = STA_KEY_MAX_NUM; 1413 priv->sta_key_max_num = STA_KEY_MAX_NUM;
1642 trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM; 1414 trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
1643 trans_cfg.queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
1644 trans_cfg.n_queue_to_fifo =
1645 ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo);
1646 1415
1647 /* Configure transport layer again*/ 1416 /* Configure transport layer again*/
1648 iwl_trans_configure(priv->trans, &trans_cfg); 1417 iwl_trans_configure(priv->trans, &trans_cfg);
@@ -1660,9 +1429,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1660 atomic_set(&priv->queue_stop_count[i], 0); 1429 atomic_set(&priv->queue_stop_count[i], 0);
1661 } 1430 }
1662 1431
1663 WARN_ON(trans_cfg.queue_to_fifo[trans_cfg.cmd_queue] !=
1664 IWLAGN_CMD_FIFO_NUM);
1665
1666 if (iwl_init_drv(priv)) 1432 if (iwl_init_drv(priv))
1667 goto out_free_eeprom; 1433 goto out_free_eeprom;
1668 1434
@@ -1711,8 +1477,10 @@ out_destroy_workqueue:
1711 destroy_workqueue(priv->workqueue); 1477 destroy_workqueue(priv->workqueue);
1712 priv->workqueue = NULL; 1478 priv->workqueue = NULL;
1713 iwl_uninit_drv(priv); 1479 iwl_uninit_drv(priv);
1480out_free_eeprom_blob:
1481 kfree(priv->eeprom_blob);
1714out_free_eeprom: 1482out_free_eeprom:
1715 iwl_eeprom_free(priv); 1483 iwl_free_eeprom_data(priv->eeprom_data);
1716out_free_hw: 1484out_free_hw:
1717 ieee80211_free_hw(priv->hw); 1485 ieee80211_free_hw(priv->hw);
1718out: 1486out:
@@ -1720,7 +1488,7 @@ out:
1720 return op_mode; 1488 return op_mode;
1721} 1489}
1722 1490
1723void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode) 1491static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
1724{ 1492{
1725 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); 1493 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
1726 1494
@@ -1728,7 +1496,7 @@ void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
1728 1496
1729 iwl_dbgfs_unregister(priv); 1497 iwl_dbgfs_unregister(priv);
1730 1498
1731 iwl_testmode_cleanup(priv); 1499 iwl_testmode_free(priv);
1732 iwlagn_mac_unregister(priv); 1500 iwlagn_mac_unregister(priv);
1733 1501
1734 iwl_tt_exit(priv); 1502 iwl_tt_exit(priv);
@@ -1737,7 +1505,8 @@ void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
1737 priv->ucode_loaded = false; 1505 priv->ucode_loaded = false;
1738 iwl_trans_stop_device(priv->trans); 1506 iwl_trans_stop_device(priv->trans);
1739 1507
1740 iwl_eeprom_free(priv); 1508 kfree(priv->eeprom_blob);
1509 iwl_free_eeprom_data(priv->eeprom_data);
1741 1510
1742 /*netif_stop_queue(dev); */ 1511 /*netif_stop_queue(dev); */
1743 flush_workqueue(priv->workqueue); 1512 flush_workqueue(priv->workqueue);
@@ -1850,7 +1619,7 @@ static void iwl_dump_nic_error_log(struct iwl_priv *priv)
1850 } 1619 }
1851 1620
1852 /*TODO: Update dbgfs with ISR error stats obtained below */ 1621 /*TODO: Update dbgfs with ISR error stats obtained below */
1853 iwl_read_targ_mem_words(trans, base, &table, sizeof(table)); 1622 iwl_read_targ_mem_bytes(trans, base, &table, sizeof(table));
1854 1623
1855 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { 1624 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
1856 IWL_ERR(trans, "Start IWL Error Log Dump:\n"); 1625 IWL_ERR(trans, "Start IWL Error Log Dump:\n");
@@ -2185,7 +1954,7 @@ static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
2185 } 1954 }
2186} 1955}
2187 1956
2188void iwl_nic_error(struct iwl_op_mode *op_mode) 1957static void iwl_nic_error(struct iwl_op_mode *op_mode)
2189{ 1958{
2190 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); 1959 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2191 1960
@@ -2198,7 +1967,7 @@ void iwl_nic_error(struct iwl_op_mode *op_mode)
2198 iwlagn_fw_error(priv, false); 1967 iwlagn_fw_error(priv, false);
2199} 1968}
2200 1969
2201void iwl_cmd_queue_full(struct iwl_op_mode *op_mode) 1970static void iwl_cmd_queue_full(struct iwl_op_mode *op_mode)
2202{ 1971{
2203 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); 1972 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2204 1973
@@ -2208,11 +1977,60 @@ void iwl_cmd_queue_full(struct iwl_op_mode *op_mode)
2208 } 1977 }
2209} 1978}
2210 1979
2211void iwl_nic_config(struct iwl_op_mode *op_mode) 1980#define EEPROM_RF_CONFIG_TYPE_MAX 0x3
1981
1982static void iwl_nic_config(struct iwl_op_mode *op_mode)
2212{ 1983{
2213 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); 1984 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
1985 u16 radio_cfg = priv->eeprom_data->radio_cfg;
1986
1987 /* SKU Control */
1988 iwl_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
1989 CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
1990 CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP,
1991 (CSR_HW_REV_STEP(priv->trans->hw_rev) <<
1992 CSR_HW_IF_CONFIG_REG_POS_MAC_STEP) |
1993 (CSR_HW_REV_DASH(priv->trans->hw_rev) <<
1994 CSR_HW_IF_CONFIG_REG_POS_MAC_DASH));
1995
1996 /* write radio config values to register */
1997 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) {
1998 u32 reg_val =
1999 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <<
2000 CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE |
2001 EEPROM_RF_CFG_STEP_MSK(radio_cfg) <<
2002 CSR_HW_IF_CONFIG_REG_POS_PHY_STEP |
2003 EEPROM_RF_CFG_DASH_MSK(radio_cfg) <<
2004 CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2005
2006 iwl_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
2007 CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2008 CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2009 CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH, reg_val);
2010
2011 IWL_INFO(priv, "Radio type=0x%x-0x%x-0x%x\n",
2012 EEPROM_RF_CFG_TYPE_MSK(radio_cfg),
2013 EEPROM_RF_CFG_STEP_MSK(radio_cfg),
2014 EEPROM_RF_CFG_DASH_MSK(radio_cfg));
2015 } else {
2016 WARN_ON(1);
2017 }
2214 2018
2215 priv->lib->nic_config(priv); 2019 /* set CSR_HW_CONFIG_REG for uCode use */
2020 iwl_set_bit(priv->trans, CSR_HW_IF_CONFIG_REG,
2021 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2022 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
2023
2024 /* W/A : NIC is stuck in a reset state after Early PCIe power off
2025 * (PCIe power is lost before PERST# is asserted),
2026 * causing ME FW to lose ownership and not being able to obtain it back.
2027 */
2028 iwl_set_bits_mask_prph(priv->trans, APMG_PS_CTRL_REG,
2029 APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
2030 ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
2031
2032 if (priv->lib->nic_config)
2033 priv->lib->nic_config(priv);
2216} 2034}
2217 2035
2218static void iwl_wimax_active(struct iwl_op_mode *op_mode) 2036static void iwl_wimax_active(struct iwl_op_mode *op_mode)
@@ -2223,7 +2041,7 @@ static void iwl_wimax_active(struct iwl_op_mode *op_mode)
2223 IWL_ERR(priv, "RF is used by WiMAX\n"); 2041 IWL_ERR(priv, "RF is used by WiMAX\n");
2224} 2042}
2225 2043
2226void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, int queue) 2044static void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
2227{ 2045{
2228 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); 2046 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2229 int mq = priv->queue_to_mac80211[queue]; 2047 int mq = priv->queue_to_mac80211[queue];
@@ -2242,7 +2060,7 @@ void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
2242 ieee80211_stop_queue(priv->hw, mq); 2060 ieee80211_stop_queue(priv->hw, mq);
2243} 2061}
2244 2062
2245void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, int queue) 2063static void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
2246{ 2064{
2247 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); 2065 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2248 int mq = priv->queue_to_mac80211[queue]; 2066 int mq = priv->queue_to_mac80211[queue];
@@ -2282,16 +2100,17 @@ void iwlagn_lift_passive_no_rx(struct iwl_priv *priv)
2282 priv->passive_no_rx = false; 2100 priv->passive_no_rx = false;
2283} 2101}
2284 2102
2285void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) 2103static void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
2286{ 2104{
2105 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2287 struct ieee80211_tx_info *info; 2106 struct ieee80211_tx_info *info;
2288 2107
2289 info = IEEE80211_SKB_CB(skb); 2108 info = IEEE80211_SKB_CB(skb);
2290 kmem_cache_free(iwl_tx_cmd_pool, (info->driver_data[1])); 2109 iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);
2291 dev_kfree_skb_any(skb); 2110 dev_kfree_skb_any(skb);
2292} 2111}
2293 2112
2294void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) 2113static void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
2295{ 2114{
2296 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); 2115 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2297 2116
@@ -2303,7 +2122,7 @@ void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
2303 wiphy_rfkill_set_hw_state(priv->hw->wiphy, state); 2122 wiphy_rfkill_set_hw_state(priv->hw->wiphy, state);
2304} 2123}
2305 2124
2306const struct iwl_op_mode_ops iwl_dvm_ops = { 2125static const struct iwl_op_mode_ops iwl_dvm_ops = {
2307 .start = iwl_op_mode_dvm_start, 2126 .start = iwl_op_mode_dvm_start,
2308 .stop = iwl_op_mode_dvm_stop, 2127 .stop = iwl_op_mode_dvm_stop,
2309 .rx = iwl_rx_dispatch, 2128 .rx = iwl_rx_dispatch,
@@ -2322,9 +2141,6 @@ const struct iwl_op_mode_ops iwl_dvm_ops = {
2322 * driver and module entry point 2141 * driver and module entry point
2323 * 2142 *
2324 *****************************************************************************/ 2143 *****************************************************************************/
2325
2326struct kmem_cache *iwl_tx_cmd_pool;
2327
2328static int __init iwl_init(void) 2144static int __init iwl_init(void)
2329{ 2145{
2330 2146
@@ -2332,36 +2148,25 @@ static int __init iwl_init(void)
2332 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n"); 2148 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
2333 pr_info(DRV_COPYRIGHT "\n"); 2149 pr_info(DRV_COPYRIGHT "\n");
2334 2150
2335 iwl_tx_cmd_pool = kmem_cache_create("iwl_dev_cmd",
2336 sizeof(struct iwl_device_cmd),
2337 sizeof(void *), 0, NULL);
2338 if (!iwl_tx_cmd_pool)
2339 return -ENOMEM;
2340
2341 ret = iwlagn_rate_control_register(); 2151 ret = iwlagn_rate_control_register();
2342 if (ret) { 2152 if (ret) {
2343 pr_err("Unable to register rate control algorithm: %d\n", ret); 2153 pr_err("Unable to register rate control algorithm: %d\n", ret);
2344 goto error_rc_register; 2154 return ret;
2345 } 2155 }
2346 2156
2347 ret = iwl_pci_register_driver(); 2157 ret = iwl_opmode_register("iwldvm", &iwl_dvm_ops);
2348 if (ret) 2158 if (ret) {
2349 goto error_pci_register; 2159 pr_err("Unable to register op_mode: %d\n", ret);
2350 return ret; 2160 iwlagn_rate_control_unregister();
2161 }
2351 2162
2352error_pci_register:
2353 iwlagn_rate_control_unregister();
2354error_rc_register:
2355 kmem_cache_destroy(iwl_tx_cmd_pool);
2356 return ret; 2163 return ret;
2357} 2164}
2165module_init(iwl_init);
2358 2166
2359static void __exit iwl_exit(void) 2167static void __exit iwl_exit(void)
2360{ 2168{
2361 iwl_pci_unregister_driver(); 2169 iwl_opmode_deregister("iwldvm");
2362 iwlagn_rate_control_unregister(); 2170 iwlagn_rate_control_unregister();
2363 kmem_cache_destroy(iwl_tx_cmd_pool);
2364} 2171}
2365
2366module_exit(iwl_exit); 2172module_exit(iwl_exit);
2367module_init(iwl_init);
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/dvm/power.c
index 544ddf17f5bd..518cf3715809 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/dvm/power.c
@@ -31,18 +31,15 @@
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/init.h> 33#include <linux/init.h>
34
35#include <net/mac80211.h> 34#include <net/mac80211.h>
36
37#include "iwl-eeprom.h"
38#include "iwl-dev.h"
39#include "iwl-agn.h"
40#include "iwl-io.h" 35#include "iwl-io.h"
41#include "iwl-commands.h"
42#include "iwl-debug.h" 36#include "iwl-debug.h"
43#include "iwl-power.h"
44#include "iwl-trans.h" 37#include "iwl-trans.h"
45#include "iwl-modparams.h" 38#include "iwl-modparams.h"
39#include "dev.h"
40#include "agn.h"
41#include "commands.h"
42#include "power.h"
46 43
47/* 44/*
48 * Setting power level allows the card to go to sleep when not busy. 45 * Setting power level allows the card to go to sleep when not busy.
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/dvm/power.h
index 21afc92efacb..a2cee7f04848 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.h
+++ b/drivers/net/wireless/iwlwifi/dvm/power.h
@@ -28,7 +28,7 @@
28#ifndef __iwl_power_setting_h__ 28#ifndef __iwl_power_setting_h__
29#define __iwl_power_setting_h__ 29#define __iwl_power_setting_h__
30 30
31#include "iwl-commands.h" 31#include "commands.h"
32 32
33struct iwl_power_mgr { 33struct iwl_power_mgr {
34 struct iwl_powertable_cmd sleep_cmd; 34 struct iwl_powertable_cmd sleep_cmd;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index 8cebd7c363fc..6fddd2785e6e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -35,10 +35,8 @@
35 35
36#include <linux/workqueue.h> 36#include <linux/workqueue.h>
37 37
38#include "iwl-dev.h" 38#include "dev.h"
39#include "iwl-agn.h" 39#include "agn.h"
40#include "iwl-op-mode.h"
41#include "iwl-modparams.h"
42 40
43#define RS_NAME "iwl-agn-rs" 41#define RS_NAME "iwl-agn-rs"
44 42
@@ -819,7 +817,7 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
819 817
820 if (num_of_ant(tbl->ant_type) > 1) 818 if (num_of_ant(tbl->ant_type) > 1)
821 tbl->ant_type = 819 tbl->ant_type =
822 first_antenna(priv->hw_params.valid_tx_ant); 820 first_antenna(priv->eeprom_data->valid_tx_ant);
823 821
824 tbl->is_ht40 = 0; 822 tbl->is_ht40 = 0;
825 tbl->is_SGI = 0; 823 tbl->is_SGI = 0;
@@ -1447,7 +1445,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1447 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1445 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1448 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1446 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1449 u8 start_action; 1447 u8 start_action;
1450 u8 valid_tx_ant = priv->hw_params.valid_tx_ant; 1448 u8 valid_tx_ant = priv->eeprom_data->valid_tx_ant;
1451 u8 tx_chains_num = priv->hw_params.tx_chains_num; 1449 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1452 int ret = 0; 1450 int ret = 0;
1453 u8 update_search_tbl_counter = 0; 1451 u8 update_search_tbl_counter = 0;
@@ -1465,7 +1463,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1465 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: 1463 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1466 /* avoid antenna B and MIMO */ 1464 /* avoid antenna B and MIMO */
1467 valid_tx_ant = 1465 valid_tx_ant =
1468 first_antenna(priv->hw_params.valid_tx_ant); 1466 first_antenna(priv->eeprom_data->valid_tx_ant);
1469 if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2 && 1467 if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2 &&
1470 tbl->action != IWL_LEGACY_SWITCH_SISO) 1468 tbl->action != IWL_LEGACY_SWITCH_SISO)
1471 tbl->action = IWL_LEGACY_SWITCH_SISO; 1469 tbl->action = IWL_LEGACY_SWITCH_SISO;
@@ -1489,7 +1487,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1489 else if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2) 1487 else if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
1490 tbl->action = IWL_LEGACY_SWITCH_SISO; 1488 tbl->action = IWL_LEGACY_SWITCH_SISO;
1491 valid_tx_ant = 1489 valid_tx_ant =
1492 first_antenna(priv->hw_params.valid_tx_ant); 1490 first_antenna(priv->eeprom_data->valid_tx_ant);
1493 } 1491 }
1494 1492
1495 start_action = tbl->action; 1493 start_action = tbl->action;
@@ -1623,7 +1621,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1623 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1621 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1624 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1622 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1625 u8 start_action; 1623 u8 start_action;
1626 u8 valid_tx_ant = priv->hw_params.valid_tx_ant; 1624 u8 valid_tx_ant = priv->eeprom_data->valid_tx_ant;
1627 u8 tx_chains_num = priv->hw_params.tx_chains_num; 1625 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1628 u8 update_search_tbl_counter = 0; 1626 u8 update_search_tbl_counter = 0;
1629 int ret; 1627 int ret;
@@ -1641,7 +1639,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1641 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: 1639 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1642 /* avoid antenna B and MIMO */ 1640 /* avoid antenna B and MIMO */
1643 valid_tx_ant = 1641 valid_tx_ant =
1644 first_antenna(priv->hw_params.valid_tx_ant); 1642 first_antenna(priv->eeprom_data->valid_tx_ant);
1645 if (tbl->action != IWL_SISO_SWITCH_ANTENNA1) 1643 if (tbl->action != IWL_SISO_SWITCH_ANTENNA1)
1646 tbl->action = IWL_SISO_SWITCH_ANTENNA1; 1644 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1647 break; 1645 break;
@@ -1659,7 +1657,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1659 /* configure as 1x1 if bt full concurrency */ 1657 /* configure as 1x1 if bt full concurrency */
1660 if (priv->bt_full_concurrent) { 1658 if (priv->bt_full_concurrent) {
1661 valid_tx_ant = 1659 valid_tx_ant =
1662 first_antenna(priv->hw_params.valid_tx_ant); 1660 first_antenna(priv->eeprom_data->valid_tx_ant);
1663 if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2) 1661 if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
1664 tbl->action = IWL_SISO_SWITCH_ANTENNA1; 1662 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1665 } 1663 }
@@ -1795,7 +1793,7 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv,
1795 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1793 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1796 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1794 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1797 u8 start_action; 1795 u8 start_action;
1798 u8 valid_tx_ant = priv->hw_params.valid_tx_ant; 1796 u8 valid_tx_ant = priv->eeprom_data->valid_tx_ant;
1799 u8 tx_chains_num = priv->hw_params.tx_chains_num; 1797 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1800 u8 update_search_tbl_counter = 0; 1798 u8 update_search_tbl_counter = 0;
1801 int ret; 1799 int ret;
@@ -1965,7 +1963,7 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv,
1965 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1963 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1966 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1964 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1967 u8 start_action; 1965 u8 start_action;
1968 u8 valid_tx_ant = priv->hw_params.valid_tx_ant; 1966 u8 valid_tx_ant = priv->eeprom_data->valid_tx_ant;
1969 u8 tx_chains_num = priv->hw_params.tx_chains_num; 1967 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1970 int ret; 1968 int ret;
1971 u8 update_search_tbl_counter = 0; 1969 u8 update_search_tbl_counter = 0;
@@ -2699,7 +2697,7 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2699 2697
2700 i = lq_sta->last_txrate_idx; 2698 i = lq_sta->last_txrate_idx;
2701 2699
2702 valid_tx_ant = priv->hw_params.valid_tx_ant; 2700 valid_tx_ant = priv->eeprom_data->valid_tx_ant;
2703 2701
2704 if (!lq_sta->search_better_tbl) 2702 if (!lq_sta->search_better_tbl)
2705 active_tbl = lq_sta->active_tbl; 2703 active_tbl = lq_sta->active_tbl;
@@ -2893,15 +2891,15 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
2893 2891
2894 /* These values will be overridden later */ 2892 /* These values will be overridden later */
2895 lq_sta->lq.general_params.single_stream_ant_msk = 2893 lq_sta->lq.general_params.single_stream_ant_msk =
2896 first_antenna(priv->hw_params.valid_tx_ant); 2894 first_antenna(priv->eeprom_data->valid_tx_ant);
2897 lq_sta->lq.general_params.dual_stream_ant_msk = 2895 lq_sta->lq.general_params.dual_stream_ant_msk =
2898 priv->hw_params.valid_tx_ant & 2896 priv->eeprom_data->valid_tx_ant &
2899 ~first_antenna(priv->hw_params.valid_tx_ant); 2897 ~first_antenna(priv->eeprom_data->valid_tx_ant);
2900 if (!lq_sta->lq.general_params.dual_stream_ant_msk) { 2898 if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
2901 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB; 2899 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
2902 } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) { 2900 } else if (num_of_ant(priv->eeprom_data->valid_tx_ant) == 2) {
2903 lq_sta->lq.general_params.dual_stream_ant_msk = 2901 lq_sta->lq.general_params.dual_stream_ant_msk =
2904 priv->hw_params.valid_tx_ant; 2902 priv->eeprom_data->valid_tx_ant;
2905 } 2903 }
2906 2904
2907 /* as default allow aggregation for all tids */ 2905 /* as default allow aggregation for all tids */
@@ -2947,7 +2945,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
2947 if (priv && priv->bt_full_concurrent) { 2945 if (priv && priv->bt_full_concurrent) {
2948 /* 1x1 only */ 2946 /* 1x1 only */
2949 tbl_type.ant_type = 2947 tbl_type.ant_type =
2950 first_antenna(priv->hw_params.valid_tx_ant); 2948 first_antenna(priv->eeprom_data->valid_tx_ant);
2951 } 2949 }
2952 2950
2953 /* How many times should we repeat the initial rate? */ 2951 /* How many times should we repeat the initial rate? */
@@ -2979,7 +2977,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
2979 if (priv->bt_full_concurrent) 2977 if (priv->bt_full_concurrent)
2980 valid_tx_ant = ANT_A; 2978 valid_tx_ant = ANT_A;
2981 else 2979 else
2982 valid_tx_ant = priv->hw_params.valid_tx_ant; 2980 valid_tx_ant = priv->eeprom_data->valid_tx_ant;
2983 } 2981 }
2984 2982
2985 /* Fill rest of rate table */ 2983 /* Fill rest of rate table */
@@ -3013,7 +3011,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
3013 if (priv && priv->bt_full_concurrent) { 3011 if (priv && priv->bt_full_concurrent) {
3014 /* 1x1 only */ 3012 /* 1x1 only */
3015 tbl_type.ant_type = 3013 tbl_type.ant_type =
3016 first_antenna(priv->hw_params.valid_tx_ant); 3014 first_antenna(priv->eeprom_data->valid_tx_ant);
3017 } 3015 }
3018 3016
3019 /* Indicate to uCode which entries might be MIMO. 3017 /* Indicate to uCode which entries might be MIMO.
@@ -3100,7 +3098,7 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
3100 u8 ant_sel_tx; 3098 u8 ant_sel_tx;
3101 3099
3102 priv = lq_sta->drv; 3100 priv = lq_sta->drv;
3103 valid_tx_ant = priv->hw_params.valid_tx_ant; 3101 valid_tx_ant = priv->eeprom_data->valid_tx_ant;
3104 if (lq_sta->dbg_fixed_rate) { 3102 if (lq_sta->dbg_fixed_rate) {
3105 ant_sel_tx = 3103 ant_sel_tx =
3106 ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) 3104 ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
@@ -3171,9 +3169,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
3171 desc += sprintf(buff+desc, "fixed rate 0x%X\n", 3169 desc += sprintf(buff+desc, "fixed rate 0x%X\n",
3172 lq_sta->dbg_fixed_rate); 3170 lq_sta->dbg_fixed_rate);
3173 desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n", 3171 desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
3174 (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "", 3172 (priv->eeprom_data->valid_tx_ant & ANT_A) ? "ANT_A," : "",
3175 (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "", 3173 (priv->eeprom_data->valid_tx_ant & ANT_B) ? "ANT_B," : "",
3176 (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : ""); 3174 (priv->eeprom_data->valid_tx_ant & ANT_C) ? "ANT_C" : "");
3177 desc += sprintf(buff+desc, "lq type %s\n", 3175 desc += sprintf(buff+desc, "lq type %s\n",
3178 (is_legacy(tbl->lq_type)) ? "legacy" : "HT"); 3176 (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
3179 if (is_Ht(tbl->lq_type)) { 3177 if (is_Ht(tbl->lq_type)) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/dvm/rs.h
index 82d02e1ae89f..ad3aea8f626a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.h
@@ -29,9 +29,10 @@
29 29
30#include <net/mac80211.h> 30#include <net/mac80211.h>
31 31
32#include "iwl-commands.h"
33#include "iwl-config.h" 32#include "iwl-config.h"
34 33
34#include "commands.h"
35
35struct iwl_rate_info { 36struct iwl_rate_info {
36 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ 37 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
37 u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */ 38 u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c
index 403de96f9747..c1f7a18e08dd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rx.c
@@ -32,12 +32,10 @@
32#include <linux/sched.h> 32#include <linux/sched.h>
33#include <net/mac80211.h> 33#include <net/mac80211.h>
34#include <asm/unaligned.h> 34#include <asm/unaligned.h>
35#include "iwl-eeprom.h"
36#include "iwl-dev.h"
37#include "iwl-io.h" 35#include "iwl-io.h"
38#include "iwl-agn-calib.h" 36#include "dev.h"
39#include "iwl-agn.h" 37#include "calib.h"
40#include "iwl-modparams.h" 38#include "agn.h"
41 39
42#define IWL_CMD_ENTRY(x) [x] = #x 40#define IWL_CMD_ENTRY(x) [x] = #x
43 41
@@ -1012,6 +1010,8 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
1012 rx_status.flag |= RX_FLAG_40MHZ; 1010 rx_status.flag |= RX_FLAG_40MHZ;
1013 if (rate_n_flags & RATE_MCS_SGI_MSK) 1011 if (rate_n_flags & RATE_MCS_SGI_MSK)
1014 rx_status.flag |= RX_FLAG_SHORT_GI; 1012 rx_status.flag |= RX_FLAG_SHORT_GI;
1013 if (rate_n_flags & RATE_MCS_GF_MSK)
1014 rx_status.flag |= RX_FLAG_HT_GF;
1015 1015
1016 iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status, 1016 iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
1017 rxb, &rx_status); 1017 rxb, &rx_status);
@@ -1124,8 +1124,6 @@ int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
1124{ 1124{
1125 struct iwl_rx_packet *pkt = rxb_addr(rxb); 1125 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1126 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); 1126 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
1127 void (*pre_rx_handler)(struct iwl_priv *,
1128 struct iwl_rx_cmd_buffer *);
1129 int err = 0; 1127 int err = 0;
1130 1128
1131 /* 1129 /*
@@ -1135,19 +1133,19 @@ int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
1135 */ 1133 */
1136 iwl_notification_wait_notify(&priv->notif_wait, pkt); 1134 iwl_notification_wait_notify(&priv->notif_wait, pkt);
1137 1135
1138 /* RX data may be forwarded to userspace (using pre_rx_handler) in one 1136#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
1139 * of two cases: the first, that the user owns the uCode through 1137 /*
1140 * testmode - in such case the pre_rx_handler is set and no further 1138 * RX data may be forwarded to userspace in one
1141 * processing takes place. The other case is when the user want to 1139 * of two cases: the user owns the fw through testmode or when
1142 * monitor the rx w/o affecting the regular flow - the pre_rx_handler 1140 * the user requested to monitor the rx w/o affecting the regular flow.
1143 * will be set but the ownership flag != IWL_OWNERSHIP_TM and the flow 1141 * In these cases the iwl_test object will handle forwarding the rx
1142 * data to user space.
1143 * Note that if the ownership flag != IWL_OWNERSHIP_TM the flow
1144 * continues. 1144 * continues.
1145 * We need to use ACCESS_ONCE to prevent a case where the handler
1146 * changes between the check and the call.
1147 */ 1145 */
1148 pre_rx_handler = ACCESS_ONCE(priv->pre_rx_handler); 1146 iwl_test_rx(&priv->tst, rxb);
1149 if (pre_rx_handler) 1147#endif
1150 pre_rx_handler(priv, rxb); 1148
1151 if (priv->ucode_owner != IWL_OWNERSHIP_TM) { 1149 if (priv->ucode_owner != IWL_OWNERSHIP_TM) {
1152 /* Based on type of command response or notification, 1150 /* Based on type of command response or notification,
1153 * handle those that need handling via function in 1151 * handle those that need handling via function in
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index 0a3aa7c83003..6ee940f497f9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -25,11 +25,11 @@
25 *****************************************************************************/ 25 *****************************************************************************/
26 26
27#include <linux/etherdevice.h> 27#include <linux/etherdevice.h>
28#include "iwl-dev.h"
29#include "iwl-agn.h"
30#include "iwl-agn-calib.h"
31#include "iwl-trans.h" 28#include "iwl-trans.h"
32#include "iwl-modparams.h" 29#include "iwl-modparams.h"
30#include "dev.h"
31#include "agn.h"
32#include "calib.h"
33 33
34/* 34/*
35 * initialize rxon structure with default values from eeprom 35 * initialize rxon structure with default values from eeprom
@@ -37,8 +37,6 @@
37void iwl_connection_init_rx_config(struct iwl_priv *priv, 37void iwl_connection_init_rx_config(struct iwl_priv *priv,
38 struct iwl_rxon_context *ctx) 38 struct iwl_rxon_context *ctx)
39{ 39{
40 const struct iwl_channel_info *ch_info;
41
42 memset(&ctx->staging, 0, sizeof(ctx->staging)); 40 memset(&ctx->staging, 0, sizeof(ctx->staging));
43 41
44 if (!ctx->vif) { 42 if (!ctx->vif) {
@@ -80,14 +78,8 @@ void iwl_connection_init_rx_config(struct iwl_priv *priv,
80 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; 78 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
81#endif 79#endif
82 80
83 ch_info = iwl_get_channel_info(priv, priv->band, 81 ctx->staging.channel = cpu_to_le16(priv->hw->conf.channel->hw_value);
84 le16_to_cpu(ctx->active.channel)); 82 priv->band = priv->hw->conf.channel->band;
85
86 if (!ch_info)
87 ch_info = &priv->channel_info[0];
88
89 ctx->staging.channel = cpu_to_le16(ch_info->channel);
90 priv->band = ch_info->band;
91 83
92 iwl_set_flags_for_band(priv, ctx, priv->band, ctx->vif); 84 iwl_set_flags_for_band(priv, ctx, priv->band, ctx->vif);
93 85
@@ -175,7 +167,8 @@ static int iwlagn_disconn_pan(struct iwl_priv *priv,
175 return ret; 167 return ret;
176} 168}
177 169
178void iwlagn_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx) 170static void iwlagn_update_qos(struct iwl_priv *priv,
171 struct iwl_rxon_context *ctx)
179{ 172{
180 int ret; 173 int ret;
181 174
@@ -202,8 +195,8 @@ void iwlagn_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
202 IWL_DEBUG_QUIET_RFKILL(priv, "Failed to update QoS\n"); 195 IWL_DEBUG_QUIET_RFKILL(priv, "Failed to update QoS\n");
203} 196}
204 197
205int iwlagn_update_beacon(struct iwl_priv *priv, 198static int iwlagn_update_beacon(struct iwl_priv *priv,
206 struct ieee80211_vif *vif) 199 struct ieee80211_vif *vif)
207{ 200{
208 lockdep_assert_held(&priv->mutex); 201 lockdep_assert_held(&priv->mutex);
209 202
@@ -215,7 +208,7 @@ int iwlagn_update_beacon(struct iwl_priv *priv,
215} 208}
216 209
217static int iwlagn_send_rxon_assoc(struct iwl_priv *priv, 210static int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
218 struct iwl_rxon_context *ctx) 211 struct iwl_rxon_context *ctx)
219{ 212{
220 int ret = 0; 213 int ret = 0;
221 struct iwl_rxon_assoc_cmd rxon_assoc; 214 struct iwl_rxon_assoc_cmd rxon_assoc;
@@ -427,10 +420,10 @@ static int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
427 return -EINVAL; 420 return -EINVAL;
428 } 421 }
429 422
430 if (tx_power > priv->tx_power_device_lmt) { 423 if (tx_power > DIV_ROUND_UP(priv->eeprom_data->max_tx_pwr_half_dbm, 2)) {
431 IWL_WARN(priv, 424 IWL_WARN(priv,
432 "Requested user TXPOWER %d above upper limit %d.\n", 425 "Requested user TXPOWER %d above upper limit %d.\n",
433 tx_power, priv->tx_power_device_lmt); 426 tx_power, priv->eeprom_data->max_tx_pwr_half_dbm);
434 return -EINVAL; 427 return -EINVAL;
435 } 428 }
436 429
@@ -863,8 +856,8 @@ static int iwl_check_rxon_cmd(struct iwl_priv *priv,
863 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that 856 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
864 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required. 857 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
865 */ 858 */
866int iwl_full_rxon_required(struct iwl_priv *priv, 859static int iwl_full_rxon_required(struct iwl_priv *priv,
867 struct iwl_rxon_context *ctx) 860 struct iwl_rxon_context *ctx)
868{ 861{
869 const struct iwl_rxon_cmd *staging = &ctx->staging; 862 const struct iwl_rxon_cmd *staging = &ctx->staging;
870 const struct iwl_rxon_cmd *active = &ctx->active; 863 const struct iwl_rxon_cmd *active = &ctx->active;
@@ -1189,7 +1182,6 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
1189 struct iwl_rxon_context *ctx; 1182 struct iwl_rxon_context *ctx;
1190 struct ieee80211_conf *conf = &hw->conf; 1183 struct ieee80211_conf *conf = &hw->conf;
1191 struct ieee80211_channel *channel = conf->channel; 1184 struct ieee80211_channel *channel = conf->channel;
1192 const struct iwl_channel_info *ch_info;
1193 int ret = 0; 1185 int ret = 0;
1194 1186
1195 IWL_DEBUG_MAC80211(priv, "enter: changed %#x\n", changed); 1187 IWL_DEBUG_MAC80211(priv, "enter: changed %#x\n", changed);
@@ -1223,14 +1215,6 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
1223 } 1215 }
1224 1216
1225 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 1217 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
1226 ch_info = iwl_get_channel_info(priv, channel->band,
1227 channel->hw_value);
1228 if (!is_channel_valid(ch_info)) {
1229 IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
1230 ret = -EINVAL;
1231 goto out;
1232 }
1233
1234 for_each_context(priv, ctx) { 1218 for_each_context(priv, ctx) {
1235 /* Configure HT40 channels */ 1219 /* Configure HT40 channels */
1236 if (ctx->ht.enabled != conf_is_ht(conf)) 1220 if (ctx->ht.enabled != conf_is_ht(conf))
@@ -1294,9 +1278,9 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
1294 return ret; 1278 return ret;
1295} 1279}
1296 1280
1297void iwlagn_check_needed_chains(struct iwl_priv *priv, 1281static void iwlagn_check_needed_chains(struct iwl_priv *priv,
1298 struct iwl_rxon_context *ctx, 1282 struct iwl_rxon_context *ctx,
1299 struct ieee80211_bss_conf *bss_conf) 1283 struct ieee80211_bss_conf *bss_conf)
1300{ 1284{
1301 struct ieee80211_vif *vif = ctx->vif; 1285 struct ieee80211_vif *vif = ctx->vif;
1302 struct iwl_rxon_context *tmp; 1286 struct iwl_rxon_context *tmp;
@@ -1388,7 +1372,7 @@ void iwlagn_check_needed_chains(struct iwl_priv *priv,
1388 ht_conf->single_chain_sufficient = !need_multiple; 1372 ht_conf->single_chain_sufficient = !need_multiple;
1389} 1373}
1390 1374
1391void iwlagn_chain_noise_reset(struct iwl_priv *priv) 1375static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
1392{ 1376{
1393 struct iwl_chain_noise_data *data = &priv->chain_noise_data; 1377 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
1394 int ret; 1378 int ret;
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c
index 031d8e21f82f..e3467fa86899 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/dvm/scan.c
@@ -30,11 +30,8 @@
30#include <linux/etherdevice.h> 30#include <linux/etherdevice.h>
31#include <net/mac80211.h> 31#include <net/mac80211.h>
32 32
33#include "iwl-eeprom.h" 33#include "dev.h"
34#include "iwl-dev.h" 34#include "agn.h"
35#include "iwl-io.h"
36#include "iwl-agn.h"
37#include "iwl-trans.h"
38 35
39/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after 36/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
40 * sending probe req. This should be set long enough to hear probe responses 37 * sending probe req. This should be set long enough to hear probe responses
@@ -54,6 +51,9 @@
54#define IWL_CHANNEL_TUNE_TIME 5 51#define IWL_CHANNEL_TUNE_TIME 5
55#define MAX_SCAN_CHANNEL 50 52#define MAX_SCAN_CHANNEL 50
56 53
54/* For reset radio, need minimal dwell time only */
55#define IWL_RADIO_RESET_DWELL_TIME 5
56
57static int iwl_send_scan_abort(struct iwl_priv *priv) 57static int iwl_send_scan_abort(struct iwl_priv *priv)
58{ 58{
59 int ret; 59 int ret;
@@ -67,7 +67,6 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
67 * to receive scan abort command or it does not perform 67 * to receive scan abort command or it does not perform
68 * hardware scan currently */ 68 * hardware scan currently */
69 if (!test_bit(STATUS_READY, &priv->status) || 69 if (!test_bit(STATUS_READY, &priv->status) ||
70 !test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
71 !test_bit(STATUS_SCAN_HW, &priv->status) || 70 !test_bit(STATUS_SCAN_HW, &priv->status) ||
72 test_bit(STATUS_FW_ERROR, &priv->status)) 71 test_bit(STATUS_FW_ERROR, &priv->status))
73 return -EIO; 72 return -EIO;
@@ -101,11 +100,8 @@ static void iwl_complete_scan(struct iwl_priv *priv, bool aborted)
101 ieee80211_scan_completed(priv->hw, aborted); 100 ieee80211_scan_completed(priv->hw, aborted);
102 } 101 }
103 102
104 if (priv->scan_type == IWL_SCAN_ROC) { 103 if (priv->scan_type == IWL_SCAN_ROC)
105 ieee80211_remain_on_channel_expired(priv->hw); 104 iwl_scan_roc_expired(priv);
106 priv->hw_roc_channel = NULL;
107 schedule_delayed_work(&priv->hw_roc_disable_work, 10 * HZ);
108 }
109 105
110 priv->scan_type = IWL_SCAN_NORMAL; 106 priv->scan_type = IWL_SCAN_NORMAL;
111 priv->scan_vif = NULL; 107 priv->scan_vif = NULL;
@@ -134,11 +130,8 @@ static void iwl_process_scan_complete(struct iwl_priv *priv)
134 goto out_settings; 130 goto out_settings;
135 } 131 }
136 132
137 if (priv->scan_type == IWL_SCAN_ROC) { 133 if (priv->scan_type == IWL_SCAN_ROC)
138 ieee80211_remain_on_channel_expired(priv->hw); 134 iwl_scan_roc_expired(priv);
139 priv->hw_roc_channel = NULL;
140 schedule_delayed_work(&priv->hw_roc_disable_work, 10 * HZ);
141 }
142 135
143 if (priv->scan_type != IWL_SCAN_NORMAL && !aborted) { 136 if (priv->scan_type != IWL_SCAN_NORMAL && !aborted) {
144 int err; 137 int err;
@@ -403,15 +396,21 @@ static u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
403static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time) 396static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time)
404{ 397{
405 struct iwl_rxon_context *ctx; 398 struct iwl_rxon_context *ctx;
399 int limits[NUM_IWL_RXON_CTX] = {};
400 int n_active = 0;
401 u16 limit;
402
403 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
406 404
407 /* 405 /*
408 * If we're associated, we clamp the dwell time 98% 406 * If we're associated, we clamp the dwell time 98%
409 * of the smallest beacon interval (minus 2 * channel 407 * of the beacon interval (minus 2 * channel tune time)
410 * tune time) 408 * If both contexts are active, we have to restrict to
409 * 1/2 of the minimum of them, because they might be in
410 * lock-step with the time inbetween only half of what
411 * time we'd have in each of them.
411 */ 412 */
412 for_each_context(priv, ctx) { 413 for_each_context(priv, ctx) {
413 u16 value;
414
415 switch (ctx->staging.dev_type) { 414 switch (ctx->staging.dev_type) {
416 case RXON_DEV_TYPE_P2P: 415 case RXON_DEV_TYPE_P2P:
417 /* no timing constraints */ 416 /* no timing constraints */
@@ -431,14 +430,25 @@ static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time)
431 break; 430 break;
432 } 431 }
433 432
434 value = ctx->beacon_int; 433 limits[n_active++] = ctx->beacon_int ?: IWL_PASSIVE_DWELL_BASE;
435 if (!value)
436 value = IWL_PASSIVE_DWELL_BASE;
437 value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
438 dwell_time = min(value, dwell_time);
439 } 434 }
440 435
441 return dwell_time; 436 switch (n_active) {
437 case 0:
438 return dwell_time;
439 case 2:
440 limit = (limits[1] * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
441 limit /= 2;
442 dwell_time = min(limit, dwell_time);
443 /* fall through to limit further */
444 case 1:
445 limit = (limits[0] * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
446 limit /= n_active;
447 return min(limit, dwell_time);
448 default:
449 WARN_ON_ONCE(1);
450 return dwell_time;
451 }
442} 452}
443 453
444static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv, 454static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
@@ -453,27 +463,17 @@ static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
453 463
454/* Return valid, unused, channel for a passive scan to reset the RF */ 464/* Return valid, unused, channel for a passive scan to reset the RF */
455static u8 iwl_get_single_channel_number(struct iwl_priv *priv, 465static u8 iwl_get_single_channel_number(struct iwl_priv *priv,
456 enum ieee80211_band band) 466 enum ieee80211_band band)
457{ 467{
458 const struct iwl_channel_info *ch_info; 468 struct ieee80211_supported_band *sband = priv->hw->wiphy->bands[band];
459 int i;
460 u8 channel = 0;
461 u8 min, max;
462 struct iwl_rxon_context *ctx; 469 struct iwl_rxon_context *ctx;
470 int i;
463 471
464 if (band == IEEE80211_BAND_5GHZ) { 472 for (i = 0; i < sband->n_channels; i++) {
465 min = 14;
466 max = priv->channel_count;
467 } else {
468 min = 0;
469 max = 14;
470 }
471
472 for (i = min; i < max; i++) {
473 bool busy = false; 473 bool busy = false;
474 474
475 for_each_context(priv, ctx) { 475 for_each_context(priv, ctx) {
476 busy = priv->channel_info[i].channel == 476 busy = sband->channels[i].hw_value ==
477 le16_to_cpu(ctx->staging.channel); 477 le16_to_cpu(ctx->staging.channel);
478 if (busy) 478 if (busy)
479 break; 479 break;
@@ -482,54 +482,46 @@ static u8 iwl_get_single_channel_number(struct iwl_priv *priv,
482 if (busy) 482 if (busy)
483 continue; 483 continue;
484 484
485 channel = priv->channel_info[i].channel; 485 if (!(sband->channels[i].flags & IEEE80211_CHAN_DISABLED))
486 ch_info = iwl_get_channel_info(priv, band, channel); 486 return sband->channels[i].hw_value;
487 if (is_channel_valid(ch_info))
488 break;
489 } 487 }
490 488
491 return channel; 489 return 0;
492} 490}
493 491
494static int iwl_get_single_channel_for_scan(struct iwl_priv *priv, 492static int iwl_get_channel_for_reset_scan(struct iwl_priv *priv,
495 struct ieee80211_vif *vif, 493 struct ieee80211_vif *vif,
496 enum ieee80211_band band, 494 enum ieee80211_band band,
497 struct iwl_scan_channel *scan_ch) 495 struct iwl_scan_channel *scan_ch)
498{ 496{
499 const struct ieee80211_supported_band *sband; 497 const struct ieee80211_supported_band *sband;
500 u16 passive_dwell = 0; 498 u16 channel;
501 u16 active_dwell = 0;
502 int added = 0;
503 u16 channel = 0;
504 499
505 sband = iwl_get_hw_mode(priv, band); 500 sband = iwl_get_hw_mode(priv, band);
506 if (!sband) { 501 if (!sband) {
507 IWL_ERR(priv, "invalid band\n"); 502 IWL_ERR(priv, "invalid band\n");
508 return added; 503 return 0;
509 } 504 }
510 505
511 active_dwell = iwl_get_active_dwell_time(priv, band, 0);
512 passive_dwell = iwl_get_passive_dwell_time(priv, band);
513
514 if (passive_dwell <= active_dwell)
515 passive_dwell = active_dwell + 1;
516
517 channel = iwl_get_single_channel_number(priv, band); 506 channel = iwl_get_single_channel_number(priv, band);
518 if (channel) { 507 if (channel) {
519 scan_ch->channel = cpu_to_le16(channel); 508 scan_ch->channel = cpu_to_le16(channel);
520 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE; 509 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
521 scan_ch->active_dwell = cpu_to_le16(active_dwell); 510 scan_ch->active_dwell =
522 scan_ch->passive_dwell = cpu_to_le16(passive_dwell); 511 cpu_to_le16(IWL_RADIO_RESET_DWELL_TIME);
512 scan_ch->passive_dwell =
513 cpu_to_le16(IWL_RADIO_RESET_DWELL_TIME);
523 /* Set txpower levels to defaults */ 514 /* Set txpower levels to defaults */
524 scan_ch->dsp_atten = 110; 515 scan_ch->dsp_atten = 110;
525 if (band == IEEE80211_BAND_5GHZ) 516 if (band == IEEE80211_BAND_5GHZ)
526 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3; 517 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
527 else 518 else
528 scan_ch->tx_gain = ((1 << 5) | (5 << 3)); 519 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
529 added++; 520 return 1;
530 } else 521 }
531 IWL_ERR(priv, "no valid channel found\n"); 522
532 return added; 523 IWL_ERR(priv, "no valid channel found\n");
524 return 0;
533} 525}
534 526
535static int iwl_get_channels_for_scan(struct iwl_priv *priv, 527static int iwl_get_channels_for_scan(struct iwl_priv *priv,
@@ -540,7 +532,6 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
540{ 532{
541 struct ieee80211_channel *chan; 533 struct ieee80211_channel *chan;
542 const struct ieee80211_supported_band *sband; 534 const struct ieee80211_supported_band *sband;
543 const struct iwl_channel_info *ch_info;
544 u16 passive_dwell = 0; 535 u16 passive_dwell = 0;
545 u16 active_dwell = 0; 536 u16 active_dwell = 0;
546 int added, i; 537 int added, i;
@@ -565,16 +556,7 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
565 channel = chan->hw_value; 556 channel = chan->hw_value;
566 scan_ch->channel = cpu_to_le16(channel); 557 scan_ch->channel = cpu_to_le16(channel);
567 558
568 ch_info = iwl_get_channel_info(priv, band, channel); 559 if (!is_active || (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
569 if (!is_channel_valid(ch_info)) {
570 IWL_DEBUG_SCAN(priv,
571 "Channel %d is INVALID for this band.\n",
572 channel);
573 continue;
574 }
575
576 if (!is_active || is_channel_passive(ch_info) ||
577 (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
578 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE; 560 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
579 else 561 else
580 scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE; 562 scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
@@ -678,12 +660,12 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
678 u16 rx_chain = 0; 660 u16 rx_chain = 0;
679 enum ieee80211_band band; 661 enum ieee80211_band band;
680 u8 n_probes = 0; 662 u8 n_probes = 0;
681 u8 rx_ant = priv->hw_params.valid_rx_ant; 663 u8 rx_ant = priv->eeprom_data->valid_rx_ant;
682 u8 rate; 664 u8 rate;
683 bool is_active = false; 665 bool is_active = false;
684 int chan_mod; 666 int chan_mod;
685 u8 active_chains; 667 u8 active_chains;
686 u8 scan_tx_antennas = priv->hw_params.valid_tx_ant; 668 u8 scan_tx_antennas = priv->eeprom_data->valid_tx_ant;
687 int ret; 669 int ret;
688 int scan_cmd_size = sizeof(struct iwl_scan_cmd) + 670 int scan_cmd_size = sizeof(struct iwl_scan_cmd) +
689 MAX_SCAN_CHANNEL * sizeof(struct iwl_scan_channel) + 671 MAX_SCAN_CHANNEL * sizeof(struct iwl_scan_channel) +
@@ -755,6 +737,12 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
755 switch (priv->scan_type) { 737 switch (priv->scan_type) {
756 case IWL_SCAN_RADIO_RESET: 738 case IWL_SCAN_RADIO_RESET:
757 IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n"); 739 IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
740 /*
741 * Override quiet time as firmware checks that active
742 * dwell is >= quiet; since we use passive scan it'll
743 * not actually be used.
744 */
745 scan->quiet_time = cpu_to_le16(IWL_RADIO_RESET_DWELL_TIME);
758 break; 746 break;
759 case IWL_SCAN_NORMAL: 747 case IWL_SCAN_NORMAL:
760 if (priv->scan_request->n_ssids) { 748 if (priv->scan_request->n_ssids) {
@@ -893,7 +881,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
893 881
894 /* MIMO is not used here, but value is required */ 882 /* MIMO is not used here, but value is required */
895 rx_chain |= 883 rx_chain |=
896 priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS; 884 priv->eeprom_data->valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
897 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS; 885 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
898 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS; 886 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
899 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS; 887 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
@@ -928,7 +916,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
928 switch (priv->scan_type) { 916 switch (priv->scan_type) {
929 case IWL_SCAN_RADIO_RESET: 917 case IWL_SCAN_RADIO_RESET:
930 scan->channel_count = 918 scan->channel_count =
931 iwl_get_single_channel_for_scan(priv, vif, band, 919 iwl_get_channel_for_reset_scan(priv, vif, band,
932 (void *)&scan->data[cmd_len]); 920 (void *)&scan->data[cmd_len]);
933 break; 921 break;
934 case IWL_SCAN_NORMAL: 922 case IWL_SCAN_NORMAL:
@@ -994,8 +982,10 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
994 set_bit(STATUS_SCAN_HW, &priv->status); 982 set_bit(STATUS_SCAN_HW, &priv->status);
995 983
996 ret = iwlagn_set_pan_params(priv); 984 ret = iwlagn_set_pan_params(priv);
997 if (ret) 985 if (ret) {
986 clear_bit(STATUS_SCAN_HW, &priv->status);
998 return ret; 987 return ret;
988 }
999 989
1000 ret = iwl_dvm_send_cmd(priv, &cmd); 990 ret = iwl_dvm_send_cmd(priv, &cmd);
1001 if (ret) { 991 if (ret) {
@@ -1008,7 +998,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1008 998
1009void iwl_init_scan_params(struct iwl_priv *priv) 999void iwl_init_scan_params(struct iwl_priv *priv)
1010{ 1000{
1011 u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1; 1001 u8 ant_idx = fls(priv->eeprom_data->valid_tx_ant) - 1;
1012 if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ]) 1002 if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
1013 priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx; 1003 priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
1014 if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ]) 1004 if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
@@ -1158,3 +1148,40 @@ void iwl_cancel_scan_deferred_work(struct iwl_priv *priv)
1158 mutex_unlock(&priv->mutex); 1148 mutex_unlock(&priv->mutex);
1159 } 1149 }
1160} 1150}
1151
1152void iwl_scan_roc_expired(struct iwl_priv *priv)
1153{
1154 /*
1155 * The status bit should be set here, to prevent a race
1156 * where the atomic_read returns 1, but before the execution continues
1157 * iwl_scan_offchannel_skb_status() checks if the status bit is set
1158 */
1159 set_bit(STATUS_SCAN_ROC_EXPIRED, &priv->status);
1160
1161 if (atomic_read(&priv->num_aux_in_flight) == 0) {
1162 ieee80211_remain_on_channel_expired(priv->hw);
1163 priv->hw_roc_channel = NULL;
1164 schedule_delayed_work(&priv->hw_roc_disable_work,
1165 10 * HZ);
1166
1167 clear_bit(STATUS_SCAN_ROC_EXPIRED, &priv->status);
1168 } else {
1169 IWL_DEBUG_SCAN(priv, "ROC done with %d frames in aux\n",
1170 atomic_read(&priv->num_aux_in_flight));
1171 }
1172}
1173
1174void iwl_scan_offchannel_skb(struct iwl_priv *priv)
1175{
1176 WARN_ON(!priv->hw_roc_start_notified);
1177 atomic_inc(&priv->num_aux_in_flight);
1178}
1179
1180void iwl_scan_offchannel_skb_status(struct iwl_priv *priv)
1181{
1182 if (atomic_dec_return(&priv->num_aux_in_flight) == 0 &&
1183 test_bit(STATUS_SCAN_ROC_EXPIRED, &priv->status)) {
1184 IWL_DEBUG_SCAN(priv, "0 aux frames. Calling ROC expired\n");
1185 iwl_scan_roc_expired(priv);
1186 }
1187}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index eb6a8eaf42fc..b29b798f7550 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -28,10 +28,9 @@
28 *****************************************************************************/ 28 *****************************************************************************/
29#include <linux/etherdevice.h> 29#include <linux/etherdevice.h>
30#include <net/mac80211.h> 30#include <net/mac80211.h>
31
32#include "iwl-dev.h"
33#include "iwl-agn.h"
34#include "iwl-trans.h" 31#include "iwl-trans.h"
32#include "dev.h"
33#include "agn.h"
35 34
36const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 35const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
37 36
@@ -171,26 +170,6 @@ int iwl_send_add_sta(struct iwl_priv *priv,
171 return cmd.handler_status; 170 return cmd.handler_status;
172} 171}
173 172
174static bool iwl_is_channel_extension(struct iwl_priv *priv,
175 enum ieee80211_band band,
176 u16 channel, u8 extension_chan_offset)
177{
178 const struct iwl_channel_info *ch_info;
179
180 ch_info = iwl_get_channel_info(priv, band, channel);
181 if (!is_channel_valid(ch_info))
182 return false;
183
184 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
185 return !(ch_info->ht40_extension_channel &
186 IEEE80211_CHAN_NO_HT40PLUS);
187 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
188 return !(ch_info->ht40_extension_channel &
189 IEEE80211_CHAN_NO_HT40MINUS);
190
191 return false;
192}
193
194bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv, 173bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
195 struct iwl_rxon_context *ctx, 174 struct iwl_rxon_context *ctx,
196 struct ieee80211_sta_ht_cap *ht_cap) 175 struct ieee80211_sta_ht_cap *ht_cap)
@@ -198,21 +177,25 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
198 if (!ctx->ht.enabled || !ctx->ht.is_40mhz) 177 if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
199 return false; 178 return false;
200 179
180#ifdef CONFIG_IWLWIFI_DEBUGFS
181 if (priv->disable_ht40)
182 return false;
183#endif
184
201 /* 185 /*
202 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 186 * Remainder of this function checks ht_cap, but if it's
203 * the bit will not set if it is pure 40MHz case 187 * NULL then we can do HT40 (special case for RXON)
204 */ 188 */
205 if (ht_cap && !ht_cap->ht_supported) 189 if (!ht_cap)
190 return true;
191
192 if (!ht_cap->ht_supported)
206 return false; 193 return false;
207 194
208#ifdef CONFIG_IWLWIFI_DEBUGFS 195 if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
209 if (priv->disable_ht40)
210 return false; 196 return false;
211#endif
212 197
213 return iwl_is_channel_extension(priv, priv->band, 198 return true;
214 le16_to_cpu(ctx->staging.channel),
215 ctx->ht.extension_chan_offset);
216} 199}
217 200
218static void iwl_sta_calc_ht_flags(struct iwl_priv *priv, 201static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
@@ -236,6 +219,7 @@ static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
236 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2; 219 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
237 220
238 IWL_DEBUG_INFO(priv, "STA %pM SM PS mode: %s\n", 221 IWL_DEBUG_INFO(priv, "STA %pM SM PS mode: %s\n",
222 sta->addr,
239 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ? 223 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
240 "static" : 224 "static" :
241 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ? 225 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
@@ -649,23 +633,23 @@ static void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
649 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE) 633 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
650 rate_flags |= RATE_MCS_CCK_MSK; 634 rate_flags |= RATE_MCS_CCK_MSK;
651 635
652 rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) << 636 rate_flags |= first_antenna(priv->eeprom_data->valid_tx_ant) <<
653 RATE_MCS_ANT_POS; 637 RATE_MCS_ANT_POS;
654 rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags); 638 rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
655 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) 639 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
656 link_cmd->rs_table[i].rate_n_flags = rate_n_flags; 640 link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
657 641
658 link_cmd->general_params.single_stream_ant_msk = 642 link_cmd->general_params.single_stream_ant_msk =
659 first_antenna(priv->hw_params.valid_tx_ant); 643 first_antenna(priv->eeprom_data->valid_tx_ant);
660 644
661 link_cmd->general_params.dual_stream_ant_msk = 645 link_cmd->general_params.dual_stream_ant_msk =
662 priv->hw_params.valid_tx_ant & 646 priv->eeprom_data->valid_tx_ant &
663 ~first_antenna(priv->hw_params.valid_tx_ant); 647 ~first_antenna(priv->eeprom_data->valid_tx_ant);
664 if (!link_cmd->general_params.dual_stream_ant_msk) { 648 if (!link_cmd->general_params.dual_stream_ant_msk) {
665 link_cmd->general_params.dual_stream_ant_msk = ANT_AB; 649 link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
666 } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) { 650 } else if (num_of_ant(priv->eeprom_data->valid_tx_ant) == 2) {
667 link_cmd->general_params.dual_stream_ant_msk = 651 link_cmd->general_params.dual_stream_ant_msk =
668 priv->hw_params.valid_tx_ant; 652 priv->eeprom_data->valid_tx_ant;
669 } 653 }
670 654
671 link_cmd->agg_params.agg_dis_start_th = 655 link_cmd->agg_params.agg_dis_start_th =
diff --git a/drivers/net/wireless/iwlwifi/dvm/testmode.c b/drivers/net/wireless/iwlwifi/dvm/testmode.c
new file mode 100644
index 000000000000..57b918ce3b5f
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/dvm/testmode.c
@@ -0,0 +1,471 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#include <linux/init.h>
65#include <linux/kernel.h>
66#include <linux/module.h>
67#include <linux/dma-mapping.h>
68#include <net/net_namespace.h>
69#include <linux/netdevice.h>
70#include <net/cfg80211.h>
71#include <net/mac80211.h>
72#include <net/netlink.h>
73
74#include "iwl-debug.h"
75#include "iwl-trans.h"
76#include "dev.h"
77#include "agn.h"
78#include "iwl-test.h"
79#include "iwl-testmode.h"
80
81static int iwl_testmode_send_cmd(struct iwl_op_mode *op_mode,
82 struct iwl_host_cmd *cmd)
83{
84 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
85 return iwl_dvm_send_cmd(priv, cmd);
86}
87
88static bool iwl_testmode_valid_hw_addr(u32 addr)
89{
90 if (iwlagn_hw_valid_rtc_data_addr(addr))
91 return true;
92
93 if (IWLAGN_RTC_INST_LOWER_BOUND <= addr &&
94 addr < IWLAGN_RTC_INST_UPPER_BOUND)
95 return true;
96
97 return false;
98}
99
100static u32 iwl_testmode_get_fw_ver(struct iwl_op_mode *op_mode)
101{
102 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
103 return priv->fw->ucode_ver;
104}
105
106static struct sk_buff*
107iwl_testmode_alloc_reply(struct iwl_op_mode *op_mode, int len)
108{
109 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
110 return cfg80211_testmode_alloc_reply_skb(priv->hw->wiphy, len);
111}
112
113static int iwl_testmode_reply(struct iwl_op_mode *op_mode, struct sk_buff *skb)
114{
115 return cfg80211_testmode_reply(skb);
116}
117
118static struct sk_buff *iwl_testmode_alloc_event(struct iwl_op_mode *op_mode,
119 int len)
120{
121 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
122 return cfg80211_testmode_alloc_event_skb(priv->hw->wiphy, len,
123 GFP_ATOMIC);
124}
125
126static void iwl_testmode_event(struct iwl_op_mode *op_mode, struct sk_buff *skb)
127{
128 return cfg80211_testmode_event(skb, GFP_ATOMIC);
129}
130
131static struct iwl_test_ops tst_ops = {
132 .send_cmd = iwl_testmode_send_cmd,
133 .valid_hw_addr = iwl_testmode_valid_hw_addr,
134 .get_fw_ver = iwl_testmode_get_fw_ver,
135 .alloc_reply = iwl_testmode_alloc_reply,
136 .reply = iwl_testmode_reply,
137 .alloc_event = iwl_testmode_alloc_event,
138 .event = iwl_testmode_event,
139};
140
141void iwl_testmode_init(struct iwl_priv *priv)
142{
143 iwl_test_init(&priv->tst, priv->trans, &tst_ops);
144}
145
146void iwl_testmode_free(struct iwl_priv *priv)
147{
148 iwl_test_free(&priv->tst);
149}
150
151static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
152{
153 struct iwl_notification_wait calib_wait;
154 static const u8 calib_complete[] = {
155 CALIBRATION_COMPLETE_NOTIFICATION
156 };
157 int ret;
158
159 iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
160 calib_complete, ARRAY_SIZE(calib_complete),
161 NULL, NULL);
162 ret = iwl_init_alive_start(priv);
163 if (ret) {
164 IWL_ERR(priv, "Fail init calibration: %d\n", ret);
165 goto cfg_init_calib_error;
166 }
167
168 ret = iwl_wait_notification(&priv->notif_wait, &calib_wait, 2 * HZ);
169 if (ret)
170 IWL_ERR(priv, "Error detecting"
171 " CALIBRATION_COMPLETE_NOTIFICATION: %d\n", ret);
172 return ret;
173
174cfg_init_calib_error:
175 iwl_remove_notification(&priv->notif_wait, &calib_wait);
176 return ret;
177}
178
179/*
180 * This function handles the user application commands for driver.
181 *
182 * It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the
183 * handlers respectively.
184 *
185 * If it's an unknown commdn ID, -ENOSYS is replied; otherwise, the returned
186 * value of the actual command execution is replied to the user application.
187 *
188 * If there's any message responding to the user space, IWL_TM_ATTR_SYNC_RSP
189 * is used for carry the message while IWL_TM_ATTR_COMMAND must set to
190 * IWL_TM_CMD_DEV2APP_SYNC_RSP.
191 *
192 * @hw: ieee80211_hw object that represents the device
193 * @tb: gnl message fields from the user space
194 */
195static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
196{
197 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
198 struct iwl_trans *trans = priv->trans;
199 struct sk_buff *skb;
200 unsigned char *rsp_data_ptr = NULL;
201 int status = 0, rsp_data_len = 0;
202 u32 inst_size = 0, data_size = 0;
203 const struct fw_img *img;
204
205 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
206 case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
207 rsp_data_ptr = (unsigned char *)priv->cfg->name;
208 rsp_data_len = strlen(priv->cfg->name);
209 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
210 rsp_data_len + 20);
211 if (!skb) {
212 IWL_ERR(priv, "Memory allocation fail\n");
213 return -ENOMEM;
214 }
215 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
216 IWL_TM_CMD_DEV2APP_SYNC_RSP) ||
217 nla_put(skb, IWL_TM_ATTR_SYNC_RSP,
218 rsp_data_len, rsp_data_ptr))
219 goto nla_put_failure;
220 status = cfg80211_testmode_reply(skb);
221 if (status < 0)
222 IWL_ERR(priv, "Error sending msg : %d\n", status);
223 break;
224
225 case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
226 status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_INIT);
227 if (status)
228 IWL_ERR(priv, "Error loading init ucode: %d\n", status);
229 break;
230
231 case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
232 iwl_testmode_cfg_init_calib(priv);
233 priv->ucode_loaded = false;
234 iwl_trans_stop_device(trans);
235 break;
236
237 case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
238 status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR);
239 if (status) {
240 IWL_ERR(priv,
241 "Error loading runtime ucode: %d\n", status);
242 break;
243 }
244 status = iwl_alive_start(priv);
245 if (status)
246 IWL_ERR(priv,
247 "Error starting the device: %d\n", status);
248 break;
249
250 case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
251 iwl_scan_cancel_timeout(priv, 200);
252 priv->ucode_loaded = false;
253 iwl_trans_stop_device(trans);
254 status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_WOWLAN);
255 if (status) {
256 IWL_ERR(priv,
257 "Error loading WOWLAN ucode: %d\n", status);
258 break;
259 }
260 status = iwl_alive_start(priv);
261 if (status)
262 IWL_ERR(priv,
263 "Error starting the device: %d\n", status);
264 break;
265
266 case IWL_TM_CMD_APP2DEV_GET_EEPROM:
267 if (priv->eeprom_blob) {
268 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
269 priv->eeprom_blob_size + 20);
270 if (!skb) {
271 IWL_ERR(priv, "Memory allocation fail\n");
272 return -ENOMEM;
273 }
274 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
275 IWL_TM_CMD_DEV2APP_EEPROM_RSP) ||
276 nla_put(skb, IWL_TM_ATTR_EEPROM,
277 priv->eeprom_blob_size,
278 priv->eeprom_blob))
279 goto nla_put_failure;
280 status = cfg80211_testmode_reply(skb);
281 if (status < 0)
282 IWL_ERR(priv, "Error sending msg : %d\n",
283 status);
284 } else
285 return -ENODATA;
286 break;
287
288 case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
289 if (!tb[IWL_TM_ATTR_FIXRATE]) {
290 IWL_ERR(priv, "Missing fixrate setting\n");
291 return -ENOMSG;
292 }
293 priv->tm_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]);
294 break;
295
296 case IWL_TM_CMD_APP2DEV_GET_FW_INFO:
297 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20 + 8);
298 if (!skb) {
299 IWL_ERR(priv, "Memory allocation fail\n");
300 return -ENOMEM;
301 }
302 if (!priv->ucode_loaded) {
303 IWL_ERR(priv, "No uCode has not been loaded\n");
304 return -EINVAL;
305 } else {
306 img = &priv->fw->img[priv->cur_ucode];
307 inst_size = img->sec[IWL_UCODE_SECTION_INST].len;
308 data_size = img->sec[IWL_UCODE_SECTION_DATA].len;
309 }
310 if (nla_put_u32(skb, IWL_TM_ATTR_FW_TYPE, priv->cur_ucode) ||
311 nla_put_u32(skb, IWL_TM_ATTR_FW_INST_SIZE, inst_size) ||
312 nla_put_u32(skb, IWL_TM_ATTR_FW_DATA_SIZE, data_size))
313 goto nla_put_failure;
314 status = cfg80211_testmode_reply(skb);
315 if (status < 0)
316 IWL_ERR(priv, "Error sending msg : %d\n", status);
317 break;
318
319 default:
320 IWL_ERR(priv, "Unknown testmode driver command ID\n");
321 return -ENOSYS;
322 }
323 return status;
324
325nla_put_failure:
326 kfree_skb(skb);
327 return -EMSGSIZE;
328}
329
330/*
331 * This function handles the user application switch ucode ownership.
332 *
333 * It retrieves the mandatory fields IWL_TM_ATTR_UCODE_OWNER and
334 * decide who the current owner of the uCode
335 *
336 * If the current owner is OWNERSHIP_TM, then the only host command
337 * can deliver to uCode is from testmode, all the other host commands
338 * will dropped.
339 *
340 * default driver is the owner of uCode in normal operational mode
341 *
342 * @hw: ieee80211_hw object that represents the device
343 * @tb: gnl message fields from the user space
344 */
345static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
346{
347 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
348 u8 owner;
349
350 if (!tb[IWL_TM_ATTR_UCODE_OWNER]) {
351 IWL_ERR(priv, "Missing ucode owner\n");
352 return -ENOMSG;
353 }
354
355 owner = nla_get_u8(tb[IWL_TM_ATTR_UCODE_OWNER]);
356 if (owner == IWL_OWNERSHIP_DRIVER) {
357 priv->ucode_owner = owner;
358 iwl_test_enable_notifications(&priv->tst, false);
359 } else if (owner == IWL_OWNERSHIP_TM) {
360 priv->ucode_owner = owner;
361 iwl_test_enable_notifications(&priv->tst, true);
362 } else {
363 IWL_ERR(priv, "Invalid owner\n");
364 return -EINVAL;
365 }
366 return 0;
367}
368
369/* The testmode gnl message handler that takes the gnl message from the
370 * user space and parses it per the policy iwl_testmode_gnl_msg_policy, then
371 * invoke the corresponding handlers.
372 *
373 * This function is invoked when there is user space application sending
374 * gnl message through the testmode tunnel NL80211_CMD_TESTMODE regulated
375 * by nl80211.
376 *
377 * It retrieves the mandatory field, IWL_TM_ATTR_COMMAND, before
378 * dispatching it to the corresponding handler.
379 *
380 * If IWL_TM_ATTR_COMMAND is missing, -ENOMSG is replied to user application;
381 * -ENOSYS is replied to the user application if the command is unknown;
382 * Otherwise, the command is dispatched to the respective handler.
383 *
384 * @hw: ieee80211_hw object that represents the device
385 * @data: pointer to user space message
386 * @len: length in byte of @data
387 */
388int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
389{
390 struct nlattr *tb[IWL_TM_ATTR_MAX];
391 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
392 int result;
393
394 result = iwl_test_parse(&priv->tst, tb, data, len);
395 if (result)
396 return result;
397
398 /* in case multiple accesses to the device happens */
399 mutex_lock(&priv->mutex);
400 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
401 case IWL_TM_CMD_APP2DEV_UCODE:
402 case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
403 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
404 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
405 case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
406 case IWL_TM_CMD_APP2DEV_END_TRACE:
407 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
408 case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
409 case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
410 case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
411 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
412 result = iwl_test_handle_cmd(&priv->tst, tb);
413 break;
414
415 case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
416 case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
417 case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
418 case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
419 case IWL_TM_CMD_APP2DEV_GET_EEPROM:
420 case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
421 case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
422 case IWL_TM_CMD_APP2DEV_GET_FW_INFO:
423 IWL_DEBUG_INFO(priv, "testmode cmd to driver\n");
424 result = iwl_testmode_driver(hw, tb);
425 break;
426
427 case IWL_TM_CMD_APP2DEV_OWNERSHIP:
428 IWL_DEBUG_INFO(priv, "testmode change uCode ownership\n");
429 result = iwl_testmode_ownership(hw, tb);
430 break;
431
432 default:
433 IWL_ERR(priv, "Unknown testmode command\n");
434 result = -ENOSYS;
435 break;
436 }
437 mutex_unlock(&priv->mutex);
438
439 if (result)
440 IWL_ERR(priv, "Test cmd failed result=%d\n", result);
441 return result;
442}
443
444int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
445 struct netlink_callback *cb,
446 void *data, int len)
447{
448 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
449 int result;
450 u32 cmd;
451
452 if (cb->args[3]) {
453 /* offset by 1 since commands start at 0 */
454 cmd = cb->args[3] - 1;
455 } else {
456 struct nlattr *tb[IWL_TM_ATTR_MAX];
457
458 result = iwl_test_parse(&priv->tst, tb, data, len);
459 if (result)
460 return result;
461
462 cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
463 cb->args[3] = cmd + 1;
464 }
465
466 /* in case multiple accesses to the device happens */
467 mutex_lock(&priv->mutex);
468 result = iwl_test_dump(&priv->tst, cmd, skb, cb);
469 mutex_unlock(&priv->mutex);
470 return result;
471}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c b/drivers/net/wireless/iwlwifi/dvm/tt.c
index a5cfe0aceedb..eb864433e59d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tt.c
@@ -31,17 +31,14 @@
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/init.h> 33#include <linux/init.h>
34
35#include <net/mac80211.h> 34#include <net/mac80211.h>
36
37#include "iwl-agn.h"
38#include "iwl-eeprom.h"
39#include "iwl-dev.h"
40#include "iwl-io.h" 35#include "iwl-io.h"
41#include "iwl-commands.h"
42#include "iwl-debug.h"
43#include "iwl-agn-tt.h"
44#include "iwl-modparams.h" 36#include "iwl-modparams.h"
37#include "iwl-debug.h"
38#include "agn.h"
39#include "dev.h"
40#include "commands.h"
41#include "tt.h"
45 42
46/* default Thermal Throttling transaction table 43/* default Thermal Throttling transaction table
47 * Current state | Throttling Down | Throttling Up 44 * Current state | Throttling Down | Throttling Up
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.h b/drivers/net/wireless/iwlwifi/dvm/tt.h
index 86bbf47501c1..44c7c8f30a2d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tt.h
+++ b/drivers/net/wireless/iwlwifi/dvm/tt.h
@@ -28,7 +28,7 @@
28#ifndef __iwl_tt_setting_h__ 28#ifndef __iwl_tt_setting_h__
29#define __iwl_tt_setting_h__ 29#define __iwl_tt_setting_h__
30 30
31#include "iwl-commands.h" 31#include "commands.h"
32 32
33#define IWL_ABSOLUTE_ZERO 0 33#define IWL_ABSOLUTE_ZERO 0
34#define IWL_ABSOLUTE_MAX 0xFFFFFFFF 34#define IWL_ABSOLUTE_MAX 0xFFFFFFFF
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index 3366e2e2f00f..5971a23aa47d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -32,12 +32,11 @@
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/sched.h> 33#include <linux/sched.h>
34#include <linux/ieee80211.h> 34#include <linux/ieee80211.h>
35
36#include "iwl-dev.h"
37#include "iwl-io.h" 35#include "iwl-io.h"
38#include "iwl-agn-hw.h"
39#include "iwl-agn.h"
40#include "iwl-trans.h" 36#include "iwl-trans.h"
37#include "iwl-agn-hw.h"
38#include "dev.h"
39#include "agn.h"
41 40
42static const u8 tid_to_ac[] = { 41static const u8 tid_to_ac[] = {
43 IEEE80211_AC_BE, 42 IEEE80211_AC_BE,
@@ -187,7 +186,8 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
187 rate_idx = info->control.rates[0].idx; 186 rate_idx = info->control.rates[0].idx;
188 if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS || 187 if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
189 (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY)) 188 (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
190 rate_idx = rate_lowest_index(&priv->bands[info->band], 189 rate_idx = rate_lowest_index(
190 &priv->eeprom_data->bands[info->band],
191 info->control.sta); 191 info->control.sta);
192 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ 192 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
193 if (info->band == IEEE80211_BAND_5GHZ) 193 if (info->band == IEEE80211_BAND_5GHZ)
@@ -207,10 +207,11 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
207 priv->bt_full_concurrent) { 207 priv->bt_full_concurrent) {
208 /* operated as 1x1 in full concurrency mode */ 208 /* operated as 1x1 in full concurrency mode */
209 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, 209 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
210 first_antenna(priv->hw_params.valid_tx_ant)); 210 first_antenna(priv->eeprom_data->valid_tx_ant));
211 } else 211 } else
212 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, 212 priv->mgmt_tx_ant = iwl_toggle_tx_ant(
213 priv->hw_params.valid_tx_ant); 213 priv, priv->mgmt_tx_ant,
214 priv->eeprom_data->valid_tx_ant);
214 rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant); 215 rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
215 216
216 /* Set the rate in the TX cmd */ 217 /* Set the rate in the TX cmd */
@@ -296,7 +297,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
296 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 297 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
297 struct iwl_station_priv *sta_priv = NULL; 298 struct iwl_station_priv *sta_priv = NULL;
298 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 299 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
299 struct iwl_device_cmd *dev_cmd = NULL; 300 struct iwl_device_cmd *dev_cmd;
300 struct iwl_tx_cmd *tx_cmd; 301 struct iwl_tx_cmd *tx_cmd;
301 __le16 fc; 302 __le16 fc;
302 u8 hdr_len; 303 u8 hdr_len;
@@ -378,7 +379,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
378 if (info->flags & IEEE80211_TX_CTL_AMPDU) 379 if (info->flags & IEEE80211_TX_CTL_AMPDU)
379 is_agg = true; 380 is_agg = true;
380 381
381 dev_cmd = kmem_cache_alloc(iwl_tx_cmd_pool, GFP_ATOMIC); 382 dev_cmd = iwl_trans_alloc_tx_cmd(priv->trans);
382 383
383 if (unlikely(!dev_cmd)) 384 if (unlikely(!dev_cmd))
384 goto drop_unlock_priv; 385 goto drop_unlock_priv;
@@ -402,6 +403,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
402 403
403 info->driver_data[0] = ctx; 404 info->driver_data[0] = ctx;
404 info->driver_data[1] = dev_cmd; 405 info->driver_data[1] = dev_cmd;
406 /* From now on, we cannot access info->control */
405 407
406 spin_lock(&priv->sta_lock); 408 spin_lock(&priv->sta_lock);
407 409
@@ -486,11 +488,14 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
486 if (sta_priv && sta_priv->client && !is_agg) 488 if (sta_priv && sta_priv->client && !is_agg)
487 atomic_inc(&sta_priv->pending_frames); 489 atomic_inc(&sta_priv->pending_frames);
488 490
491 if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
492 iwl_scan_offchannel_skb(priv);
493
489 return 0; 494 return 0;
490 495
491drop_unlock_sta: 496drop_unlock_sta:
492 if (dev_cmd) 497 if (dev_cmd)
493 kmem_cache_free(iwl_tx_cmd_pool, dev_cmd); 498 iwl_trans_free_tx_cmd(priv->trans, dev_cmd);
494 spin_unlock(&priv->sta_lock); 499 spin_unlock(&priv->sta_lock);
495drop_unlock_priv: 500drop_unlock_priv:
496 return -1; 501 return -1;
@@ -597,7 +602,7 @@ turn_off:
597 * time, or we hadn't time to drain the AC queues. 602 * time, or we hadn't time to drain the AC queues.
598 */ 603 */
599 if (agg_state == IWL_AGG_ON) 604 if (agg_state == IWL_AGG_ON)
600 iwl_trans_tx_agg_disable(priv->trans, txq_id); 605 iwl_trans_txq_disable(priv->trans, txq_id);
601 else 606 else
602 IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n", 607 IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n",
603 agg_state); 608 agg_state);
@@ -686,9 +691,8 @@ int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
686 691
687 fifo = ctx->ac_to_fifo[tid_to_ac[tid]]; 692 fifo = ctx->ac_to_fifo[tid_to_ac[tid]];
688 693
689 iwl_trans_tx_agg_setup(priv->trans, q, fifo, 694 iwl_trans_txq_enable(priv->trans, q, fifo, sta_priv->sta_id, tid,
690 sta_priv->sta_id, tid, 695 buf_size, ssn);
691 buf_size, ssn);
692 696
693 /* 697 /*
694 * If the limit is 0, then it wasn't initialised yet, 698 * If the limit is 0, then it wasn't initialised yet,
@@ -753,8 +757,8 @@ static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
753 IWL_DEBUG_TX_QUEUES(priv, 757 IWL_DEBUG_TX_QUEUES(priv,
754 "Can continue DELBA flow ssn = next_recl =" 758 "Can continue DELBA flow ssn = next_recl ="
755 " %d", tid_data->next_reclaimed); 759 " %d", tid_data->next_reclaimed);
756 iwl_trans_tx_agg_disable(priv->trans, 760 iwl_trans_txq_disable(priv->trans,
757 tid_data->agg.txq_id); 761 tid_data->agg.txq_id);
758 iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id); 762 iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id);
759 tid_data->agg.state = IWL_AGG_OFF; 763 tid_data->agg.state = IWL_AGG_OFF;
760 ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid); 764 ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
@@ -1136,6 +1140,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
1136 struct sk_buff *skb; 1140 struct sk_buff *skb;
1137 struct iwl_rxon_context *ctx; 1141 struct iwl_rxon_context *ctx;
1138 bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE); 1142 bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
1143 bool is_offchannel_skb;
1139 1144
1140 tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >> 1145 tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
1141 IWLAGN_TX_RES_TID_POS; 1146 IWLAGN_TX_RES_TID_POS;
@@ -1149,6 +1154,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
1149 1154
1150 __skb_queue_head_init(&skbs); 1155 __skb_queue_head_init(&skbs);
1151 1156
1157 is_offchannel_skb = false;
1158
1152 if (tx_resp->frame_count == 1) { 1159 if (tx_resp->frame_count == 1) {
1153 u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl); 1160 u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl);
1154 next_reclaimed = SEQ_TO_SN(next_reclaimed + 0x10); 1161 next_reclaimed = SEQ_TO_SN(next_reclaimed + 0x10);
@@ -1176,7 +1183,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
1176 } 1183 }
1177 1184
1178 /*we can free until ssn % q.n_bd not inclusive */ 1185 /*we can free until ssn % q.n_bd not inclusive */
1179 WARN_ON(iwl_reclaim(priv, sta_id, tid, txq_id, ssn, &skbs)); 1186 WARN_ON_ONCE(iwl_reclaim(priv, sta_id, tid,
1187 txq_id, ssn, &skbs));
1180 iwlagn_check_ratid_empty(priv, sta_id, tid); 1188 iwlagn_check_ratid_empty(priv, sta_id, tid);
1181 freed = 0; 1189 freed = 0;
1182 1190
@@ -1189,8 +1197,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
1189 1197
1190 info = IEEE80211_SKB_CB(skb); 1198 info = IEEE80211_SKB_CB(skb);
1191 ctx = info->driver_data[0]; 1199 ctx = info->driver_data[0];
1192 kmem_cache_free(iwl_tx_cmd_pool, 1200 iwl_trans_free_tx_cmd(priv->trans,
1193 (info->driver_data[1])); 1201 info->driver_data[1]);
1194 1202
1195 memset(&info->status, 0, sizeof(info->status)); 1203 memset(&info->status, 0, sizeof(info->status));
1196 1204
@@ -1225,10 +1233,19 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
1225 if (!is_agg) 1233 if (!is_agg)
1226 iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1); 1234 iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
1227 1235
1236 is_offchannel_skb =
1237 (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN);
1228 freed++; 1238 freed++;
1229 } 1239 }
1230 1240
1231 WARN_ON(!is_agg && freed != 1); 1241 WARN_ON(!is_agg && freed != 1);
1242
1243 /*
1244 * An offchannel frame can be send only on the AUX queue, where
1245 * there is no aggregation (and reordering) so it only is single
1246 * skb is expected to be processed.
1247 */
1248 WARN_ON(is_offchannel_skb && freed != 1);
1232 } 1249 }
1233 1250
1234 iwl_check_abort_status(priv, tx_resp->frame_count, status); 1251 iwl_check_abort_status(priv, tx_resp->frame_count, status);
@@ -1239,6 +1256,9 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
1239 ieee80211_tx_status(priv->hw, skb); 1256 ieee80211_tx_status(priv->hw, skb);
1240 } 1257 }
1241 1258
1259 if (is_offchannel_skb)
1260 iwl_scan_offchannel_skb_status(priv);
1261
1242 return 0; 1262 return 0;
1243} 1263}
1244 1264
@@ -1341,7 +1361,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
1341 WARN_ON_ONCE(1); 1361 WARN_ON_ONCE(1);
1342 1362
1343 info = IEEE80211_SKB_CB(skb); 1363 info = IEEE80211_SKB_CB(skb);
1344 kmem_cache_free(iwl_tx_cmd_pool, (info->driver_data[1])); 1364 iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);
1345 1365
1346 if (freed == 1) { 1366 if (freed == 1) {
1347 /* this is the first skb we deliver in this batch */ 1367 /* this is the first skb we deliver in this batch */
diff --git a/drivers/net/wireless/iwlwifi/iwl-ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index bc40dc68b0f4..6d8d6dd7943f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -30,15 +30,16 @@
30#include <linux/kernel.h> 30#include <linux/kernel.h>
31#include <linux/init.h> 31#include <linux/init.h>
32 32
33#include "iwl-dev.h"
34#include "iwl-io.h" 33#include "iwl-io.h"
35#include "iwl-agn-hw.h" 34#include "iwl-agn-hw.h"
36#include "iwl-agn.h"
37#include "iwl-agn-calib.h"
38#include "iwl-trans.h" 35#include "iwl-trans.h"
39#include "iwl-fh.h" 36#include "iwl-fh.h"
40#include "iwl-op-mode.h" 37#include "iwl-op-mode.h"
41 38
39#include "dev.h"
40#include "agn.h"
41#include "calib.h"
42
42/****************************************************************************** 43/******************************************************************************
43 * 44 *
44 * uCode download functions 45 * uCode download functions
@@ -60,8 +61,7 @@ iwl_get_ucode_image(struct iwl_priv *priv, enum iwl_ucode_type ucode_type)
60static int iwl_set_Xtal_calib(struct iwl_priv *priv) 61static int iwl_set_Xtal_calib(struct iwl_priv *priv)
61{ 62{
62 struct iwl_calib_xtal_freq_cmd cmd; 63 struct iwl_calib_xtal_freq_cmd cmd;
63 __le16 *xtal_calib = 64 __le16 *xtal_calib = priv->eeprom_data->xtal_calib;
64 (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_XTAL);
65 65
66 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD); 66 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD);
67 cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]); 67 cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
@@ -72,12 +72,10 @@ static int iwl_set_Xtal_calib(struct iwl_priv *priv)
72static int iwl_set_temperature_offset_calib(struct iwl_priv *priv) 72static int iwl_set_temperature_offset_calib(struct iwl_priv *priv)
73{ 73{
74 struct iwl_calib_temperature_offset_cmd cmd; 74 struct iwl_calib_temperature_offset_cmd cmd;
75 __le16 *offset_calib =
76 (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE);
77 75
78 memset(&cmd, 0, sizeof(cmd)); 76 memset(&cmd, 0, sizeof(cmd));
79 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); 77 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
80 memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(*offset_calib)); 78 cmd.radio_sensor_offset = priv->eeprom_data->raw_temperature;
81 if (!(cmd.radio_sensor_offset)) 79 if (!(cmd.radio_sensor_offset))
82 cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET; 80 cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
83 81
@@ -89,27 +87,17 @@ static int iwl_set_temperature_offset_calib(struct iwl_priv *priv)
89static int iwl_set_temperature_offset_calib_v2(struct iwl_priv *priv) 87static int iwl_set_temperature_offset_calib_v2(struct iwl_priv *priv)
90{ 88{
91 struct iwl_calib_temperature_offset_v2_cmd cmd; 89 struct iwl_calib_temperature_offset_v2_cmd cmd;
92 __le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(priv,
93 EEPROM_KELVIN_TEMPERATURE);
94 __le16 *offset_calib_low =
95 (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE);
96 struct iwl_eeprom_calib_hdr *hdr;
97 90
98 memset(&cmd, 0, sizeof(cmd)); 91 memset(&cmd, 0, sizeof(cmd));
99 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); 92 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
100 hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv, 93 cmd.radio_sensor_offset_high = priv->eeprom_data->kelvin_temperature;
101 EEPROM_CALIB_ALL); 94 cmd.radio_sensor_offset_low = priv->eeprom_data->raw_temperature;
102 memcpy(&cmd.radio_sensor_offset_high, offset_calib_high, 95 if (!cmd.radio_sensor_offset_low) {
103 sizeof(*offset_calib_high));
104 memcpy(&cmd.radio_sensor_offset_low, offset_calib_low,
105 sizeof(*offset_calib_low));
106 if (!(cmd.radio_sensor_offset_low)) {
107 IWL_DEBUG_CALIB(priv, "no info in EEPROM, use default\n"); 96 IWL_DEBUG_CALIB(priv, "no info in EEPROM, use default\n");
108 cmd.radio_sensor_offset_low = DEFAULT_RADIO_SENSOR_OFFSET; 97 cmd.radio_sensor_offset_low = DEFAULT_RADIO_SENSOR_OFFSET;
109 cmd.radio_sensor_offset_high = DEFAULT_RADIO_SENSOR_OFFSET; 98 cmd.radio_sensor_offset_high = DEFAULT_RADIO_SENSOR_OFFSET;
110 } 99 }
111 memcpy(&cmd.burntVoltageRef, &hdr->voltage, 100 cmd.burntVoltageRef = priv->eeprom_data->calib_voltage;
112 sizeof(hdr->voltage));
113 101
114 IWL_DEBUG_CALIB(priv, "Radio sensor offset high: %d\n", 102 IWL_DEBUG_CALIB(priv, "Radio sensor offset high: %d\n",
115 le16_to_cpu(cmd.radio_sensor_offset_high)); 103 le16_to_cpu(cmd.radio_sensor_offset_high));
@@ -177,7 +165,7 @@ int iwl_init_alive_start(struct iwl_priv *priv)
177 return 0; 165 return 0;
178} 166}
179 167
180int iwl_send_wimax_coex(struct iwl_priv *priv) 168static int iwl_send_wimax_coex(struct iwl_priv *priv)
181{ 169{
182 struct iwl_wimax_coex_cmd coex_cmd; 170 struct iwl_wimax_coex_cmd coex_cmd;
183 171
@@ -238,13 +226,50 @@ int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
238 return ret; 226 return ret;
239} 227}
240 228
229static const u8 iwlagn_default_queue_to_tx_fifo[] = {
230 IWL_TX_FIFO_VO,
231 IWL_TX_FIFO_VI,
232 IWL_TX_FIFO_BE,
233 IWL_TX_FIFO_BK,
234};
235
236static const u8 iwlagn_ipan_queue_to_tx_fifo[] = {
237 IWL_TX_FIFO_VO,
238 IWL_TX_FIFO_VI,
239 IWL_TX_FIFO_BE,
240 IWL_TX_FIFO_BK,
241 IWL_TX_FIFO_BK_IPAN,
242 IWL_TX_FIFO_BE_IPAN,
243 IWL_TX_FIFO_VI_IPAN,
244 IWL_TX_FIFO_VO_IPAN,
245 IWL_TX_FIFO_BE_IPAN,
246 IWL_TX_FIFO_UNUSED,
247 IWL_TX_FIFO_AUX,
248};
241 249
242static int iwl_alive_notify(struct iwl_priv *priv) 250static int iwl_alive_notify(struct iwl_priv *priv)
243{ 251{
252 const u8 *queue_to_txf;
253 u8 n_queues;
244 int ret; 254 int ret;
255 int i;
245 256
246 iwl_trans_fw_alive(priv->trans); 257 iwl_trans_fw_alive(priv->trans);
247 258
259 if (priv->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN &&
260 priv->eeprom_data->sku & EEPROM_SKU_CAP_IPAN_ENABLE) {
261 n_queues = ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo);
262 queue_to_txf = iwlagn_ipan_queue_to_tx_fifo;
263 } else {
264 n_queues = ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo);
265 queue_to_txf = iwlagn_default_queue_to_tx_fifo;
266 }
267
268 for (i = 0; i < n_queues; i++)
269 if (queue_to_txf[i] != IWL_TX_FIFO_UNUSED)
270 iwl_trans_ac_txq_enable(priv->trans, i,
271 queue_to_txf[i]);
272
248 priv->passive_no_rx = false; 273 priv->passive_no_rx = false;
249 priv->transport_queue_stop = 0; 274 priv->transport_queue_stop = 0;
250 275
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 67b28aa7f9be..10e47938b635 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -113,7 +113,7 @@ enum iwl_led_mode {
113#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE 0 113#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE 0
114 114
115/* TX queue watchdog timeouts in mSecs */ 115/* TX queue watchdog timeouts in mSecs */
116#define IWL_WATCHHDOG_DISABLED 0 116#define IWL_WATCHDOG_DISABLED 0
117#define IWL_DEF_WD_TIMEOUT 2000 117#define IWL_DEF_WD_TIMEOUT 2000
118#define IWL_LONG_WD_TIMEOUT 10000 118#define IWL_LONG_WD_TIMEOUT 10000
119#define IWL_MAX_WD_TIMEOUT 120000 119#define IWL_MAX_WD_TIMEOUT 120000
@@ -143,7 +143,7 @@ enum iwl_led_mode {
143 * @chain_noise_scale: default chain noise scale used for gain computation 143 * @chain_noise_scale: default chain noise scale used for gain computation
144 * @wd_timeout: TX queues watchdog timeout 144 * @wd_timeout: TX queues watchdog timeout
145 * @max_event_log_size: size of event log buffer size for ucode event logging 145 * @max_event_log_size: size of event log buffer size for ucode event logging
146 * @shadow_reg_enable: HW shadhow register bit 146 * @shadow_reg_enable: HW shadow register support
147 * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up 147 * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
148 * @no_idle_support: do not support idle mode 148 * @no_idle_support: do not support idle mode
149 */ 149 */
@@ -182,13 +182,34 @@ struct iwl_bt_params {
182 bool bt_sco_disable; 182 bool bt_sco_disable;
183 bool bt_session_2; 183 bool bt_session_2;
184}; 184};
185
185/* 186/*
186 * @use_rts_for_aggregation: use rts/cts protection for HT traffic 187 * @use_rts_for_aggregation: use rts/cts protection for HT traffic
188 * @ht40_bands: bitmap of bands (using %IEEE80211_BAND_*) that support HT40
187 */ 189 */
188struct iwl_ht_params { 190struct iwl_ht_params {
191 enum ieee80211_smps_mode smps_mode;
189 const bool ht_greenfield_support; /* if used set to true */ 192 const bool ht_greenfield_support; /* if used set to true */
190 bool use_rts_for_aggregation; 193 bool use_rts_for_aggregation;
191 enum ieee80211_smps_mode smps_mode; 194 u8 ht40_bands;
195};
196
197/*
198 * information on how to parse the EEPROM
199 */
200#define EEPROM_REG_BAND_1_CHANNELS 0x08
201#define EEPROM_REG_BAND_2_CHANNELS 0x26
202#define EEPROM_REG_BAND_3_CHANNELS 0x42
203#define EEPROM_REG_BAND_4_CHANNELS 0x5C
204#define EEPROM_REG_BAND_5_CHANNELS 0x74
205#define EEPROM_REG_BAND_24_HT40_CHANNELS 0x82
206#define EEPROM_REG_BAND_52_HT40_CHANNELS 0x92
207#define EEPROM_6000_REG_BAND_24_HT40_CHANNELS 0x80
208#define EEPROM_REGULATORY_BAND_NO_HT40 0
209
210struct iwl_eeprom_params {
211 const u8 regulatory_bands[7];
212 bool enhanced_txpower;
192}; 213};
193 214
194/** 215/**
@@ -243,6 +264,7 @@ struct iwl_cfg {
243 /* params likely to change within a device family */ 264 /* params likely to change within a device family */
244 const struct iwl_ht_params *ht_params; 265 const struct iwl_ht_params *ht_params;
245 const struct iwl_bt_params *bt_params; 266 const struct iwl_bt_params *bt_params;
267 const struct iwl_eeprom_params *eeprom_params;
246 const bool need_temp_offset_calib; /* if used set to true */ 268 const bool need_temp_offset_calib; /* if used set to true */
247 const bool no_xtal_calib; 269 const bool no_xtal_calib;
248 enum iwl_led_mode led_mode; 270 enum iwl_led_mode led_mode;
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 59750543fce7..34a5287dfc2f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -97,13 +97,10 @@
97/* 97/*
98 * Hardware revision info 98 * Hardware revision info
99 * Bit fields: 99 * Bit fields:
100 * 31-8: Reserved 100 * 31-16: Reserved
101 * 7-4: Type of device: see CSR_HW_REV_TYPE_xxx definitions 101 * 15-4: Type of device: see CSR_HW_REV_TYPE_xxx definitions
102 * 3-2: Revision step: 0 = A, 1 = B, 2 = C, 3 = D 102 * 3-2: Revision step: 0 = A, 1 = B, 2 = C, 3 = D
103 * 1-0: "Dash" (-) value, as in A-1, etc. 103 * 1-0: "Dash" (-) value, as in A-1, etc.
104 *
105 * NOTE: Revision step affects calculation of CCK txpower for 4965.
106 * NOTE: See also CSR_HW_REV_WA_REG (work-around for bug in 4965).
107 */ 104 */
108#define CSR_HW_REV (CSR_BASE+0x028) 105#define CSR_HW_REV (CSR_BASE+0x028)
109 106
@@ -155,9 +152,21 @@
155#define CSR_DBG_LINK_PWR_MGMT_REG (CSR_BASE+0x250) 152#define CSR_DBG_LINK_PWR_MGMT_REG (CSR_BASE+0x250)
156 153
157/* Bits for CSR_HW_IF_CONFIG_REG */ 154/* Bits for CSR_HW_IF_CONFIG_REG */
158#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x00000C00) 155#define CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH (0x00000003)
159#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100) 156#define CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP (0x0000000C)
157#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x000000C0)
158#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100)
160#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200) 159#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200)
160#define CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE (0x00000C00)
161#define CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH (0x00003000)
162#define CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP (0x0000C000)
163
164#define CSR_HW_IF_CONFIG_REG_POS_MAC_DASH (0)
165#define CSR_HW_IF_CONFIG_REG_POS_MAC_STEP (2)
166#define CSR_HW_IF_CONFIG_REG_POS_BOARD_VER (6)
167#define CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE (10)
168#define CSR_HW_IF_CONFIG_REG_POS_PHY_DASH (12)
169#define CSR_HW_IF_CONFIG_REG_POS_PHY_STEP (14)
161 170
162#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000) 171#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000)
163#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000) 172#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
@@ -270,7 +279,10 @@
270 279
271 280
272/* HW REV */ 281/* HW REV */
273#define CSR_HW_REV_TYPE_MSK (0x00001F0) 282#define CSR_HW_REV_DASH(_val) (((_val) & 0x0000003) >> 0)
283#define CSR_HW_REV_STEP(_val) (((_val) & 0x000000C) >> 2)
284
285#define CSR_HW_REV_TYPE_MSK (0x000FFF0)
274#define CSR_HW_REV_TYPE_5300 (0x0000020) 286#define CSR_HW_REV_TYPE_5300 (0x0000020)
275#define CSR_HW_REV_TYPE_5350 (0x0000030) 287#define CSR_HW_REV_TYPE_5350 (0x0000030)
276#define CSR_HW_REV_TYPE_5100 (0x0000050) 288#define CSR_HW_REV_TYPE_5100 (0x0000050)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.c b/drivers/net/wireless/iwlwifi/iwl-debug.c
index 2d1b42847b9b..87535a67de76 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.c
@@ -61,7 +61,11 @@
61 * 61 *
62 *****************************************************************************/ 62 *****************************************************************************/
63 63
64#define DEBUG
65
66#include <linux/device.h>
64#include <linux/interrupt.h> 67#include <linux/interrupt.h>
68#include <linux/export.h>
65#include "iwl-debug.h" 69#include "iwl-debug.h"
66#include "iwl-devtrace.h" 70#include "iwl-devtrace.h"
67 71
@@ -81,8 +85,11 @@ void __iwl_ ##fn(struct device *dev, const char *fmt, ...) \
81} 85}
82 86
83__iwl_fn(warn) 87__iwl_fn(warn)
88EXPORT_SYMBOL_GPL(__iwl_warn);
84__iwl_fn(info) 89__iwl_fn(info)
90EXPORT_SYMBOL_GPL(__iwl_info);
85__iwl_fn(crit) 91__iwl_fn(crit)
92EXPORT_SYMBOL_GPL(__iwl_crit);
86 93
87void __iwl_err(struct device *dev, bool rfkill_prefix, bool trace_only, 94void __iwl_err(struct device *dev, bool rfkill_prefix, bool trace_only,
88 const char *fmt, ...) 95 const char *fmt, ...)
@@ -103,6 +110,7 @@ void __iwl_err(struct device *dev, bool rfkill_prefix, bool trace_only,
103 trace_iwlwifi_err(&vaf); 110 trace_iwlwifi_err(&vaf);
104 va_end(args); 111 va_end(args);
105} 112}
113EXPORT_SYMBOL_GPL(__iwl_err);
106 114
107#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING) 115#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING)
108void __iwl_dbg(struct device *dev, 116void __iwl_dbg(struct device *dev,
@@ -119,10 +127,11 @@ void __iwl_dbg(struct device *dev,
119#ifdef CONFIG_IWLWIFI_DEBUG 127#ifdef CONFIG_IWLWIFI_DEBUG
120 if (iwl_have_debug_level(level) && 128 if (iwl_have_debug_level(level) &&
121 (!limit || net_ratelimit())) 129 (!limit || net_ratelimit()))
122 dev_err(dev, "%c %s %pV", in_interrupt() ? 'I' : 'U', 130 dev_dbg(dev, "%c %s %pV", in_interrupt() ? 'I' : 'U',
123 function, &vaf); 131 function, &vaf);
124#endif 132#endif
125 trace_iwlwifi_dbg(level, in_interrupt(), function, &vaf); 133 trace_iwlwifi_dbg(level, in_interrupt(), function, &vaf);
126 va_end(args); 134 va_end(args);
127} 135}
136EXPORT_SYMBOL_GPL(__iwl_dbg);
128#endif 137#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 8376b842bdba..42b20b0e83bc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -38,13 +38,14 @@ static inline bool iwl_have_debug_level(u32 level)
38} 38}
39 39
40void __iwl_err(struct device *dev, bool rfkill_prefix, bool only_trace, 40void __iwl_err(struct device *dev, bool rfkill_prefix, bool only_trace,
41 const char *fmt, ...); 41 const char *fmt, ...) __printf(4, 5);
42void __iwl_warn(struct device *dev, const char *fmt, ...); 42void __iwl_warn(struct device *dev, const char *fmt, ...) __printf(2, 3);
43void __iwl_info(struct device *dev, const char *fmt, ...); 43void __iwl_info(struct device *dev, const char *fmt, ...) __printf(2, 3);
44void __iwl_crit(struct device *dev, const char *fmt, ...); 44void __iwl_crit(struct device *dev, const char *fmt, ...) __printf(2, 3);
45 45
46/* No matter what is m (priv, bus, trans), this will work */ 46/* No matter what is m (priv, bus, trans), this will work */
47#define IWL_ERR(m, f, a...) __iwl_err((m)->dev, false, false, f, ## a) 47#define IWL_ERR(m, f, a...) __iwl_err((m)->dev, false, false, f, ## a)
48#define IWL_ERR_DEV(d, f, a...) __iwl_err((d), false, false, f, ## a)
48#define IWL_WARN(m, f, a...) __iwl_warn((m)->dev, f, ## a) 49#define IWL_WARN(m, f, a...) __iwl_warn((m)->dev, f, ## a)
49#define IWL_INFO(m, f, a...) __iwl_info((m)->dev, f, ## a) 50#define IWL_INFO(m, f, a...) __iwl_info((m)->dev, f, ## a)
50#define IWL_CRIT(m, f, a...) __iwl_crit((m)->dev, f, ## a) 51#define IWL_CRIT(m, f, a...) __iwl_crit((m)->dev, f, ## a)
@@ -52,9 +53,9 @@ void __iwl_crit(struct device *dev, const char *fmt, ...);
52#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING) 53#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING)
53void __iwl_dbg(struct device *dev, 54void __iwl_dbg(struct device *dev,
54 u32 level, bool limit, const char *function, 55 u32 level, bool limit, const char *function,
55 const char *fmt, ...); 56 const char *fmt, ...) __printf(5, 6);
56#else 57#else
57static inline void 58__printf(5, 6) static inline void
58__iwl_dbg(struct device *dev, 59__iwl_dbg(struct device *dev,
59 u32 level, bool limit, const char *function, 60 u32 level, bool limit, const char *function,
60 const char *fmt, ...) 61 const char *fmt, ...)
@@ -69,6 +70,8 @@ do { \
69 70
70#define IWL_DEBUG(m, level, fmt, args...) \ 71#define IWL_DEBUG(m, level, fmt, args...) \
71 __iwl_dbg((m)->dev, level, false, __func__, fmt, ##args) 72 __iwl_dbg((m)->dev, level, false, __func__, fmt, ##args)
73#define IWL_DEBUG_DEV(dev, level, fmt, args...) \
74 __iwl_dbg((dev), level, false, __func__, fmt, ##args)
72#define IWL_DEBUG_LIMIT(m, level, fmt, args...) \ 75#define IWL_DEBUG_LIMIT(m, level, fmt, args...) \
73 __iwl_dbg((m)->dev, level, true, __func__, fmt, ##args) 76 __iwl_dbg((m)->dev, level, true, __func__, fmt, ##args)
74 77
@@ -153,7 +156,7 @@ do { \
153#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a) 156#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a)
154#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a) 157#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
155#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a) 158#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
156#define IWL_DEBUG_EEPROM(p, f, a...) IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a) 159#define IWL_DEBUG_EEPROM(d, f, a...) IWL_DEBUG_DEV(d, IWL_DL_EEPROM, f, ## a)
157#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a) 160#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
158#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a) 161#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
159#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a) 162#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
index 91f45e71e0a2..70191ddbd8f6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -42,4 +42,9 @@ EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event);
42EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error); 42EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error);
43EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event); 43EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event);
44EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_wrap_event); 44EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_wrap_event);
45EXPORT_TRACEPOINT_SYMBOL(iwlwifi_info);
46EXPORT_TRACEPOINT_SYMBOL(iwlwifi_warn);
47EXPORT_TRACEPOINT_SYMBOL(iwlwifi_crit);
48EXPORT_TRACEPOINT_SYMBOL(iwlwifi_err);
49EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dbg);
45#endif 50#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 06203d6a1d86..06ca505bb2cc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -28,6 +28,7 @@
28#define __IWLWIFI_DEVICE_TRACE 28#define __IWLWIFI_DEVICE_TRACE
29 29
30#include <linux/tracepoint.h> 30#include <linux/tracepoint.h>
31#include <linux/device.h>
31 32
32 33
33#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__) 34#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__)
@@ -175,7 +176,7 @@ TRACE_EVENT(iwlwifi_dev_ucode_wrap_event,
175#undef TRACE_SYSTEM 176#undef TRACE_SYSTEM
176#define TRACE_SYSTEM iwlwifi_msg 177#define TRACE_SYSTEM iwlwifi_msg
177 178
178#define MAX_MSG_LEN 100 179#define MAX_MSG_LEN 110
179 180
180DECLARE_EVENT_CLASS(iwlwifi_msg_event, 181DECLARE_EVENT_CLASS(iwlwifi_msg_event,
181 TP_PROTO(struct va_format *vaf), 182 TP_PROTO(struct va_format *vaf),
@@ -188,7 +189,7 @@ DECLARE_EVENT_CLASS(iwlwifi_msg_event,
188 MAX_MSG_LEN, vaf->fmt, 189 MAX_MSG_LEN, vaf->fmt,
189 *vaf->va) >= MAX_MSG_LEN); 190 *vaf->va) >= MAX_MSG_LEN);
190 ), 191 ),
191 TP_printk("%s", (char *)__get_dynamic_array(msg)) 192 TP_printk("%s", __get_str(msg))
192); 193);
193 194
194DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_err, 195DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_err,
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index fac67a526a30..cc41cfaedfbd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -77,8 +77,33 @@
77/* private includes */ 77/* private includes */
78#include "iwl-fw-file.h" 78#include "iwl-fw-file.h"
79 79
80/******************************************************************************
81 *
82 * module boiler plate
83 *
84 ******************************************************************************/
85
86/*
87 * module name, copyright, version, etc.
88 */
89#define DRV_DESCRIPTION "Intel(R) Wireless WiFi driver for Linux"
90
91#ifdef CONFIG_IWLWIFI_DEBUG
92#define VD "d"
93#else
94#define VD
95#endif
96
97#define DRV_VERSION IWLWIFI_VERSION VD
98
99MODULE_DESCRIPTION(DRV_DESCRIPTION);
100MODULE_VERSION(DRV_VERSION);
101MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
102MODULE_LICENSE("GPL");
103
80/** 104/**
81 * struct iwl_drv - drv common data 105 * struct iwl_drv - drv common data
106 * @list: list of drv structures using this opmode
82 * @fw: the iwl_fw structure 107 * @fw: the iwl_fw structure
83 * @op_mode: the running op_mode 108 * @op_mode: the running op_mode
84 * @trans: transport layer 109 * @trans: transport layer
@@ -89,6 +114,7 @@
89 * @request_firmware_complete: the firmware has been obtained from user space 114 * @request_firmware_complete: the firmware has been obtained from user space
90 */ 115 */
91struct iwl_drv { 116struct iwl_drv {
117 struct list_head list;
92 struct iwl_fw fw; 118 struct iwl_fw fw;
93 119
94 struct iwl_op_mode *op_mode; 120 struct iwl_op_mode *op_mode;
@@ -102,7 +128,19 @@ struct iwl_drv {
102 struct completion request_firmware_complete; 128 struct completion request_firmware_complete;
103}; 129};
104 130
105 131#define DVM_OP_MODE 0
132#define MVM_OP_MODE 1
133
134/* Protects the table contents, i.e. the ops pointer & drv list */
135static struct mutex iwlwifi_opmode_table_mtx;
136static struct iwlwifi_opmode_table {
137 const char *name; /* name: iwldvm, iwlmvm, etc */
138 const struct iwl_op_mode_ops *ops; /* pointer to op_mode ops */
139 struct list_head drv; /* list of devices using this op_mode */
140} iwlwifi_opmode_table[] = { /* ops set when driver is initialized */
141 { .name = "iwldvm", .ops = NULL },
142 { .name = "iwlmvm", .ops = NULL },
143};
106 144
107/* 145/*
108 * struct fw_sec: Just for the image parsing proccess. 146 * struct fw_sec: Just for the image parsing proccess.
@@ -721,7 +759,6 @@ static int validate_sec_sizes(struct iwl_drv *drv,
721 return 0; 759 return 0;
722} 760}
723 761
724
725/** 762/**
726 * iwl_ucode_callback - callback when firmware was loaded 763 * iwl_ucode_callback - callback when firmware was loaded
727 * 764 *
@@ -733,6 +770,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
733 struct iwl_drv *drv = context; 770 struct iwl_drv *drv = context;
734 struct iwl_fw *fw = &drv->fw; 771 struct iwl_fw *fw = &drv->fw;
735 struct iwl_ucode_header *ucode; 772 struct iwl_ucode_header *ucode;
773 struct iwlwifi_opmode_table *op;
736 int err; 774 int err;
737 struct iwl_firmware_pieces pieces; 775 struct iwl_firmware_pieces pieces;
738 const unsigned int api_max = drv->cfg->ucode_api_max; 776 const unsigned int api_max = drv->cfg->ucode_api_max;
@@ -740,6 +778,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
740 const unsigned int api_min = drv->cfg->ucode_api_min; 778 const unsigned int api_min = drv->cfg->ucode_api_min;
741 u32 api_ver; 779 u32 api_ver;
742 int i; 780 int i;
781 bool load_module = false;
743 782
744 fw->ucode_capa.max_probe_length = 200; 783 fw->ucode_capa.max_probe_length = 200;
745 fw->ucode_capa.standard_phy_calibration_size = 784 fw->ucode_capa.standard_phy_calibration_size =
@@ -862,10 +901,24 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
862 /* We have our copies now, allow OS release its copies */ 901 /* We have our copies now, allow OS release its copies */
863 release_firmware(ucode_raw); 902 release_firmware(ucode_raw);
864 903
865 drv->op_mode = iwl_dvm_ops.start(drv->trans, drv->cfg, &drv->fw); 904 mutex_lock(&iwlwifi_opmode_table_mtx);
905 op = &iwlwifi_opmode_table[DVM_OP_MODE];
866 906
867 if (!drv->op_mode) 907 /* add this device to the list of devices using this op_mode */
868 goto out_unbind; 908 list_add_tail(&drv->list, &op->drv);
909
910 if (op->ops) {
911 const struct iwl_op_mode_ops *ops = op->ops;
912 drv->op_mode = ops->start(drv->trans, drv->cfg, &drv->fw);
913
914 if (!drv->op_mode) {
915 mutex_unlock(&iwlwifi_opmode_table_mtx);
916 goto out_unbind;
917 }
918 } else {
919 load_module = true;
920 }
921 mutex_unlock(&iwlwifi_opmode_table_mtx);
869 922
870 /* 923 /*
871 * Complete the firmware request last so that 924 * Complete the firmware request last so that
@@ -873,6 +926,14 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
873 * are doing the start() above. 926 * are doing the start() above.
874 */ 927 */
875 complete(&drv->request_firmware_complete); 928 complete(&drv->request_firmware_complete);
929
930 /*
931 * Load the module last so we don't block anything
932 * else from proceeding if the module fails to load
933 * or hangs loading.
934 */
935 if (load_module)
936 request_module("%s", op->name);
876 return; 937 return;
877 938
878 try_again: 939 try_again:
@@ -906,6 +967,7 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
906 drv->cfg = cfg; 967 drv->cfg = cfg;
907 968
908 init_completion(&drv->request_firmware_complete); 969 init_completion(&drv->request_firmware_complete);
970 INIT_LIST_HEAD(&drv->list);
909 971
910 ret = iwl_request_firmware(drv, true); 972 ret = iwl_request_firmware(drv, true);
911 973
@@ -928,6 +990,16 @@ void iwl_drv_stop(struct iwl_drv *drv)
928 990
929 iwl_dealloc_ucode(drv); 991 iwl_dealloc_ucode(drv);
930 992
993 mutex_lock(&iwlwifi_opmode_table_mtx);
994 /*
995 * List is empty (this item wasn't added)
996 * when firmware loading failed -- in that
997 * case we can't remove it from any list.
998 */
999 if (!list_empty(&drv->list))
1000 list_del(&drv->list);
1001 mutex_unlock(&iwlwifi_opmode_table_mtx);
1002
931 kfree(drv); 1003 kfree(drv);
932} 1004}
933 1005
@@ -941,8 +1013,78 @@ struct iwl_mod_params iwlwifi_mod_params = {
941 .power_level = IWL_POWER_INDEX_1, 1013 .power_level = IWL_POWER_INDEX_1,
942 .bt_ch_announce = true, 1014 .bt_ch_announce = true,
943 .auto_agg = true, 1015 .auto_agg = true,
1016 .wd_disable = true,
944 /* the rest are 0 by default */ 1017 /* the rest are 0 by default */
945}; 1018};
1019EXPORT_SYMBOL_GPL(iwlwifi_mod_params);
1020
1021int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops)
1022{
1023 int i;
1024 struct iwl_drv *drv;
1025
1026 mutex_lock(&iwlwifi_opmode_table_mtx);
1027 for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) {
1028 if (strcmp(iwlwifi_opmode_table[i].name, name))
1029 continue;
1030 iwlwifi_opmode_table[i].ops = ops;
1031 list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list)
1032 drv->op_mode = ops->start(drv->trans, drv->cfg,
1033 &drv->fw);
1034 mutex_unlock(&iwlwifi_opmode_table_mtx);
1035 return 0;
1036 }
1037 mutex_unlock(&iwlwifi_opmode_table_mtx);
1038 return -EIO;
1039}
1040EXPORT_SYMBOL_GPL(iwl_opmode_register);
1041
1042void iwl_opmode_deregister(const char *name)
1043{
1044 int i;
1045 struct iwl_drv *drv;
1046
1047 mutex_lock(&iwlwifi_opmode_table_mtx);
1048 for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) {
1049 if (strcmp(iwlwifi_opmode_table[i].name, name))
1050 continue;
1051 iwlwifi_opmode_table[i].ops = NULL;
1052
1053 /* call the stop routine for all devices */
1054 list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list) {
1055 if (drv->op_mode) {
1056 iwl_op_mode_stop(drv->op_mode);
1057 drv->op_mode = NULL;
1058 }
1059 }
1060 mutex_unlock(&iwlwifi_opmode_table_mtx);
1061 return;
1062 }
1063 mutex_unlock(&iwlwifi_opmode_table_mtx);
1064}
1065EXPORT_SYMBOL_GPL(iwl_opmode_deregister);
1066
1067static int __init iwl_drv_init(void)
1068{
1069 int i;
1070
1071 mutex_init(&iwlwifi_opmode_table_mtx);
1072
1073 for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++)
1074 INIT_LIST_HEAD(&iwlwifi_opmode_table[i].drv);
1075
1076 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
1077 pr_info(DRV_COPYRIGHT "\n");
1078
1079 return iwl_pci_register_driver();
1080}
1081module_init(iwl_drv_init);
1082
1083static void __exit iwl_drv_exit(void)
1084{
1085 iwl_pci_unregister_driver();
1086}
1087module_exit(iwl_drv_exit);
946 1088
947#ifdef CONFIG_IWLWIFI_DEBUG 1089#ifdef CONFIG_IWLWIFI_DEBUG
948module_param_named(debug, iwlwifi_mod_params.debug_level, uint, 1090module_param_named(debug, iwlwifi_mod_params.debug_level, uint,
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
new file mode 100644
index 000000000000..f10170fe8799
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
@@ -0,0 +1,903 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62#include <linux/types.h>
63#include <linux/slab.h>
64#include <linux/export.h>
65#include "iwl-modparams.h"
66#include "iwl-eeprom-parse.h"
67
68/* EEPROM offset definitions */
69
70/* indirect access definitions */
71#define ADDRESS_MSK 0x0000FFFF
72#define INDIRECT_TYPE_MSK 0x000F0000
73#define INDIRECT_HOST 0x00010000
74#define INDIRECT_GENERAL 0x00020000
75#define INDIRECT_REGULATORY 0x00030000
76#define INDIRECT_CALIBRATION 0x00040000
77#define INDIRECT_PROCESS_ADJST 0x00050000
78#define INDIRECT_OTHERS 0x00060000
79#define INDIRECT_TXP_LIMIT 0x00070000
80#define INDIRECT_TXP_LIMIT_SIZE 0x00080000
81#define INDIRECT_ADDRESS 0x00100000
82
83/* corresponding link offsets in EEPROM */
84#define EEPROM_LINK_HOST (2*0x64)
85#define EEPROM_LINK_GENERAL (2*0x65)
86#define EEPROM_LINK_REGULATORY (2*0x66)
87#define EEPROM_LINK_CALIBRATION (2*0x67)
88#define EEPROM_LINK_PROCESS_ADJST (2*0x68)
89#define EEPROM_LINK_OTHERS (2*0x69)
90#define EEPROM_LINK_TXP_LIMIT (2*0x6a)
91#define EEPROM_LINK_TXP_LIMIT_SIZE (2*0x6b)
92
93/* General */
94#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
95#define EEPROM_SUBSYSTEM_ID (2*0x0A) /* 2 bytes */
96#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
97#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
98#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
99#define EEPROM_VERSION (2*0x44) /* 2 bytes */
100#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
101#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
102#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
103#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
104
105/* calibration */
106struct iwl_eeprom_calib_hdr {
107 u8 version;
108 u8 pa_type;
109 __le16 voltage;
110} __packed;
111
112#define EEPROM_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION)
113#define EEPROM_XTAL ((2*0x128) | EEPROM_CALIB_ALL)
114
115/* temperature */
116#define EEPROM_KELVIN_TEMPERATURE ((2*0x12A) | EEPROM_CALIB_ALL)
117#define EEPROM_RAW_TEMPERATURE ((2*0x12B) | EEPROM_CALIB_ALL)
118
119/*
120 * EEPROM bands
121 * These are the channel numbers from each band in the order
122 * that they are stored in the EEPROM band information. Note
123 * that EEPROM bands aren't the same as mac80211 bands, and
124 * there are even special "ht40 bands" in the EEPROM.
125 */
126static const u8 iwl_eeprom_band_1[14] = { /* 2.4 GHz */
127 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
128};
129
130static const u8 iwl_eeprom_band_2[] = { /* 4915-5080MHz */
131 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
132};
133
134static const u8 iwl_eeprom_band_3[] = { /* 5170-5320MHz */
135 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
136};
137
138static const u8 iwl_eeprom_band_4[] = { /* 5500-5700MHz */
139 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
140};
141
142static const u8 iwl_eeprom_band_5[] = { /* 5725-5825MHz */
143 145, 149, 153, 157, 161, 165
144};
145
146static const u8 iwl_eeprom_band_6[] = { /* 2.4 ht40 channel */
147 1, 2, 3, 4, 5, 6, 7
148};
149
150static const u8 iwl_eeprom_band_7[] = { /* 5.2 ht40 channel */
151 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
152};
153
154#define IWL_NUM_CHANNELS (ARRAY_SIZE(iwl_eeprom_band_1) + \
155 ARRAY_SIZE(iwl_eeprom_band_2) + \
156 ARRAY_SIZE(iwl_eeprom_band_3) + \
157 ARRAY_SIZE(iwl_eeprom_band_4) + \
158 ARRAY_SIZE(iwl_eeprom_band_5))
159
160/* rate data (static) */
161static struct ieee80211_rate iwl_cfg80211_rates[] = {
162 { .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, },
163 { .bitrate = 2 * 10, .hw_value = 1, .hw_value_short = 1,
164 .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
165 { .bitrate = 5.5 * 10, .hw_value = 2, .hw_value_short = 2,
166 .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
167 { .bitrate = 11 * 10, .hw_value = 3, .hw_value_short = 3,
168 .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
169 { .bitrate = 6 * 10, .hw_value = 4, .hw_value_short = 4, },
170 { .bitrate = 9 * 10, .hw_value = 5, .hw_value_short = 5, },
171 { .bitrate = 12 * 10, .hw_value = 6, .hw_value_short = 6, },
172 { .bitrate = 18 * 10, .hw_value = 7, .hw_value_short = 7, },
173 { .bitrate = 24 * 10, .hw_value = 8, .hw_value_short = 8, },
174 { .bitrate = 36 * 10, .hw_value = 9, .hw_value_short = 9, },
175 { .bitrate = 48 * 10, .hw_value = 10, .hw_value_short = 10, },
176 { .bitrate = 54 * 10, .hw_value = 11, .hw_value_short = 11, },
177};
178#define RATES_24_OFFS 0
179#define N_RATES_24 ARRAY_SIZE(iwl_cfg80211_rates)
180#define RATES_52_OFFS 4
181#define N_RATES_52 (N_RATES_24 - RATES_52_OFFS)
182
183/* EEPROM reading functions */
184
185static u16 iwl_eeprom_query16(const u8 *eeprom, size_t eeprom_size, int offset)
186{
187 if (WARN_ON(offset + sizeof(u16) > eeprom_size))
188 return 0;
189 return le16_to_cpup((__le16 *)(eeprom + offset));
190}
191
192static u32 eeprom_indirect_address(const u8 *eeprom, size_t eeprom_size,
193 u32 address)
194{
195 u16 offset = 0;
196
197 if ((address & INDIRECT_ADDRESS) == 0)
198 return address;
199
200 switch (address & INDIRECT_TYPE_MSK) {
201 case INDIRECT_HOST:
202 offset = iwl_eeprom_query16(eeprom, eeprom_size,
203 EEPROM_LINK_HOST);
204 break;
205 case INDIRECT_GENERAL:
206 offset = iwl_eeprom_query16(eeprom, eeprom_size,
207 EEPROM_LINK_GENERAL);
208 break;
209 case INDIRECT_REGULATORY:
210 offset = iwl_eeprom_query16(eeprom, eeprom_size,
211 EEPROM_LINK_REGULATORY);
212 break;
213 case INDIRECT_TXP_LIMIT:
214 offset = iwl_eeprom_query16(eeprom, eeprom_size,
215 EEPROM_LINK_TXP_LIMIT);
216 break;
217 case INDIRECT_TXP_LIMIT_SIZE:
218 offset = iwl_eeprom_query16(eeprom, eeprom_size,
219 EEPROM_LINK_TXP_LIMIT_SIZE);
220 break;
221 case INDIRECT_CALIBRATION:
222 offset = iwl_eeprom_query16(eeprom, eeprom_size,
223 EEPROM_LINK_CALIBRATION);
224 break;
225 case INDIRECT_PROCESS_ADJST:
226 offset = iwl_eeprom_query16(eeprom, eeprom_size,
227 EEPROM_LINK_PROCESS_ADJST);
228 break;
229 case INDIRECT_OTHERS:
230 offset = iwl_eeprom_query16(eeprom, eeprom_size,
231 EEPROM_LINK_OTHERS);
232 break;
233 default:
234 WARN_ON(1);
235 break;
236 }
237
238 /* translate the offset from words to byte */
239 return (address & ADDRESS_MSK) + (offset << 1);
240}
241
242static const u8 *iwl_eeprom_query_addr(const u8 *eeprom, size_t eeprom_size,
243 u32 offset)
244{
245 u32 address = eeprom_indirect_address(eeprom, eeprom_size, offset);
246
247 if (WARN_ON(address >= eeprom_size))
248 return NULL;
249
250 return &eeprom[address];
251}
252
253static int iwl_eeprom_read_calib(const u8 *eeprom, size_t eeprom_size,
254 struct iwl_eeprom_data *data)
255{
256 struct iwl_eeprom_calib_hdr *hdr;
257
258 hdr = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size,
259 EEPROM_CALIB_ALL);
260 if (!hdr)
261 return -ENODATA;
262 data->calib_version = hdr->version;
263 data->calib_voltage = hdr->voltage;
264
265 return 0;
266}
267
268/**
269 * enum iwl_eeprom_channel_flags - channel flags in EEPROM
270 * @EEPROM_CHANNEL_VALID: channel is usable for this SKU/geo
271 * @EEPROM_CHANNEL_IBSS: usable as an IBSS channel
272 * @EEPROM_CHANNEL_ACTIVE: active scanning allowed
273 * @EEPROM_CHANNEL_RADAR: radar detection required
274 * @EEPROM_CHANNEL_WIDE: 20 MHz channel okay (?)
275 * @EEPROM_CHANNEL_DFS: dynamic freq selection candidate
276 */
277enum iwl_eeprom_channel_flags {
278 EEPROM_CHANNEL_VALID = BIT(0),
279 EEPROM_CHANNEL_IBSS = BIT(1),
280 EEPROM_CHANNEL_ACTIVE = BIT(3),
281 EEPROM_CHANNEL_RADAR = BIT(4),
282 EEPROM_CHANNEL_WIDE = BIT(5),
283 EEPROM_CHANNEL_DFS = BIT(7),
284};
285
286/**
287 * struct iwl_eeprom_channel - EEPROM channel data
288 * @flags: %EEPROM_CHANNEL_* flags
289 * @max_power_avg: max power (in dBm) on this channel, at most 31 dBm
290 */
291struct iwl_eeprom_channel {
292 u8 flags;
293 s8 max_power_avg;
294} __packed;
295
296
297enum iwl_eeprom_enhanced_txpwr_flags {
298 IWL_EEPROM_ENH_TXP_FL_VALID = BIT(0),
299 IWL_EEPROM_ENH_TXP_FL_BAND_52G = BIT(1),
300 IWL_EEPROM_ENH_TXP_FL_OFDM = BIT(2),
301 IWL_EEPROM_ENH_TXP_FL_40MHZ = BIT(3),
302 IWL_EEPROM_ENH_TXP_FL_HT_AP = BIT(4),
303 IWL_EEPROM_ENH_TXP_FL_RES1 = BIT(5),
304 IWL_EEPROM_ENH_TXP_FL_RES2 = BIT(6),
305 IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE = BIT(7),
306};
307
308/**
309 * iwl_eeprom_enhanced_txpwr structure
310 * @flags: entry flags
311 * @channel: channel number
312 * @chain_a_max_pwr: chain a max power in 1/2 dBm
313 * @chain_b_max_pwr: chain b max power in 1/2 dBm
314 * @chain_c_max_pwr: chain c max power in 1/2 dBm
315 * @delta_20_in_40: 20-in-40 deltas (hi/lo)
316 * @mimo2_max_pwr: mimo2 max power in 1/2 dBm
317 * @mimo3_max_pwr: mimo3 max power in 1/2 dBm
318 *
319 * This structure presents the enhanced regulatory tx power limit layout
320 * in an EEPROM image.
321 */
322struct iwl_eeprom_enhanced_txpwr {
323 u8 flags;
324 u8 channel;
325 s8 chain_a_max;
326 s8 chain_b_max;
327 s8 chain_c_max;
328 u8 delta_20_in_40;
329 s8 mimo2_max;
330 s8 mimo3_max;
331} __packed;
332
333static s8 iwl_get_max_txpwr_half_dbm(const struct iwl_eeprom_data *data,
334 struct iwl_eeprom_enhanced_txpwr *txp)
335{
336 s8 result = 0; /* (.5 dBm) */
337
338 /* Take the highest tx power from any valid chains */
339 if (data->valid_tx_ant & ANT_A && txp->chain_a_max > result)
340 result = txp->chain_a_max;
341
342 if (data->valid_tx_ant & ANT_B && txp->chain_b_max > result)
343 result = txp->chain_b_max;
344
345 if (data->valid_tx_ant & ANT_C && txp->chain_c_max > result)
346 result = txp->chain_c_max;
347
348 if ((data->valid_tx_ant == ANT_AB ||
349 data->valid_tx_ant == ANT_BC ||
350 data->valid_tx_ant == ANT_AC) && txp->mimo2_max > result)
351 result = txp->mimo2_max;
352
353 if (data->valid_tx_ant == ANT_ABC && txp->mimo3_max > result)
354 result = txp->mimo3_max;
355
356 return result;
357}
358
359#define EEPROM_TXP_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT)
360#define EEPROM_TXP_ENTRY_LEN sizeof(struct iwl_eeprom_enhanced_txpwr)
361#define EEPROM_TXP_SZ_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT_SIZE)
362
363#define TXP_CHECK_AND_PRINT(x) \
364 ((txp->flags & IWL_EEPROM_ENH_TXP_FL_##x) ? # x " " : "")
365
366static void
367iwl_eeprom_enh_txp_read_element(struct iwl_eeprom_data *data,
368 struct iwl_eeprom_enhanced_txpwr *txp,
369 int n_channels, s8 max_txpower_avg)
370{
371 int ch_idx;
372 enum ieee80211_band band;
373
374 band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ?
375 IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
376
377 for (ch_idx = 0; ch_idx < n_channels; ch_idx++) {
378 struct ieee80211_channel *chan = &data->channels[ch_idx];
379
380 /* update matching channel or from common data only */
381 if (txp->channel != 0 && chan->hw_value != txp->channel)
382 continue;
383
384 /* update matching band only */
385 if (band != chan->band)
386 continue;
387
388 if (chan->max_power < max_txpower_avg &&
389 !(txp->flags & IWL_EEPROM_ENH_TXP_FL_40MHZ))
390 chan->max_power = max_txpower_avg;
391 }
392}
393
394static void iwl_eeprom_enhanced_txpower(struct device *dev,
395 struct iwl_eeprom_data *data,
396 const u8 *eeprom, size_t eeprom_size,
397 int n_channels)
398{
399 struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
400 int idx, entries;
401 __le16 *txp_len;
402 s8 max_txp_avg_halfdbm;
403
404 BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);
405
406 /* the length is in 16-bit words, but we want entries */
407 txp_len = (__le16 *)iwl_eeprom_query_addr(eeprom, eeprom_size,
408 EEPROM_TXP_SZ_OFFS);
409 entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;
410
411 txp_array = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size,
412 EEPROM_TXP_OFFS);
413
414 for (idx = 0; idx < entries; idx++) {
415 txp = &txp_array[idx];
416 /* skip invalid entries */
417 if (!(txp->flags & IWL_EEPROM_ENH_TXP_FL_VALID))
418 continue;
419
420 IWL_DEBUG_EEPROM(dev, "%s %d:\t %s%s%s%s%s%s%s%s (0x%02x)\n",
421 (txp->channel && (txp->flags &
422 IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE)) ?
423 "Common " : (txp->channel) ?
424 "Channel" : "Common",
425 (txp->channel),
426 TXP_CHECK_AND_PRINT(VALID),
427 TXP_CHECK_AND_PRINT(BAND_52G),
428 TXP_CHECK_AND_PRINT(OFDM),
429 TXP_CHECK_AND_PRINT(40MHZ),
430 TXP_CHECK_AND_PRINT(HT_AP),
431 TXP_CHECK_AND_PRINT(RES1),
432 TXP_CHECK_AND_PRINT(RES2),
433 TXP_CHECK_AND_PRINT(COMMON_TYPE),
434 txp->flags);
435 IWL_DEBUG_EEPROM(dev,
436 "\t\t chain_A: 0x%02x chain_B: 0X%02x chain_C: 0X%02x\n",
437 txp->chain_a_max, txp->chain_b_max,
438 txp->chain_c_max);
439 IWL_DEBUG_EEPROM(dev,
440 "\t\t MIMO2: 0x%02x MIMO3: 0x%02x High 20_on_40: 0x%02x Low 20_on_40: 0x%02x\n",
441 txp->mimo2_max, txp->mimo3_max,
442 ((txp->delta_20_in_40 & 0xf0) >> 4),
443 (txp->delta_20_in_40 & 0x0f));
444
445 max_txp_avg_halfdbm = iwl_get_max_txpwr_half_dbm(data, txp);
446
447 iwl_eeprom_enh_txp_read_element(data, txp, n_channels,
448 DIV_ROUND_UP(max_txp_avg_halfdbm, 2));
449
450 if (max_txp_avg_halfdbm > data->max_tx_pwr_half_dbm)
451 data->max_tx_pwr_half_dbm = max_txp_avg_halfdbm;
452 }
453}
454
455static void iwl_init_band_reference(const struct iwl_cfg *cfg,
456 const u8 *eeprom, size_t eeprom_size,
457 int eeprom_band, int *eeprom_ch_count,
458 const struct iwl_eeprom_channel **ch_info,
459 const u8 **eeprom_ch_array)
460{
461 u32 offset = cfg->eeprom_params->regulatory_bands[eeprom_band - 1];
462
463 offset |= INDIRECT_ADDRESS | INDIRECT_REGULATORY;
464
465 *ch_info = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size, offset);
466
467 switch (eeprom_band) {
468 case 1: /* 2.4GHz band */
469 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
470 *eeprom_ch_array = iwl_eeprom_band_1;
471 break;
472 case 2: /* 4.9GHz band */
473 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
474 *eeprom_ch_array = iwl_eeprom_band_2;
475 break;
476 case 3: /* 5.2GHz band */
477 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
478 *eeprom_ch_array = iwl_eeprom_band_3;
479 break;
480 case 4: /* 5.5GHz band */
481 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
482 *eeprom_ch_array = iwl_eeprom_band_4;
483 break;
484 case 5: /* 5.7GHz band */
485 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
486 *eeprom_ch_array = iwl_eeprom_band_5;
487 break;
488 case 6: /* 2.4GHz ht40 channels */
489 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6);
490 *eeprom_ch_array = iwl_eeprom_band_6;
491 break;
492 case 7: /* 5 GHz ht40 channels */
493 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7);
494 *eeprom_ch_array = iwl_eeprom_band_7;
495 break;
496 default:
497 *eeprom_ch_count = 0;
498 *eeprom_ch_array = NULL;
499 WARN_ON(1);
500 }
501}
502
503#define CHECK_AND_PRINT(x) \
504 ((eeprom_ch->flags & EEPROM_CHANNEL_##x) ? # x " " : "")
505
506static void iwl_mod_ht40_chan_info(struct device *dev,
507 struct iwl_eeprom_data *data, int n_channels,
508 enum ieee80211_band band, u16 channel,
509 const struct iwl_eeprom_channel *eeprom_ch,
510 u8 clear_ht40_extension_channel)
511{
512 struct ieee80211_channel *chan = NULL;
513 int i;
514
515 for (i = 0; i < n_channels; i++) {
516 if (data->channels[i].band != band)
517 continue;
518 if (data->channels[i].hw_value != channel)
519 continue;
520 chan = &data->channels[i];
521 break;
522 }
523
524 if (!chan)
525 return;
526
527 IWL_DEBUG_EEPROM(dev,
528 "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
529 channel,
530 band == IEEE80211_BAND_5GHZ ? "5.2" : "2.4",
531 CHECK_AND_PRINT(IBSS),
532 CHECK_AND_PRINT(ACTIVE),
533 CHECK_AND_PRINT(RADAR),
534 CHECK_AND_PRINT(WIDE),
535 CHECK_AND_PRINT(DFS),
536 eeprom_ch->flags,
537 eeprom_ch->max_power_avg,
538 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) &&
539 !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? ""
540 : "not ");
541
542 if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
543 chan->flags &= ~clear_ht40_extension_channel;
544}
545
546#define CHECK_AND_PRINT_I(x) \
547 ((eeprom_ch_info[ch_idx].flags & EEPROM_CHANNEL_##x) ? # x " " : "")
548
549static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
550 struct iwl_eeprom_data *data,
551 const u8 *eeprom, size_t eeprom_size)
552{
553 int band, ch_idx;
554 const struct iwl_eeprom_channel *eeprom_ch_info;
555 const u8 *eeprom_ch_array;
556 int eeprom_ch_count;
557 int n_channels = 0;
558
559 /*
560 * Loop through the 5 EEPROM bands and add them to the parse list
561 */
562 for (band = 1; band <= 5; band++) {
563 struct ieee80211_channel *channel;
564
565 iwl_init_band_reference(cfg, eeprom, eeprom_size, band,
566 &eeprom_ch_count, &eeprom_ch_info,
567 &eeprom_ch_array);
568
569 /* Loop through each band adding each of the channels */
570 for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) {
571 const struct iwl_eeprom_channel *eeprom_ch;
572
573 eeprom_ch = &eeprom_ch_info[ch_idx];
574
575 if (!(eeprom_ch->flags & EEPROM_CHANNEL_VALID)) {
576 IWL_DEBUG_EEPROM(dev,
577 "Ch. %d Flags %x [%sGHz] - No traffic\n",
578 eeprom_ch_array[ch_idx],
579 eeprom_ch_info[ch_idx].flags,
580 (band != 1) ? "5.2" : "2.4");
581 continue;
582 }
583
584 channel = &data->channels[n_channels];
585 n_channels++;
586
587 channel->hw_value = eeprom_ch_array[ch_idx];
588 channel->band = (band == 1) ? IEEE80211_BAND_2GHZ
589 : IEEE80211_BAND_5GHZ;
590 channel->center_freq =
591 ieee80211_channel_to_frequency(
592 channel->hw_value, channel->band);
593
594 /* set no-HT40, will enable as appropriate later */
595 channel->flags = IEEE80211_CHAN_NO_HT40;
596
597 if (!(eeprom_ch->flags & EEPROM_CHANNEL_IBSS))
598 channel->flags |= IEEE80211_CHAN_NO_IBSS;
599
600 if (!(eeprom_ch->flags & EEPROM_CHANNEL_ACTIVE))
601 channel->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
602
603 if (eeprom_ch->flags & EEPROM_CHANNEL_RADAR)
604 channel->flags |= IEEE80211_CHAN_RADAR;
605
606 /* Initialize regulatory-based run-time data */
607 channel->max_power =
608 eeprom_ch_info[ch_idx].max_power_avg;
609 IWL_DEBUG_EEPROM(dev,
610 "Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
611 channel->hw_value,
612 (band != 1) ? "5.2" : "2.4",
613 CHECK_AND_PRINT_I(VALID),
614 CHECK_AND_PRINT_I(IBSS),
615 CHECK_AND_PRINT_I(ACTIVE),
616 CHECK_AND_PRINT_I(RADAR),
617 CHECK_AND_PRINT_I(WIDE),
618 CHECK_AND_PRINT_I(DFS),
619 eeprom_ch_info[ch_idx].flags,
620 eeprom_ch_info[ch_idx].max_power_avg,
621 ((eeprom_ch_info[ch_idx].flags &
622 EEPROM_CHANNEL_IBSS) &&
623 !(eeprom_ch_info[ch_idx].flags &
624 EEPROM_CHANNEL_RADAR))
625 ? "" : "not ");
626 }
627 }
628
629 if (cfg->eeprom_params->enhanced_txpower) {
630 /*
631 * for newer device (6000 series and up)
632 * EEPROM contain enhanced tx power information
633 * driver need to process addition information
634 * to determine the max channel tx power limits
635 */
636 iwl_eeprom_enhanced_txpower(dev, data, eeprom, eeprom_size,
637 n_channels);
638 } else {
639 /* All others use data from channel map */
640 int i;
641
642 data->max_tx_pwr_half_dbm = -128;
643
644 for (i = 0; i < n_channels; i++)
645 data->max_tx_pwr_half_dbm =
646 max_t(s8, data->max_tx_pwr_half_dbm,
647 data->channels[i].max_power * 2);
648 }
649
650 /* Check if we do have HT40 channels */
651 if (cfg->eeprom_params->regulatory_bands[5] ==
652 EEPROM_REGULATORY_BAND_NO_HT40 &&
653 cfg->eeprom_params->regulatory_bands[6] ==
654 EEPROM_REGULATORY_BAND_NO_HT40)
655 return n_channels;
656
657 /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
658 for (band = 6; band <= 7; band++) {
659 enum ieee80211_band ieeeband;
660
661 iwl_init_band_reference(cfg, eeprom, eeprom_size, band,
662 &eeprom_ch_count, &eeprom_ch_info,
663 &eeprom_ch_array);
664
665 /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
666 ieeeband = (band == 6) ? IEEE80211_BAND_2GHZ
667 : IEEE80211_BAND_5GHZ;
668
669 /* Loop through each band adding each of the channels */
670 for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) {
671 /* Set up driver's info for lower half */
672 iwl_mod_ht40_chan_info(dev, data, n_channels, ieeeband,
673 eeprom_ch_array[ch_idx],
674 &eeprom_ch_info[ch_idx],
675 IEEE80211_CHAN_NO_HT40PLUS);
676
677 /* Set up driver's info for upper half */
678 iwl_mod_ht40_chan_info(dev, data, n_channels, ieeeband,
679 eeprom_ch_array[ch_idx] + 4,
680 &eeprom_ch_info[ch_idx],
681 IEEE80211_CHAN_NO_HT40MINUS);
682 }
683 }
684
685 return n_channels;
686}
687
688static int iwl_init_sband_channels(struct iwl_eeprom_data *data,
689 struct ieee80211_supported_band *sband,
690 int n_channels, enum ieee80211_band band)
691{
692 struct ieee80211_channel *chan = &data->channels[0];
693 int n = 0, idx = 0;
694
695 while (chan->band != band && idx < n_channels)
696 chan = &data->channels[++idx];
697
698 sband->channels = &data->channels[idx];
699
700 while (chan->band == band && idx < n_channels) {
701 chan = &data->channels[++idx];
702 n++;
703 }
704
705 sband->n_channels = n;
706
707 return n;
708}
709
710#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
711#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
712
713static void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
714 struct iwl_eeprom_data *data,
715 struct ieee80211_sta_ht_cap *ht_info,
716 enum ieee80211_band band)
717{
718 int max_bit_rate = 0;
719 u8 rx_chains;
720 u8 tx_chains;
721
722 tx_chains = hweight8(data->valid_tx_ant);
723 if (cfg->rx_with_siso_diversity)
724 rx_chains = 1;
725 else
726 rx_chains = hweight8(data->valid_rx_ant);
727
728 if (!(data->sku & EEPROM_SKU_CAP_11N_ENABLE) || !cfg->ht_params) {
729 ht_info->ht_supported = false;
730 return;
731 }
732
733 ht_info->ht_supported = true;
734 ht_info->cap = 0;
735
736 if (iwlwifi_mod_params.amsdu_size_8K)
737 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
738
739 ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
740 ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
741
742 ht_info->mcs.rx_mask[0] = 0xFF;
743 if (rx_chains >= 2)
744 ht_info->mcs.rx_mask[1] = 0xFF;
745 if (rx_chains >= 3)
746 ht_info->mcs.rx_mask[2] = 0xFF;
747
748 if (cfg->ht_params->ht_greenfield_support)
749 ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
750 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
751
752 max_bit_rate = MAX_BIT_RATE_20_MHZ;
753
754 if (cfg->ht_params->ht40_bands & BIT(band)) {
755 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
756 ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
757 ht_info->mcs.rx_mask[4] = 0x01;
758 max_bit_rate = MAX_BIT_RATE_40_MHZ;
759 }
760
761 /* Highest supported Rx data rate */
762 max_bit_rate *= rx_chains;
763 WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
764 ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
765
766 /* Tx MCS capabilities */
767 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
768 if (tx_chains != rx_chains) {
769 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
770 ht_info->mcs.tx_params |= ((tx_chains - 1) <<
771 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
772 }
773}
774
775static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
776 struct iwl_eeprom_data *data,
777 const u8 *eeprom, size_t eeprom_size)
778{
779 int n_channels = iwl_init_channel_map(dev, cfg, data,
780 eeprom, eeprom_size);
781 int n_used = 0;
782 struct ieee80211_supported_band *sband;
783
784 sband = &data->bands[IEEE80211_BAND_2GHZ];
785 sband->band = IEEE80211_BAND_2GHZ;
786 sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
787 sband->n_bitrates = N_RATES_24;
788 n_used += iwl_init_sband_channels(data, sband, n_channels,
789 IEEE80211_BAND_2GHZ);
790 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ);
791
792 sband = &data->bands[IEEE80211_BAND_5GHZ];
793 sband->band = IEEE80211_BAND_5GHZ;
794 sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
795 sband->n_bitrates = N_RATES_52;
796 n_used += iwl_init_sband_channels(data, sband, n_channels,
797 IEEE80211_BAND_5GHZ);
798 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ);
799
800 if (n_channels != n_used)
801 IWL_ERR_DEV(dev, "EEPROM: used only %d of %d channels\n",
802 n_used, n_channels);
803}
804
805/* EEPROM data functions */
806
/*
 * Allocate and fill a struct iwl_eeprom_data from a raw EEPROM/OTP image.
 *
 * Returns NULL on allocation failure, when any mandatory EEPROM section
 * is missing, or when the (possibly config-overridden) antenna masks end
 * up empty.  On success the caller owns the returned struct and must
 * release it with iwl_free_eeprom_data().
 */
struct iwl_eeprom_data *
iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
		      const u8 *eeprom, size_t eeprom_size)
{
	struct iwl_eeprom_data *data;
	const void *tmp;

	if (WARN_ON(!cfg || !cfg->eeprom_params))
		return NULL;

	/* channels[] is a flexible array member -- allocate in one go */
	data = kzalloc(sizeof(*data) +
		       sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS,
		       GFP_KERNEL);
	if (!data)
		return NULL;

	/* get MAC address(es) */
	tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_MAC_ADDRESS);
	if (!tmp)
		goto err_free;
	memcpy(data->hw_addr, tmp, ETH_ALEN);
	data->n_hw_addrs = iwl_eeprom_query16(eeprom, eeprom_size,
					      EEPROM_NUM_MAC_ADDRESS);

	/* calibration section is mandatory; bail out if unreadable */
	if (iwl_eeprom_read_calib(eeprom, eeprom_size, data))
		goto err_free;

	tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_XTAL);
	if (!tmp)
		goto err_free;
	memcpy(data->xtal_calib, tmp, sizeof(data->xtal_calib));

	tmp = iwl_eeprom_query_addr(eeprom, eeprom_size,
				    EEPROM_RAW_TEMPERATURE);
	if (!tmp)
		goto err_free;
	data->raw_temperature = *(__le16 *)tmp;

	/* kelvin temperature and voltage are two consecutive __le16s */
	tmp = iwl_eeprom_query_addr(eeprom, eeprom_size,
				    EEPROM_KELVIN_TEMPERATURE);
	if (!tmp)
		goto err_free;
	data->kelvin_temperature = *(__le16 *)tmp;
	data->kelvin_voltage = *((__le16 *)tmp + 1);

	data->radio_cfg = iwl_eeprom_query16(eeprom, eeprom_size,
					     EEPROM_RADIO_CONFIG);
	data->sku = iwl_eeprom_query16(eeprom, eeprom_size,
				       EEPROM_SKU_CAP);
	/* module parameter may force-disable 11n regardless of the SKU */
	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
		data->sku &= ~EEPROM_SKU_CAP_11N_ENABLE;

	data->eeprom_version = iwl_eeprom_query16(eeprom, eeprom_size,
						  EEPROM_VERSION);

	data->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(data->radio_cfg);
	data->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(data->radio_cfg);

	/* check overrides (some devices have wrong EEPROM) */
	if (cfg->valid_tx_ant)
		data->valid_tx_ant = cfg->valid_tx_ant;
	if (cfg->valid_rx_ant)
		data->valid_rx_ant = cfg->valid_rx_ant;

	if (!data->valid_tx_ant || !data->valid_rx_ant) {
		IWL_ERR_DEV(dev, "invalid antennas (0x%x, 0x%x)\n",
			    data->valid_tx_ant, data->valid_rx_ant);
		goto err_free;
	}

	/* builds data->bands and the channel map from the image */
	iwl_init_sbands(dev, cfg, data, eeprom, eeprom_size);

	return data;
 err_free:
	kfree(data);
	return NULL;
}
EXPORT_SYMBOL_GPL(iwl_parse_eeprom_data);
885
886/* helper functions */
887int iwl_eeprom_check_version(struct iwl_eeprom_data *data,
888 struct iwl_trans *trans)
889{
890 if (data->eeprom_version >= trans->cfg->eeprom_ver ||
891 data->calib_version >= trans->cfg->eeprom_calib_ver) {
892 IWL_INFO(trans, "device EEPROM VER=0x%x, CALIB=0x%x\n",
893 data->eeprom_version, data->calib_version);
894 return 0;
895 }
896
897 IWL_ERR(trans,
898 "Unsupported (too old) EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
899 data->eeprom_version, trans->cfg->eeprom_ver,
900 data->calib_version, trans->cfg->eeprom_calib_ver);
901 return -EINVAL;
902}
903EXPORT_SYMBOL_GPL(iwl_eeprom_check_version);
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
new file mode 100644
index 000000000000..9c07c670a1ce
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
@@ -0,0 +1,138 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62#ifndef __iwl_eeprom_parse_h__
63#define __iwl_eeprom_parse_h__
64
65#include <linux/types.h>
66#include <linux/if_ether.h>
67#include "iwl-trans.h"
68
69/* SKU Capabilities (actual values from EEPROM definition) */
70#define EEPROM_SKU_CAP_BAND_24GHZ (1 << 4)
71#define EEPROM_SKU_CAP_BAND_52GHZ (1 << 5)
72#define EEPROM_SKU_CAP_11N_ENABLE (1 << 6)
73#define EEPROM_SKU_CAP_AMT_ENABLE (1 << 7)
74#define EEPROM_SKU_CAP_IPAN_ENABLE (1 << 8)
75
/*
 * radio config bits (actual values from EEPROM definition)
 *
 * Arguments are fully parenthesized so that expression arguments
 * (e.g. "a | b") are masked as a whole instead of being split by
 * operator precedence (CERT C PRE01-C).
 */
#define EEPROM_RF_CFG_TYPE_MSK(x)   ((x) & 0x3)          /* bits 0-1   */
#define EEPROM_RF_CFG_STEP_MSK(x)   (((x) >> 2) & 0x3)   /* bits 2-3   */
#define EEPROM_RF_CFG_DASH_MSK(x)   (((x) >> 4) & 0x3)   /* bits 4-5   */
#define EEPROM_RF_CFG_PNUM_MSK(x)   (((x) >> 6) & 0x3)   /* bits 6-7   */
#define EEPROM_RF_CFG_TX_ANT_MSK(x) (((x) >> 8) & 0xF)   /* bits 8-11  */
#define EEPROM_RF_CFG_RX_ANT_MSK(x) (((x) >> 12) & 0xF)  /* bits 12-15 */
83
struct iwl_eeprom_data {
	int n_hw_addrs;			/* from EEPROM_NUM_MAC_ADDRESS */
	u8 hw_addr[ETH_ALEN];		/* primary MAC address */

	/*
	 * NOTE(review): radio_config appears unused by the parsing code
	 * visible here -- radio_cfg below is the field actually written
	 * from EEPROM_RADIO_CONFIG; confirm whether this duplicate can
	 * be removed.
	 */
	u16 radio_config;

	u8 calib_version;
	__le16 calib_voltage;

	__le16 raw_temperature;		/* EEPROM_RAW_TEMPERATURE */
	__le16 kelvin_temperature;	/* EEPROM_KELVIN_TEMPERATURE */
	__le16 kelvin_voltage;
	__le16 xtal_calib[2];		/* EEPROM_XTAL */

	u16 sku;			/* EEPROM_SKU_CAP_* bits */
	u16 radio_cfg;			/* raw EEPROM_RADIO_CONFIG word */
	u16 eeprom_version;
	s8 max_tx_pwr_half_dbm;		/* highest channel power, half-dBm */

	/* antenna bitmasks; may be overridden by the device config */
	u8 valid_tx_ant, valid_rx_ant;

	struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
	/* flexible array; allocated with IWL_NUM_CHANNELS entries */
	struct ieee80211_channel channels[];
};
108
109/**
110 * iwl_parse_eeprom_data - parse EEPROM data and return values
111 *
112 * @dev: device pointer we're parsing for, for debug only
113 * @cfg: device configuration for parsing and overrides
114 * @eeprom: the EEPROM data
115 * @eeprom_size: length of the EEPROM data
116 *
117 * This function parses all EEPROM values we need and then
118 * returns a (newly allocated) struct containing all the
119 * relevant values for driver use. The struct must be freed
120 * later with iwl_free_eeprom_data().
121 */
122struct iwl_eeprom_data *
123iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
124 const u8 *eeprom, size_t eeprom_size);
125
126/**
127 * iwl_free_eeprom_data - free EEPROM data
128 * @data: the data to free
129 */
static inline void iwl_free_eeprom_data(struct iwl_eeprom_data *data)
{
	/* single allocation: channels[] is a flexible array member */
	kfree(data);
}
134
135int iwl_eeprom_check_version(struct iwl_eeprom_data *data,
136 struct iwl_trans *trans);
137
138#endif /* __iwl_eeprom_parse_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
new file mode 100644
index 000000000000..27c7da3c6ed1
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
@@ -0,0 +1,463 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62#include <linux/types.h>
63#include <linux/slab.h>
64#include <linux/export.h>
65
66#include "iwl-debug.h"
67#include "iwl-eeprom-read.h"
68#include "iwl-io.h"
69#include "iwl-prph.h"
70#include "iwl-csr.h"
71
72/*
73 * EEPROM access time values:
74 *
75 * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
76 * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
77 * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
78 * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
79 */
#define IWL_EEPROM_ACCESS_TIMEOUT	5000 /* uSec */

/*
 * NOTE(review): the two IWL_EEPROM_SEM_* macros duplicate the
 * EEPROM_SEM_* macros defined later in this file (which are the ones
 * the semaphore code actually uses), and the two copies disagree on
 * the timeout unit -- confirm and drop one pair.
 */
#define IWL_EEPROM_SEM_TIMEOUT		10   /* microseconds */
#define IWL_EEPROM_SEM_RETRY_LIMIT	1000 /* number of attempts (not time) */
84
85
86/*
87 * The device's EEPROM semaphore prevents conflicts between driver and uCode
88 * when accessing the EEPROM; each access is a series of pulses to/from the
89 * EEPROM chip, not a single event, so even reads could conflict if they
90 * weren't arbitrated by the semaphore.
91 */
92
/*
 * NOTE(review): "milliseconds" conflicts with the IWL_EEPROM_SEM_TIMEOUT
 * comment earlier in this file ("microseconds") for the same value 10;
 * the unit depends on iwl_poll_bit()'s timeout convention -- TODO confirm.
 */
#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
95
96static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
97{
98 u16 count;
99 int ret;
100
101 for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
102 /* Request semaphore */
103 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
104 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
105
106 /* See if we got it */
107 ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
108 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
109 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
110 EEPROM_SEM_TIMEOUT);
111 if (ret >= 0) {
112 IWL_DEBUG_EEPROM(trans->dev,
113 "Acquired semaphore after %d tries.\n",
114 count+1);
115 return ret;
116 }
117 }
118
119 return ret;
120}
121
/* Give the EEPROM ownership semaphore back (counterpart of acquire). */
static void iwl_eeprom_release_semaphore(struct iwl_trans *trans)
{
	iwl_clear_bit(trans, CSR_HW_IF_CONFIG_REG,
		      CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
}
127
128static int iwl_eeprom_verify_signature(struct iwl_trans *trans, bool nvm_is_otp)
129{
130 u32 gp = iwl_read32(trans, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
131
132 IWL_DEBUG_EEPROM(trans->dev, "EEPROM signature=0x%08x\n", gp);
133
134 switch (gp) {
135 case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP:
136 if (!nvm_is_otp) {
137 IWL_ERR(trans, "EEPROM with bad signature: 0x%08x\n",
138 gp);
139 return -ENOENT;
140 }
141 return 0;
142 case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
143 case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
144 if (nvm_is_otp) {
145 IWL_ERR(trans, "OTP with bad signature: 0x%08x\n", gp);
146 return -ENOENT;
147 }
148 return 0;
149 case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP:
150 default:
151 IWL_ERR(trans,
152 "bad EEPROM/OTP signature, type=%s, EEPROM_GP=0x%08x\n",
153 nvm_is_otp ? "OTP" : "EEPROM", gp);
154 return -ENOENT;
155 }
156}
157
158/******************************************************************************
159 *
160 * OTP related functions
161 *
162******************************************************************************/
163
/*
 * Switch OTP addressing to absolute mode; used before traversing the
 * OTP block link list.
 */
static void iwl_set_otp_access_absolute(struct iwl_trans *trans)
{
	/*
	 * NOTE(review): the result of this read is discarded -- presumably
	 * a dummy read to settle CSR_OTP_GP_REG before changing the mode
	 * bit; confirm against the hardware documentation.
	 */
	iwl_read32(trans, CSR_OTP_GP_REG);

	/* clearing the mode bit selects absolute addressing */
	iwl_clear_bit(trans, CSR_OTP_GP_REG,
		      CSR_OTP_GP_REG_OTP_ACCESS_MODE);
}
171
172static int iwl_nvm_is_otp(struct iwl_trans *trans)
173{
174 u32 otpgp;
175
176 /* OTP only valid for CP/PP and after */
177 switch (trans->hw_rev & CSR_HW_REV_TYPE_MSK) {
178 case CSR_HW_REV_TYPE_NONE:
179 IWL_ERR(trans, "Unknown hardware type\n");
180 return -EIO;
181 case CSR_HW_REV_TYPE_5300:
182 case CSR_HW_REV_TYPE_5350:
183 case CSR_HW_REV_TYPE_5100:
184 case CSR_HW_REV_TYPE_5150:
185 return 0;
186 default:
187 otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
188 if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT)
189 return 1;
190 return 0;
191 }
192}
193
/*
 * iwl_init_otp_access - prepare the device for OTP reads
 *
 * Enables the radio clock, waits for it to stabilize, then pulses an
 * APMG reset and (on shadow-RAM devices) disables CSR auto clock
 * gating.  Returns iwl_poll_bit()'s result: negative on clock timeout,
 * non-negative on success.
 */
static int iwl_init_otp_access(struct iwl_trans *trans)
{
	int ret;

	/* Enable 40MHz radio clock */
	iwl_write32(trans, CSR_GP_CNTRL,
		    iwl_read32(trans, CSR_GP_CNTRL) |
		    CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* wait for clock to be ready */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   25000);
	if (ret < 0) {
		IWL_ERR(trans, "Time out access OTP\n");
	} else {
		/*
		 * Pulse an APMG reset request -- presumably brings the
		 * OTP machinery into a known state before reading; TODO
		 * confirm against the hardware documentation.
		 */
		iwl_set_bits_prph(trans, APMG_PS_CTRL_REG,
				  APMG_PS_CTRL_VAL_RESET_REQ);
		udelay(5);
		iwl_clear_bits_prph(trans, APMG_PS_CTRL_REG,
				    APMG_PS_CTRL_VAL_RESET_REQ);

		/*
		 * CSR auto clock gate disable bit -
		 * this is only applicable for HW with OTP shadow RAM
		 */
		if (trans->cfg->base_params->shadow_ram_support)
			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				    CSR_RESET_LINK_PWR_MGMT_DISABLED);
	}
	return ret;
}
227
/*
 * iwl_read_otp_word - read one 16-bit word from OTP
 *
 * @addr: byte address within the OTP image
 * @eeprom_data: filled with the word, stored little-endian
 *
 * Returns 0 on success (including after a correctable ECC error, which
 * is acknowledged and logged), negative on poll timeout or on an
 * uncorrectable ECC error.
 */
static int iwl_read_otp_word(struct iwl_trans *trans, u16 addr,
			     __le16 *eeprom_data)
{
	int ret = 0;
	u32 r;
	u32 otpgp;

	/* start the read: write byte address << 1, then poll for data */
	iwl_write32(trans, CSR_EEPROM_REG,
		    CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
	ret = iwl_poll_bit(trans, CSR_EEPROM_REG,
			   CSR_EEPROM_REG_READ_VALID_MSK,
			   CSR_EEPROM_REG_READ_VALID_MSK,
			   IWL_EEPROM_ACCESS_TIMEOUT);
	if (ret < 0) {
		IWL_ERR(trans, "Time out reading OTP[%d]\n", addr);
		return ret;
	}
	/* the 16-bit word lives in the upper half of the register */
	r = iwl_read32(trans, CSR_EEPROM_REG);
	/* check for ECC errors: */
	otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
	if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
		/* stop in this case */
		/* set the uncorrectable OTP ECC bit for acknowledgement */
		iwl_set_bit(trans, CSR_OTP_GP_REG,
			    CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
		IWL_ERR(trans, "Uncorrectable OTP ECC error, abort OTP read\n");
		return -EINVAL;
	}
	if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
		/* continue in this case */
		/* set the correctable OTP ECC bit for acknowledgement */
		iwl_set_bit(trans, CSR_OTP_GP_REG,
			    CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
		IWL_ERR(trans, "Correctable OTP ECC error, continue read\n");
	}
	*eeprom_data = cpu_to_le16(r >> 16);
	return 0;
}
266
267/*
268 * iwl_is_otp_empty: check for empty OTP
269 */
270static bool iwl_is_otp_empty(struct iwl_trans *trans)
271{
272 u16 next_link_addr = 0;
273 __le16 link_value;
274 bool is_empty = false;
275
276 /* locate the beginning of OTP link list */
277 if (!iwl_read_otp_word(trans, next_link_addr, &link_value)) {
278 if (!link_value) {
279 IWL_ERR(trans, "OTP is empty\n");
280 is_empty = true;
281 }
282 } else {
283 IWL_ERR(trans, "Unable to read first block of OTP list.\n");
284 is_empty = true;
285 }
286
287 return is_empty;
288}
289
290
291/*
292 * iwl_find_otp_image: find EEPROM image in OTP
293 * finding the OTP block that contains the EEPROM image.
294 * the last valid block on the link list (the block _before_ the last block)
295 * is the block we should read and used to configure the device.
296 * If all the available OTP blocks are full, the last block will be the block
297 * we should read and used to configure the device.
298 * only perform this operation if shadow RAM is disabled
299 */
static int iwl_find_otp_image(struct iwl_trans *trans,
					u16 *validblockaddr)
{
	u16 next_link_addr = 0, valid_addr;
	__le16 link_value = 0;
	int usedblocks = 0;

	/* set addressing mode to absolute to traverse the link list */
	iwl_set_otp_access_absolute(trans);

	/* checking for empty OTP or error */
	if (iwl_is_otp_empty(trans))
		return -EINVAL;

	/*
	 * start traverse link list
	 * until reach the max number of OTP blocks
	 * different devices have different number of OTP blocks
	 */
	do {
		/* save current valid block address
		 * check for more block on the link list
		 */
		valid_addr = next_link_addr;
		/* link entries hold the next block's word index;
		 * scale by sizeof(u16) to get a byte address */
		next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
		IWL_DEBUG_EEPROM(trans->dev, "OTP blocks %d addr 0x%x\n",
				 usedblocks, next_link_addr);
		if (iwl_read_otp_word(trans, next_link_addr, &link_value))
			return -EINVAL;
		if (!link_value) {
			/*
			 * reach the end of link list, return success and
			 * set address point to the starting address
			 * of the image
			 */
			*validblockaddr = valid_addr;
			/* skip first 2 bytes (link list pointer) */
			*validblockaddr += 2;
			return 0;
		}
		/* more in the link list, continue */
		usedblocks++;
	} while (usedblocks <= trans->cfg->base_params->max_ll_items);

	/* OTP has no valid blocks */
	IWL_DEBUG_EEPROM(trans->dev, "OTP has no valid blocks\n");
	return -EINVAL;
}
348
349/**
350 * iwl_read_eeprom - read EEPROM contents
351 *
352 * Load the EEPROM contents from adapter and return it
353 * and its size.
354 *
355 * NOTE: This routine uses the non-debug IO access functions.
356 */
int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size)
{
	__le16 *e;
	u32 gp = iwl_read32(trans, CSR_EEPROM_GP);
	int sz;
	int ret;
	u16 addr;
	u16 validblockaddr = 0;
	u16 cache_addr = 0;
	int nvm_is_otp;

	if (!eeprom || !eeprom_size)
		return -EINVAL;

	/* decide up front whether we read from OTP or classic EEPROM */
	nvm_is_otp = iwl_nvm_is_otp(trans);
	if (nvm_is_otp < 0)
		return nvm_is_otp;

	sz = trans->cfg->base_params->eeprom_size;
	IWL_DEBUG_EEPROM(trans->dev, "NVM size = %d\n", sz);

	/* buffer is handed to the caller on success; caller kfree()s it */
	e = kmalloc(sz, GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	ret = iwl_eeprom_verify_signature(trans, nvm_is_otp);
	if (ret < 0) {
		IWL_ERR(trans, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
		goto err_free;
	}

	/* Make sure driver (instead of uCode) is allowed to read EEPROM */
	ret = iwl_eeprom_acquire_semaphore(trans);
	if (ret < 0) {
		IWL_ERR(trans, "Failed to acquire EEPROM semaphore.\n");
		goto err_free;
	}

	if (nvm_is_otp) {
		ret = iwl_init_otp_access(trans);
		if (ret) {
			IWL_ERR(trans, "Failed to initialize OTP access.\n");
			goto err_unlock;
		}

		/* take EEPROM-interface ownership away from the uCode */
		iwl_write32(trans, CSR_EEPROM_GP,
			    iwl_read32(trans, CSR_EEPROM_GP) &
			    ~CSR_EEPROM_GP_IF_OWNER_MSK);

		/* clear any stale ECC status before reading */
		iwl_set_bit(trans, CSR_OTP_GP_REG,
			    CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
			    CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
		/* traversing the linked list if no shadow ram supported */
		if (!trans->cfg->base_params->shadow_ram_support) {
			ret = iwl_find_otp_image(trans, &validblockaddr);
			if (ret)
				goto err_unlock;
		}
		/* copy sz bytes of the image, word by word, into e[] */
		for (addr = validblockaddr; addr < validblockaddr + sz;
		     addr += sizeof(u16)) {
			__le16 eeprom_data;

			ret = iwl_read_otp_word(trans, addr, &eeprom_data);
			if (ret)
				goto err_unlock;
			e[cache_addr / 2] = eeprom_data;
			cache_addr += sizeof(u16);
		}
	} else {
		/* eeprom is an array of 16bit values */
		for (addr = 0; addr < sz; addr += sizeof(u16)) {
			u32 r;

			/* byte address << 1, then poll for valid data */
			iwl_write32(trans, CSR_EEPROM_REG,
				    CSR_EEPROM_REG_MSK_ADDR & (addr << 1));

			ret = iwl_poll_bit(trans, CSR_EEPROM_REG,
					   CSR_EEPROM_REG_READ_VALID_MSK,
					   CSR_EEPROM_REG_READ_VALID_MSK,
					   IWL_EEPROM_ACCESS_TIMEOUT);
			if (ret < 0) {
				IWL_ERR(trans,
					"Time out reading EEPROM[%d]\n", addr);
				goto err_unlock;
			}
			/* data word is in the upper 16 bits */
			r = iwl_read32(trans, CSR_EEPROM_REG);
			e[addr / 2] = cpu_to_le16(r >> 16);
		}
	}

	IWL_DEBUG_EEPROM(trans->dev, "NVM Type: %s\n",
			 nvm_is_otp ? "OTP" : "EEPROM");

	iwl_eeprom_release_semaphore(trans);

	*eeprom_size = sz;
	*eeprom = (u8 *)e;
	return 0;

 err_unlock:
	iwl_eeprom_release_semaphore(trans);
 err_free:
	kfree(e);

	return ret;
}
EXPORT_SYMBOL_GPL(iwl_read_eeprom);
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
new file mode 100644
index 000000000000..1337c9d36fee
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
@@ -0,0 +1,70 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_eeprom_h__
64#define __iwl_eeprom_h__
65
66#include "iwl-trans.h"
67
68int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size);
69
70#endif /* __iwl_eeprom_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
deleted file mode 100644
index b8e2b223ac36..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ /dev/null
@@ -1,1148 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63
64#include <linux/kernel.h>
65#include <linux/module.h>
66#include <linux/slab.h>
67#include <linux/init.h>
68
69#include <net/mac80211.h>
70
71#include "iwl-dev.h"
72#include "iwl-debug.h"
73#include "iwl-agn.h"
74#include "iwl-eeprom.h"
75#include "iwl-io.h"
76#include "iwl-prph.h"
77
78/************************** EEPROM BANDS ****************************
79 *
80 * The iwl_eeprom_band definitions below provide the mapping from the
81 * EEPROM contents to the specific channel number supported for each
82 * band.
83 *
84 * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3
85 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
86 * The specific geography and calibration information for that channel
87 * is contained in the eeprom map itself.
88 *
89 * During init, we copy the eeprom information and channel map
90 * information into priv->channel_info_24/52 and priv->channel_map_24/52
91 *
92 * channel_map_24/52 provides the index in the channel_info array for a
93 * given channel. We have to have two separate maps as there is channel
94 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
95 * band_2
96 *
97 * A value of 0xff stored in the channel_map indicates that the channel
98 * is not supported by the hardware at all.
99 *
100 * A value of 0xfe in the channel_map indicates that the channel is not
101 * valid for Tx with the current hardware. This means that
102 * while the system can tune and receive on a given channel, it may not
103 * be able to associate or transmit any frames on that
104 * channel. There is no corresponding channel information for that
105 * entry.
106 *
107 *********************************************************************/
108
109/* 2.4 GHz */
110const u8 iwl_eeprom_band_1[14] = {
111 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
112};
113
114/* 5.2 GHz bands */
115static const u8 iwl_eeprom_band_2[] = { /* 4915-5080MHz */
116 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
117};
118
119static const u8 iwl_eeprom_band_3[] = { /* 5170-5320MHz */
120 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
121};
122
123static const u8 iwl_eeprom_band_4[] = { /* 5500-5700MHz */
124 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
125};
126
127static const u8 iwl_eeprom_band_5[] = { /* 5725-5825MHz */
128 145, 149, 153, 157, 161, 165
129};
130
131static const u8 iwl_eeprom_band_6[] = { /* 2.4 ht40 channel */
132 1, 2, 3, 4, 5, 6, 7
133};
134
135static const u8 iwl_eeprom_band_7[] = { /* 5.2 ht40 channel */
136 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
137};
138
139/******************************************************************************
140 *
141 * generic NVM functions
142 *
143******************************************************************************/
144
145/*
146 * The device's EEPROM semaphore prevents conflicts between driver and uCode
147 * when accessing the EEPROM; each access is a series of pulses to/from the
148 * EEPROM chip, not a single event, so even reads could conflict if they
149 * weren't arbitrated by the semaphore.
150 */
151
#define EEPROM_SEM_TIMEOUT 10		/* milliseconds, per poll attempt */
#define EEPROM_SEM_RETRY_LIMIT 1000	/* number of attempts (not time) */

/*
 * Request the EEPROM semaphore from the device and wait for the
 * hardware to grant it (the OWN_SEM bit reads back as set).  Retries
 * the request up to EEPROM_SEM_RETRY_LIMIT times, polling up to
 * EEPROM_SEM_TIMEOUT ms on each attempt.
 *
 * Returns the non-negative iwl_poll_bit() result on success, or the
 * last (negative) poll result if every attempt timed out.
 */
static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
{
	u16 count;
	int ret;

	for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
		/* Request semaphore */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);

		/* See if we got it */
		ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
				   CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
				   CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
				   EEPROM_SEM_TIMEOUT);
		if (ret >= 0) {
			IWL_DEBUG_EEPROM(trans,
					 "Acquired semaphore after %d tries.\n",
					 count+1);
			return ret;
		}
	}

	return ret;
}
180
/*
 * Release the EEPROM arbitration semaphore by clearing the driver's
 * ownership bit, allowing the uCode to access the EEPROM again.
 */
static void iwl_eeprom_release_semaphore(struct iwl_trans *trans)
{
	iwl_clear_bit(trans, CSR_HW_IF_CONFIG_REG,
		      CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);

}
187
/*
 * Check that the NVM signature bits in CSR_EEPROM_GP agree with the
 * NVM type detected earlier (priv->nvm_device_type): a good-OTP
 * signature must come from an OTP device, a good-EEPROM signature from
 * an EEPROM device.  Returns 0 on agreement, -ENOENT otherwise.
 */
static int iwl_eeprom_verify_signature(struct iwl_priv *priv)
{
	u32 gp = iwl_read32(priv->trans, CSR_EEPROM_GP) &
			   CSR_EEPROM_GP_VALID_MSK;
	int ret = 0;

	IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
	switch (gp) {
	case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP:
		/* good OTP signature: only acceptable on an OTP device */
		if (priv->nvm_device_type != NVM_DEVICE_TYPE_OTP) {
			IWL_ERR(priv, "EEPROM with bad signature: 0x%08x\n",
				gp);
			ret = -ENOENT;
		}
		break;
	case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
	case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
		/* good EEPROM signature: only acceptable on an EEPROM device */
		if (priv->nvm_device_type != NVM_DEVICE_TYPE_EEPROM) {
			IWL_ERR(priv, "OTP with bad signature: 0x%08x\n", gp);
			ret = -ENOENT;
		}
		break;
	case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP:
	default:
		IWL_ERR(priv, "bad EEPROM/OTP signature, type=%s, "
			"EEPROM_GP=0x%08x\n",
			(priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
			? "OTP" : "EEPROM", gp);
		ret = -ENOENT;
		break;
	}
	return ret;
}
221
222u16 iwl_eeprom_query16(struct iwl_priv *priv, size_t offset)
223{
224 if (!priv->eeprom)
225 return 0;
226 return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
227}
228
229int iwl_eeprom_check_version(struct iwl_priv *priv)
230{
231 u16 eeprom_ver;
232 u16 calib_ver;
233
234 eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
235 calib_ver = iwl_eeprom_calib_version(priv);
236
237 if (eeprom_ver < priv->cfg->eeprom_ver ||
238 calib_ver < priv->cfg->eeprom_calib_ver)
239 goto err;
240
241 IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
242 eeprom_ver, calib_ver);
243
244 return 0;
245err:
246 IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
247 "CALIB=0x%x < 0x%x\n",
248 eeprom_ver, priv->cfg->eeprom_ver,
249 calib_ver, priv->cfg->eeprom_calib_ver);
250 return -EINVAL;
251
252}
253
254int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
255{
256 u16 radio_cfg;
257
258 priv->hw_params.sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP);
259 if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE &&
260 !priv->cfg->ht_params) {
261 IWL_ERR(priv, "Invalid 11n configuration\n");
262 return -EINVAL;
263 }
264
265 if (!priv->hw_params.sku) {
266 IWL_ERR(priv, "Invalid device sku\n");
267 return -EINVAL;
268 }
269
270 IWL_INFO(priv, "Device SKU: 0x%X\n", priv->hw_params.sku);
271
272 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
273
274 priv->hw_params.valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
275 priv->hw_params.valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
276
277 /* check overrides (some devices have wrong EEPROM) */
278 if (priv->cfg->valid_tx_ant)
279 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
280 if (priv->cfg->valid_rx_ant)
281 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
282
283 if (!priv->hw_params.valid_tx_ant || !priv->hw_params.valid_rx_ant) {
284 IWL_ERR(priv, "Invalid chain (0x%X, 0x%X)\n",
285 priv->hw_params.valid_tx_ant,
286 priv->hw_params.valid_rx_ant);
287 return -EINVAL;
288 }
289
290 IWL_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n",
291 priv->hw_params.valid_tx_ant, priv->hw_params.valid_rx_ant);
292
293 return 0;
294}
295
296u16 iwl_eeprom_calib_version(struct iwl_priv *priv)
297{
298 struct iwl_eeprom_calib_hdr *hdr;
299
300 hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
301 EEPROM_CALIB_ALL);
302 return hdr->version;
303}
304
305static u32 eeprom_indirect_address(struct iwl_priv *priv, u32 address)
306{
307 u16 offset = 0;
308
309 if ((address & INDIRECT_ADDRESS) == 0)
310 return address;
311
312 switch (address & INDIRECT_TYPE_MSK) {
313 case INDIRECT_HOST:
314 offset = iwl_eeprom_query16(priv, EEPROM_LINK_HOST);
315 break;
316 case INDIRECT_GENERAL:
317 offset = iwl_eeprom_query16(priv, EEPROM_LINK_GENERAL);
318 break;
319 case INDIRECT_REGULATORY:
320 offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);
321 break;
322 case INDIRECT_TXP_LIMIT:
323 offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT);
324 break;
325 case INDIRECT_TXP_LIMIT_SIZE:
326 offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT_SIZE);
327 break;
328 case INDIRECT_CALIBRATION:
329 offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION);
330 break;
331 case INDIRECT_PROCESS_ADJST:
332 offset = iwl_eeprom_query16(priv, EEPROM_LINK_PROCESS_ADJST);
333 break;
334 case INDIRECT_OTHERS:
335 offset = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS);
336 break;
337 default:
338 IWL_ERR(priv, "illegal indirect type: 0x%X\n",
339 address & INDIRECT_TYPE_MSK);
340 break;
341 }
342
343 /* translate the offset from words to byte */
344 return (address & ADDRESS_MSK) + (offset << 1);
345}
346
347const u8 *iwl_eeprom_query_addr(struct iwl_priv *priv, size_t offset)
348{
349 u32 address = eeprom_indirect_address(priv, offset);
350 BUG_ON(address >= priv->cfg->base_params->eeprom_size);
351 return &priv->eeprom[address];
352}
353
354void iwl_eeprom_get_mac(struct iwl_priv *priv, u8 *mac)
355{
356 const u8 *addr = iwl_eeprom_query_addr(priv,
357 EEPROM_MAC_ADDRESS);
358 memcpy(mac, addr, ETH_ALEN);
359}
360
361/******************************************************************************
362 *
363 * OTP related functions
364 *
365******************************************************************************/
366
/*
 * Select the OTP addressing mode: ABSOLUTE (raw OTP array, used to walk
 * the block link list) clears the access-mode bit, any other mode sets
 * it.  NOTE(review): the value of the initial CSR_OTP_GP_REG read is
 * discarded — presumably a settling read before the RMW; confirm
 * against the hardware documentation.
 */
static void iwl_set_otp_access(struct iwl_trans *trans,
			       enum iwl_access_mode mode)
{
	iwl_read32(trans, CSR_OTP_GP_REG);

	if (mode == IWL_OTP_ACCESS_ABSOLUTE)
		iwl_clear_bit(trans, CSR_OTP_GP_REG,
			      CSR_OTP_GP_REG_OTP_ACCESS_MODE);
	else
		iwl_set_bit(trans, CSR_OTP_GP_REG,
			    CSR_OTP_GP_REG_OTP_ACCESS_MODE);
}
379
380static int iwl_get_nvm_type(struct iwl_trans *trans, u32 hw_rev)
381{
382 u32 otpgp;
383 int nvm_type;
384
385 /* OTP only valid for CP/PP and after */
386 switch (hw_rev & CSR_HW_REV_TYPE_MSK) {
387 case CSR_HW_REV_TYPE_NONE:
388 IWL_ERR(trans, "Unknown hardware type\n");
389 return -ENOENT;
390 case CSR_HW_REV_TYPE_5300:
391 case CSR_HW_REV_TYPE_5350:
392 case CSR_HW_REV_TYPE_5100:
393 case CSR_HW_REV_TYPE_5150:
394 nvm_type = NVM_DEVICE_TYPE_EEPROM;
395 break;
396 default:
397 otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
398 if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT)
399 nvm_type = NVM_DEVICE_TYPE_OTP;
400 else
401 nvm_type = NVM_DEVICE_TYPE_EEPROM;
402 break;
403 }
404 return nvm_type;
405}
406
/*
 * Prepare the device for OTP access: enable the 40MHz radio clock,
 * wait for the MAC clock to become ready, then pulse the APMG reset
 * request.  On parts with OTP shadow RAM, additionally disable the CSR
 * auto clock gate.  Returns the iwl_poll_bit() result — negative when
 * the clock never became ready.
 */
static int iwl_init_otp_access(struct iwl_trans *trans)
{
	int ret;

	/* Enable 40MHz radio clock */
	iwl_write32(trans, CSR_GP_CNTRL,
		    iwl_read32(trans, CSR_GP_CNTRL) |
		    CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* wait for clock to be ready */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   25000);
	if (ret < 0)
		IWL_ERR(trans, "Time out access OTP\n");
	else {
		/* pulse the APMG reset request around a 5us delay */
		iwl_set_bits_prph(trans, APMG_PS_CTRL_REG,
				  APMG_PS_CTRL_VAL_RESET_REQ);
		udelay(5);
		iwl_clear_bits_prph(trans, APMG_PS_CTRL_REG,
				    APMG_PS_CTRL_VAL_RESET_REQ);

		/*
		 * CSR auto clock gate disable bit -
		 * this is only applicable for HW with OTP shadow RAM
		 */
		if (trans->cfg->base_params->shadow_ram_support)
			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				    CSR_RESET_LINK_PWR_MGMT_DISABLED);
	}
	return ret;
}
440
/*
 * Read one 16-bit word from the OTP at word address @addr into
 * @eeprom_data (stored little-endian; the data sits in the upper 16
 * bits of CSR_EEPROM_REG).
 *
 * After the read, OTP ECC status is checked: an uncorrectable error is
 * acknowledged and aborts with -EINVAL; a correctable error is
 * acknowledged, logged, and the data is still used.  Returns 0 on
 * success, negative on timeout or uncorrectable ECC error.
 */
static int iwl_read_otp_word(struct iwl_trans *trans, u16 addr,
			     __le16 *eeprom_data)
{
	int ret = 0;
	u32 r;
	u32 otpgp;

	/* byte address = word address << 1 */
	iwl_write32(trans, CSR_EEPROM_REG,
		    CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
	ret = iwl_poll_bit(trans, CSR_EEPROM_REG,
			   CSR_EEPROM_REG_READ_VALID_MSK,
			   CSR_EEPROM_REG_READ_VALID_MSK,
			   IWL_EEPROM_ACCESS_TIMEOUT);
	if (ret < 0) {
		IWL_ERR(trans, "Time out reading OTP[%d]\n", addr);
		return ret;
	}
	r = iwl_read32(trans, CSR_EEPROM_REG);
	/* check for ECC errors: */
	otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
	if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
		/* stop in this case */
		/* set the uncorrectable OTP ECC bit for acknowledgement */
		iwl_set_bit(trans, CSR_OTP_GP_REG,
			    CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
		IWL_ERR(trans, "Uncorrectable OTP ECC error, abort OTP read\n");
		return -EINVAL;
	}
	if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
		/* continue in this case */
		/* set the correctable OTP ECC bit for acknowledgement */
		iwl_set_bit(trans, CSR_OTP_GP_REG,
			    CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
		IWL_ERR(trans, "Correctable OTP ECC error, continue read\n");
	}
	*eeprom_data = cpu_to_le16(r >> 16);
	return 0;
}
479
480/*
481 * iwl_is_otp_empty: check for empty OTP
482 */
483static bool iwl_is_otp_empty(struct iwl_trans *trans)
484{
485 u16 next_link_addr = 0;
486 __le16 link_value;
487 bool is_empty = false;
488
489 /* locate the beginning of OTP link list */
490 if (!iwl_read_otp_word(trans, next_link_addr, &link_value)) {
491 if (!link_value) {
492 IWL_ERR(trans, "OTP is empty\n");
493 is_empty = true;
494 }
495 } else {
496 IWL_ERR(trans, "Unable to read first block of OTP list.\n");
497 is_empty = true;
498 }
499
500 return is_empty;
501}
502
503
/*
 * iwl_find_otp_image: find EEPROM image in OTP
 * finding the OTP block that contains the EEPROM image.
 * the last valid block on the link list (the block _before_ the last block)
 * is the block we should read and used to configure the device.
 * If all the available OTP blocks are full, the last block will be the block
 * we should read and used to configure the device.
 * only perform this operation if shadow RAM is disabled
 *
 * On success returns 0 and sets *validblockaddr to the byte address of
 * the image (block start + 2, skipping the link pointer).  Returns
 * -EINVAL for an empty OTP, a failed read, or when max_ll_items blocks
 * were traversed without reaching the end of the list.
 */
static int iwl_find_otp_image(struct iwl_trans *trans,
					u16 *validblockaddr)
{
	u16 next_link_addr = 0, valid_addr;
	__le16 link_value = 0;
	int usedblocks = 0;

	/* set addressing mode to absolute to traverse the link list */
	iwl_set_otp_access(trans, IWL_OTP_ACCESS_ABSOLUTE);

	/* checking for empty OTP or error */
	if (iwl_is_otp_empty(trans))
		return -EINVAL;

	/*
	 * start traverse link list
	 * until reach the max number of OTP blocks
	 * different devices have different number of OTP blocks
	 */
	do {
		/* save current valid block address
		 * check for more block on the link list
		 */
		valid_addr = next_link_addr;
		/* link values are word counts; convert to a byte address */
		next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
		IWL_DEBUG_EEPROM(trans, "OTP blocks %d addr 0x%x\n",
				 usedblocks, next_link_addr);
		if (iwl_read_otp_word(trans, next_link_addr, &link_value))
			return -EINVAL;
		if (!link_value) {
			/*
			 * reach the end of link list, return success and
			 * set address point to the starting address
			 * of the image
			 */
			*validblockaddr = valid_addr;
			/* skip first 2 bytes (link list pointer) */
			*validblockaddr += 2;
			return 0;
		}
		/* more in the link list, continue */
		usedblocks++;
	} while (usedblocks <= trans->cfg->base_params->max_ll_items);

	/* OTP has no valid blocks */
	IWL_DEBUG_EEPROM(trans, "OTP has no valid blocks\n");
	return -EINVAL;
}
561
562/******************************************************************************
563 *
564 * Tx Power related functions
565 *
566******************************************************************************/
567/**
568 * iwl_get_max_txpower_avg - get the highest tx power from all chains.
569 * find the highest tx power from all chains for the channel
570 */
571static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
572 struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
573 int element, s8 *max_txpower_in_half_dbm)
574{
575 s8 max_txpower_avg = 0; /* (dBm) */
576
577 /* Take the highest tx power from any valid chains */
578 if ((priv->hw_params.valid_tx_ant & ANT_A) &&
579 (enhanced_txpower[element].chain_a_max > max_txpower_avg))
580 max_txpower_avg = enhanced_txpower[element].chain_a_max;
581 if ((priv->hw_params.valid_tx_ant & ANT_B) &&
582 (enhanced_txpower[element].chain_b_max > max_txpower_avg))
583 max_txpower_avg = enhanced_txpower[element].chain_b_max;
584 if ((priv->hw_params.valid_tx_ant & ANT_C) &&
585 (enhanced_txpower[element].chain_c_max > max_txpower_avg))
586 max_txpower_avg = enhanced_txpower[element].chain_c_max;
587 if (((priv->hw_params.valid_tx_ant == ANT_AB) |
588 (priv->hw_params.valid_tx_ant == ANT_BC) |
589 (priv->hw_params.valid_tx_ant == ANT_AC)) &&
590 (enhanced_txpower[element].mimo2_max > max_txpower_avg))
591 max_txpower_avg = enhanced_txpower[element].mimo2_max;
592 if ((priv->hw_params.valid_tx_ant == ANT_ABC) &&
593 (enhanced_txpower[element].mimo3_max > max_txpower_avg))
594 max_txpower_avg = enhanced_txpower[element].mimo3_max;
595
596 /*
597 * max. tx power in EEPROM is in 1/2 dBm format
598 * convert from 1/2 dBm to dBm (round-up convert)
599 * but we also do not want to loss 1/2 dBm resolution which
600 * will impact performance
601 */
602 *max_txpower_in_half_dbm = max_txpower_avg;
603 return (max_txpower_avg & 0x01) + (max_txpower_avg >> 1);
604}
605
606static void
607iwl_eeprom_enh_txp_read_element(struct iwl_priv *priv,
608 struct iwl_eeprom_enhanced_txpwr *txp,
609 s8 max_txpower_avg)
610{
611 int ch_idx;
612 bool is_ht40 = txp->flags & IWL_EEPROM_ENH_TXP_FL_40MHZ;
613 enum ieee80211_band band;
614
615 band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ?
616 IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
617
618 for (ch_idx = 0; ch_idx < priv->channel_count; ch_idx++) {
619 struct iwl_channel_info *ch_info = &priv->channel_info[ch_idx];
620
621 /* update matching channel or from common data only */
622 if (txp->channel != 0 && ch_info->channel != txp->channel)
623 continue;
624
625 /* update matching band only */
626 if (band != ch_info->band)
627 continue;
628
629 if (ch_info->max_power_avg < max_txpower_avg && !is_ht40) {
630 ch_info->max_power_avg = max_txpower_avg;
631 ch_info->curr_txpow = max_txpower_avg;
632 ch_info->scan_power = max_txpower_avg;
633 }
634
635 if (is_ht40 && ch_info->ht40_max_power_avg < max_txpower_avg)
636 ch_info->ht40_max_power_avg = max_txpower_avg;
637 }
638}
639
#define EEPROM_TXP_OFFS	(0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT)
#define EEPROM_TXP_ENTRY_LEN sizeof(struct iwl_eeprom_enhanced_txpwr)
#define EEPROM_TXP_SZ_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT_SIZE)

/* expands to "<flag-name> " when the flag is set in txp->flags, else "" */
#define TXP_CHECK_AND_PRINT(x) ((txp->flags & IWL_EEPROM_ENH_TXP_FL_##x) \
			    ? # x " " : "")

/*
 * Parse the "enhanced tx power" table out of the NVM image: for each
 * valid entry, compute its highest permitted power over the valid
 * chains, raise the driver-wide user tx-power limits when exceeded,
 * and push the limit into the per-channel info.
 */
static void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
{
	struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
	int idx, entries;
	__le16 *txp_len;
	s8 max_txp_avg, max_txp_avg_halfdbm;

	/* the parsing below assumes the 8-byte on-NVM entry layout */
	BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);

	/* the length is in 16-bit words, but we want entries */
	txp_len = (__le16 *) iwl_eeprom_query_addr(priv, EEPROM_TXP_SZ_OFFS);
	entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;

	txp_array = (void *) iwl_eeprom_query_addr(priv, EEPROM_TXP_OFFS);

	for (idx = 0; idx < entries; idx++) {
		txp = &txp_array[idx];
		/* skip invalid entries */
		if (!(txp->flags & IWL_EEPROM_ENH_TXP_FL_VALID))
			continue;

		IWL_DEBUG_EEPROM(priv, "%s %d:\t %s%s%s%s%s%s%s%s (0x%02x)\n",
				 (txp->channel && (txp->flags &
					IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE)) ?
					"Common " : (txp->channel) ?
					"Channel" : "Common",
				 (txp->channel),
				 TXP_CHECK_AND_PRINT(VALID),
				 TXP_CHECK_AND_PRINT(BAND_52G),
				 TXP_CHECK_AND_PRINT(OFDM),
				 TXP_CHECK_AND_PRINT(40MHZ),
				 TXP_CHECK_AND_PRINT(HT_AP),
				 TXP_CHECK_AND_PRINT(RES1),
				 TXP_CHECK_AND_PRINT(RES2),
				 TXP_CHECK_AND_PRINT(COMMON_TYPE),
				 txp->flags);
		IWL_DEBUG_EEPROM(priv, "\t\t chain_A: 0x%02x "
				 "chain_B: 0X%02x chain_C: 0X%02x\n",
				 txp->chain_a_max, txp->chain_b_max,
				 txp->chain_c_max);
		IWL_DEBUG_EEPROM(priv, "\t\t MIMO2: 0x%02x "
				 "MIMO3: 0x%02x High 20_on_40: 0x%02x "
				 "Low 20_on_40: 0x%02x\n",
				 txp->mimo2_max, txp->mimo3_max,
				 ((txp->delta_20_in_40 & 0xf0) >> 4),
				 (txp->delta_20_in_40 & 0x0f));

		max_txp_avg = iwl_get_max_txpower_avg(priv, txp_array, idx,
						      &max_txp_avg_halfdbm);

		/*
		 * Update the user limit values values to the highest
		 * power supported by any channel
		 */
		if (max_txp_avg > priv->tx_power_user_lmt)
			priv->tx_power_user_lmt = max_txp_avg;
		if (max_txp_avg_halfdbm > priv->tx_power_lmt_in_half_dbm)
			priv->tx_power_lmt_in_half_dbm = max_txp_avg_halfdbm;

		iwl_eeprom_enh_txp_read_element(priv, txp, max_txp_avg);
	}
}
709
/**
 * iwl_eeprom_init - read EEPROM contents
 *
 * Load the EEPROM contents from adapter into priv->eeprom
 *
 * Detects the NVM type (EEPROM vs OTP), allocates the image buffer,
 * verifies the signature, takes the driver/uCode semaphore and reads
 * the image word by word.  On OTP parts without shadow RAM the block
 * link list is walked first to locate the valid image block.  On any
 * failure the buffer is freed again before returning.
 *
 * NOTE: This routine uses the non-debug IO access functions.
 */
int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
{
	__le16 *e;
	u32 gp = iwl_read32(priv->trans, CSR_EEPROM_GP);
	int sz;
	int ret;
	u16 addr;
	u16 validblockaddr = 0;
	u16 cache_addr = 0;

	priv->nvm_device_type = iwl_get_nvm_type(priv->trans, hw_rev);
	if (priv->nvm_device_type == -ENOENT)
		return -ENOENT;
	/* allocate eeprom */
	sz = priv->cfg->base_params->eeprom_size;
	IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
	priv->eeprom = kzalloc(sz, GFP_KERNEL);
	if (!priv->eeprom) {
		ret = -ENOMEM;
		goto alloc_err;
	}
	/* the image is read as an array of little-endian 16-bit words */
	e = (__le16 *)priv->eeprom;

	ret = iwl_eeprom_verify_signature(priv);
	if (ret < 0) {
		IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
		ret = -ENOENT;
		goto err;
	}

	/* Make sure driver (instead of uCode) is allowed to read EEPROM */
	ret = iwl_eeprom_acquire_semaphore(priv->trans);
	if (ret < 0) {
		IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
		ret = -ENOENT;
		goto err;
	}

	if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) {

		ret = iwl_init_otp_access(priv->trans);
		if (ret) {
			IWL_ERR(priv, "Failed to initialize OTP access.\n");
			ret = -ENOENT;
			goto done;
		}
		/* drop any previous OTP interface ownership */
		iwl_write32(priv->trans, CSR_EEPROM_GP,
			    iwl_read32(priv->trans, CSR_EEPROM_GP) &
			    ~CSR_EEPROM_GP_IF_OWNER_MSK);

		/* acknowledge (clear) any stale ECC status bits */
		iwl_set_bit(priv->trans, CSR_OTP_GP_REG,
			    CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
			    CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
		/* traversing the linked list if no shadow ram supported */
		if (!priv->cfg->base_params->shadow_ram_support) {
			if (iwl_find_otp_image(priv->trans, &validblockaddr)) {
				ret = -ENOENT;
				goto done;
			}
		}
		for (addr = validblockaddr; addr < validblockaddr + sz;
		     addr += sizeof(u16)) {
			__le16 eeprom_data;

			ret = iwl_read_otp_word(priv->trans, addr,
						&eeprom_data);
			if (ret)
				goto done;
			e[cache_addr / 2] = eeprom_data;
			cache_addr += sizeof(u16);
		}
	} else {
		/* eeprom is an array of 16bit values */
		for (addr = 0; addr < sz; addr += sizeof(u16)) {
			u32 r;

			iwl_write32(priv->trans, CSR_EEPROM_REG,
				    CSR_EEPROM_REG_MSK_ADDR & (addr << 1));

			ret = iwl_poll_bit(priv->trans, CSR_EEPROM_REG,
					   CSR_EEPROM_REG_READ_VALID_MSK,
					   CSR_EEPROM_REG_READ_VALID_MSK,
					   IWL_EEPROM_ACCESS_TIMEOUT);
			if (ret < 0) {
				IWL_ERR(priv,
					"Time out reading EEPROM[%d]\n", addr);
				goto done;
			}
			/* the data word lives in the upper 16 bits */
			r = iwl_read32(priv->trans, CSR_EEPROM_REG);
			e[addr / 2] = cpu_to_le16(r >> 16);
		}
	}

	IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
			 (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
			 ? "OTP" : "EEPROM",
			 iwl_eeprom_query16(priv, EEPROM_VERSION));

	ret = 0;
done:
	iwl_eeprom_release_semaphore(priv->trans);

err:
	if (ret)
		iwl_eeprom_free(priv);
alloc_err:
	return ret;
}
825
/* Free the cached NVM image and clear the pointer (safe to call again). */
void iwl_eeprom_free(struct iwl_priv *priv)
{
	kfree(priv->eeprom);
	priv->eeprom = NULL;
}
831
832static void iwl_init_band_reference(struct iwl_priv *priv,
833 int eep_band, int *eeprom_ch_count,
834 const struct iwl_eeprom_channel **eeprom_ch_info,
835 const u8 **eeprom_ch_index)
836{
837 u32 offset = priv->lib->
838 eeprom_ops.regulatory_bands[eep_band - 1];
839 switch (eep_band) {
840 case 1: /* 2.4GHz band */
841 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
842 *eeprom_ch_info = (struct iwl_eeprom_channel *)
843 iwl_eeprom_query_addr(priv, offset);
844 *eeprom_ch_index = iwl_eeprom_band_1;
845 break;
846 case 2: /* 4.9GHz band */
847 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
848 *eeprom_ch_info = (struct iwl_eeprom_channel *)
849 iwl_eeprom_query_addr(priv, offset);
850 *eeprom_ch_index = iwl_eeprom_band_2;
851 break;
852 case 3: /* 5.2GHz band */
853 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
854 *eeprom_ch_info = (struct iwl_eeprom_channel *)
855 iwl_eeprom_query_addr(priv, offset);
856 *eeprom_ch_index = iwl_eeprom_band_3;
857 break;
858 case 4: /* 5.5GHz band */
859 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
860 *eeprom_ch_info = (struct iwl_eeprom_channel *)
861 iwl_eeprom_query_addr(priv, offset);
862 *eeprom_ch_index = iwl_eeprom_band_4;
863 break;
864 case 5: /* 5.7GHz band */
865 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
866 *eeprom_ch_info = (struct iwl_eeprom_channel *)
867 iwl_eeprom_query_addr(priv, offset);
868 *eeprom_ch_index = iwl_eeprom_band_5;
869 break;
870 case 6: /* 2.4GHz ht40 channels */
871 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6);
872 *eeprom_ch_info = (struct iwl_eeprom_channel *)
873 iwl_eeprom_query_addr(priv, offset);
874 *eeprom_ch_index = iwl_eeprom_band_6;
875 break;
876 case 7: /* 5 GHz ht40 channels */
877 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7);
878 *eeprom_ch_info = (struct iwl_eeprom_channel *)
879 iwl_eeprom_query_addr(priv, offset);
880 *eeprom_ch_index = iwl_eeprom_band_7;
881 break;
882 default:
883 BUG();
884 return;
885 }
886}
887
/* expands to "<flag-name> " when the flag is set in eeprom_ch->flags */
#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
			    ? # x " " : "")
/**
 * iwl_mod_ht40_chan_info - Copy ht40 channel info into driver's priv.
 *
 * Does not set up a command, or touch hardware.
 *
 * Stores @eeprom_ch as the HT40 regulatory data for (@band, @channel)
 * and, when the entry is marked VALID, clears the requested extension-
 * channel restriction bit(s).  Returns 0 on success, -1 when the
 * channel is not valid in the driver's channel map.
 */
static int iwl_mod_ht40_chan_info(struct iwl_priv *priv,
			      enum ieee80211_band band, u16 channel,
			      const struct iwl_eeprom_channel *eeprom_ch,
			      u8 clear_ht40_extension_channel)
{
	struct iwl_channel_info *ch_info;

	ch_info = (struct iwl_channel_info *)
			iwl_get_channel_info(priv, band, channel);

	if (!is_channel_valid(ch_info))
		return -1;

	IWL_DEBUG_EEPROM(priv, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
			" Ad-Hoc %ssupported\n",
			ch_info->channel,
			is_channel_a_band(ch_info) ?
			"5.2" : "2.4",
			CHECK_AND_PRINT(IBSS),
			CHECK_AND_PRINT(ACTIVE),
			CHECK_AND_PRINT(RADAR),
			CHECK_AND_PRINT(WIDE),
			CHECK_AND_PRINT(DFS),
			eeprom_ch->flags,
			eeprom_ch->max_power_avg,
			((eeprom_ch->flags & EEPROM_CHANNEL_IBSS)
			 && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ?
			"" : "not ");

	ch_info->ht40_eeprom = *eeprom_ch;
	ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
	ch_info->ht40_flags = eeprom_ch->flags;
	/* only a VALID entry may lift the extension-channel restriction */
	if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
		ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel;

	return 0;
}
932
933#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
934 ? # x " " : "")
935
/**
 * iwl_init_channel_map - Set up driver's info for all possible channels
 * @priv: driver private data; on success priv->channel_info points to a
 *	kcalloc'ed array of priv->channel_count entries
 *
 * Builds the driver's channel table from the regulatory data in the
 * EEPROM: bands 1-5 hold the 20 MHz channels (band 1 is 2.4 GHz, the
 * rest 5 GHz), bands 6-7 — when the device has them — hold the HT40
 * regulatory flags for the 2.4 and 5 GHz channel pairs.
 *
 * Return: 0 on success (also when the map was already built),
 *	-ENOMEM if the channel_info array cannot be allocated.
 */
int iwl_init_channel_map(struct iwl_priv *priv)
{
	int eeprom_ch_count = 0;
	const u8 *eeprom_ch_index = NULL;
	const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
	int band, ch;
	struct iwl_channel_info *ch_info;

	/* Build the map only once; a non-zero count means it exists */
	if (priv->channel_count) {
		IWL_DEBUG_EEPROM(priv, "Channel map already initialized.\n");
		return 0;
	}

	IWL_DEBUG_EEPROM(priv, "Initializing regulatory info from EEPROM\n");

	priv->channel_count =
	    ARRAY_SIZE(iwl_eeprom_band_1) +
	    ARRAY_SIZE(iwl_eeprom_band_2) +
	    ARRAY_SIZE(iwl_eeprom_band_3) +
	    ARRAY_SIZE(iwl_eeprom_band_4) +
	    ARRAY_SIZE(iwl_eeprom_band_5);

	IWL_DEBUG_EEPROM(priv, "Parsing data for %d channels.\n",
			 priv->channel_count);

	priv->channel_info = kcalloc(priv->channel_count,
				     sizeof(struct iwl_channel_info),
				     GFP_KERNEL);
	if (!priv->channel_info) {
		IWL_ERR(priv, "Could not allocate channel_info\n");
		/* reset count so a later retry does not skip allocation */
		priv->channel_count = 0;
		return -ENOMEM;
	}

	ch_info = priv->channel_info;

	/* Loop through the 5 EEPROM bands adding them in order to the
	 * channel map we maintain (that contains additional information than
	 * what just in the EEPROM).  ch_info walks the flat array; every
	 * EEPROM entry advances it exactly once, valid or not. */
	for (band = 1; band <= 5; band++) {

		iwl_init_band_reference(priv, band, &eeprom_ch_count,
					&eeprom_ch_info, &eeprom_ch_index);

		/* Loop through each band adding each of the channels */
		for (ch = 0; ch < eeprom_ch_count; ch++) {
			ch_info->channel = eeprom_ch_index[ch];
			/* only EEPROM band 1 carries 2.4 GHz channels */
			ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
			    IEEE80211_BAND_5GHZ;

			/* permanently store EEPROM's channel regulatory flags
			 * and max power in channel info database. */
			ch_info->eeprom = eeprom_ch_info[ch];

			/* Copy the run-time flags so they are there even on
			 * invalid channels */
			ch_info->flags = eeprom_ch_info[ch].flags;
			/* First write that ht40 is not enabled, and then enable
			 * one by one (in the HT40 band loop below) */
			ch_info->ht40_extension_channel =
					IEEE80211_CHAN_NO_HT40;

			/* invalid channels keep only the data copied above */
			if (!(is_channel_valid(ch_info))) {
				IWL_DEBUG_EEPROM(priv,
					 "Ch. %d Flags %x [%sGHz] - "
					 "No traffic\n",
					 ch_info->channel,
					 ch_info->flags,
					 is_channel_a_band(ch_info) ?
					 "5.2" : "2.4");
				ch_info++;
				continue;
			}

			/* Initialize regulatory-based run-time data */
			ch_info->max_power_avg = ch_info->curr_txpow =
			    eeprom_ch_info[ch].max_power_avg;
			ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
			ch_info->min_power = 0;

			IWL_DEBUG_EEPROM(priv, "Ch. %d [%sGHz] "
				       "%s%s%s%s%s%s(0x%02x %ddBm):"
				       " Ad-Hoc %ssupported\n",
				       ch_info->channel,
				       is_channel_a_band(ch_info) ?
				       "5.2" : "2.4",
				       CHECK_AND_PRINT_I(VALID),
				       CHECK_AND_PRINT_I(IBSS),
				       CHECK_AND_PRINT_I(ACTIVE),
				       CHECK_AND_PRINT_I(RADAR),
				       CHECK_AND_PRINT_I(WIDE),
				       CHECK_AND_PRINT_I(DFS),
				       eeprom_ch_info[ch].flags,
				       eeprom_ch_info[ch].max_power_avg,
				       ((eeprom_ch_info[ch].
					 flags & EEPROM_CHANNEL_IBSS)
					&& !(eeprom_ch_info[ch].
					     flags & EEPROM_CHANNEL_RADAR))
				       ? "" : "not ");

			ch_info++;
		}
	}

	/* Check if we do have HT40 channels; devices without them mark
	 * both HT40 regulatory bands as EEPROM_REGULATORY_BAND_NO_HT40 */
	if (priv->lib->eeprom_ops.regulatory_bands[5] ==
	    EEPROM_REGULATORY_BAND_NO_HT40 &&
	    priv->lib->eeprom_ops.regulatory_bands[6] ==
	    EEPROM_REGULATORY_BAND_NO_HT40)
		return 0;

	/* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
	for (band = 6; band <= 7; band++) {
		enum ieee80211_band ieeeband;

		iwl_init_band_reference(priv, band, &eeprom_ch_count,
					&eeprom_ch_info, &eeprom_ch_index);

		/* EEPROM band 6 is 2.4, band 7 is 5 GHz */
		ieeeband =
		    (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;

		/* Loop through each band adding each of the channels.
		 * Each EEPROM entry names the lower channel of an HT40
		 * pair; the upper channel is 4 channel numbers above it. */
		for (ch = 0; ch < eeprom_ch_count; ch++) {
			/* Set up driver's info for lower half */
			iwl_mod_ht40_chan_info(priv, ieeeband,
					       eeprom_ch_index[ch],
					       &eeprom_ch_info[ch],
					       IEEE80211_CHAN_NO_HT40PLUS);

			/* Set up driver's info for upper half */
			iwl_mod_ht40_chan_info(priv, ieeeband,
					       eeprom_ch_index[ch] + 4,
					       &eeprom_ch_info[ch],
					       IEEE80211_CHAN_NO_HT40MINUS);
		}
	}

	/* for newer device (6000 series and up)
	 * EEPROM contain enhanced tx power information
	 * driver need to process addition information
	 * to determine the max channel tx power limits
	 */
	if (priv->lib->eeprom_ops.enhanced_txpower)
		iwl_eeprom_enhanced_txpower(priv);

	return 0;
}
1087
1088/*
1089 * iwl_free_channel_map - undo allocations in iwl_init_channel_map
1090 */
1091void iwl_free_channel_map(struct iwl_priv *priv)
1092{
1093 kfree(priv->channel_info);
1094 priv->channel_count = 0;
1095}
1096
1097/**
1098 * iwl_get_channel_info - Find driver's private channel info
1099 *
1100 * Based on band and channel number.
1101 */
1102const struct iwl_channel_info *iwl_get_channel_info(const struct iwl_priv *priv,
1103 enum ieee80211_band band, u16 channel)
1104{
1105 int i;
1106
1107 switch (band) {
1108 case IEEE80211_BAND_5GHZ:
1109 for (i = 14; i < priv->channel_count; i++) {
1110 if (priv->channel_info[i].channel == channel)
1111 return &priv->channel_info[i];
1112 }
1113 break;
1114 case IEEE80211_BAND_2GHZ:
1115 if (channel >= 1 && channel <= 14)
1116 return &priv->channel_info[channel - 1];
1117 break;
1118 default:
1119 BUG();
1120 }
1121
1122 return NULL;
1123}
1124
1125void iwl_rf_config(struct iwl_priv *priv)
1126{
1127 u16 radio_cfg;
1128
1129 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
1130
1131 /* write radio config values to register */
1132 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) {
1133 iwl_set_bit(priv->trans, CSR_HW_IF_CONFIG_REG,
1134 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
1135 EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
1136 EEPROM_RF_CFG_DASH_MSK(radio_cfg));
1137 IWL_INFO(priv, "Radio type=0x%x-0x%x-0x%x\n",
1138 EEPROM_RF_CFG_TYPE_MSK(radio_cfg),
1139 EEPROM_RF_CFG_STEP_MSK(radio_cfg),
1140 EEPROM_RF_CFG_DASH_MSK(radio_cfg));
1141 } else
1142 WARN_ON(1);
1143
1144 /* set CSR_HW_CONFIG_REG for uCode use */
1145 iwl_set_bit(priv->trans, CSR_HW_IF_CONFIG_REG,
1146 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
1147 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
1148}
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
deleted file mode 100644
index 64bfd947caeb..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ /dev/null
@@ -1,269 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_eeprom_h__
64#define __iwl_eeprom_h__
65
66#include <net/mac80211.h>
67
68struct iwl_priv;
69
70/*
71 * EEPROM access time values:
72 *
73 * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
74 * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
75 * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
76 * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
77 */
78#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
79
80#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
81#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
82
83
84/*
85 * Regulatory channel usage flags in EEPROM struct iwl4965_eeprom_channel.flags.
86 *
87 * IBSS and/or AP operation is allowed *only* on those channels with
88 * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because
89 * RADAR detection is not supported by the 4965 driver, but is a
90 * requirement for establishing a new network for legal operation on channels
91 * requiring RADAR detection or restricting ACTIVE scanning.
92 *
93 * NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
94 * It only indicates that 20 MHz channel use is supported; HT40 channel
95 * usage is indicated by a separate set of regulatory flags for each
96 * HT40 channel pair.
97 *
98 * NOTE: Using a channel inappropriately will result in a uCode error!
99 */
100#define IWL_NUM_TX_CALIB_GROUPS 5
101enum {
102 EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */
103 EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */
104 /* Bit 2 Reserved */
105 EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
106 EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
107 EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
108 /* Bit 6 Reserved (was Narrow Channel) */
109 EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
110};
111
112/* SKU Capabilities */
113#define EEPROM_SKU_CAP_BAND_24GHZ (1 << 4)
114#define EEPROM_SKU_CAP_BAND_52GHZ (1 << 5)
115#define EEPROM_SKU_CAP_11N_ENABLE (1 << 6)
116#define EEPROM_SKU_CAP_AMT_ENABLE (1 << 7)
117#define EEPROM_SKU_CAP_IPAN_ENABLE (1 << 8)
118
119/* *regulatory* channel data format in eeprom, one for each channel.
120 * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
121struct iwl_eeprom_channel {
122 u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */
123 s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
124} __packed;
125
126enum iwl_eeprom_enhanced_txpwr_flags {
127 IWL_EEPROM_ENH_TXP_FL_VALID = BIT(0),
128 IWL_EEPROM_ENH_TXP_FL_BAND_52G = BIT(1),
129 IWL_EEPROM_ENH_TXP_FL_OFDM = BIT(2),
130 IWL_EEPROM_ENH_TXP_FL_40MHZ = BIT(3),
131 IWL_EEPROM_ENH_TXP_FL_HT_AP = BIT(4),
132 IWL_EEPROM_ENH_TXP_FL_RES1 = BIT(5),
133 IWL_EEPROM_ENH_TXP_FL_RES2 = BIT(6),
134 IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE = BIT(7),
135};
136
137/**
138 * iwl_eeprom_enhanced_txpwr structure
139 * This structure presents the enhanced regulatory tx power limit layout
140 * in eeprom image
141 * Enhanced regulatory tx power portion of eeprom image can be broken down
142 * into individual structures; each one is 8 bytes in size and contain the
143 * following information
144 * @flags: entry flags
145 * @channel: channel number
146 * @chain_a_max_pwr: chain a max power in 1/2 dBm
147 * @chain_b_max_pwr: chain b max power in 1/2 dBm
148 * @chain_c_max_pwr: chain c max power in 1/2 dBm
149 * @delta_20_in_40: 20-in-40 deltas (hi/lo)
150 * @mimo2_max_pwr: mimo2 max power in 1/2 dBm
151 * @mimo3_max_pwr: mimo3 max power in 1/2 dBm
152 *
153 */
154struct iwl_eeprom_enhanced_txpwr {
155 u8 flags;
156 u8 channel;
157 s8 chain_a_max;
158 s8 chain_b_max;
159 s8 chain_c_max;
160 u8 delta_20_in_40;
161 s8 mimo2_max;
162 s8 mimo3_max;
163} __packed;
164
165/* calibration */
166struct iwl_eeprom_calib_hdr {
167 u8 version;
168 u8 pa_type;
169 __le16 voltage;
170} __packed;
171
172#define EEPROM_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION)
173#define EEPROM_XTAL ((2*0x128) | EEPROM_CALIB_ALL)
174
175/* temperature */
176#define EEPROM_KELVIN_TEMPERATURE ((2*0x12A) | EEPROM_CALIB_ALL)
177#define EEPROM_RAW_TEMPERATURE ((2*0x12B) | EEPROM_CALIB_ALL)
178
179
180/* agn links */
181#define EEPROM_LINK_HOST (2*0x64)
182#define EEPROM_LINK_GENERAL (2*0x65)
183#define EEPROM_LINK_REGULATORY (2*0x66)
184#define EEPROM_LINK_CALIBRATION (2*0x67)
185#define EEPROM_LINK_PROCESS_ADJST (2*0x68)
186#define EEPROM_LINK_OTHERS (2*0x69)
187#define EEPROM_LINK_TXP_LIMIT (2*0x6a)
188#define EEPROM_LINK_TXP_LIMIT_SIZE (2*0x6b)
189
190/* agn regulatory - indirect access */
191#define EEPROM_REG_BAND_1_CHANNELS ((0x08)\
192 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 28 bytes */
193#define EEPROM_REG_BAND_2_CHANNELS ((0x26)\
194 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 26 bytes */
195#define EEPROM_REG_BAND_3_CHANNELS ((0x42)\
196 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */
197#define EEPROM_REG_BAND_4_CHANNELS ((0x5C)\
198 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */
199#define EEPROM_REG_BAND_5_CHANNELS ((0x74)\
200 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 12 bytes */
201#define EEPROM_REG_BAND_24_HT40_CHANNELS ((0x82)\
202 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */
203#define EEPROM_REG_BAND_52_HT40_CHANNELS ((0x92)\
204 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */
205
206/* 6000 regulatory - indirect access */
207#define EEPROM_6000_REG_BAND_24_HT40_CHANNELS ((0x80)\
208 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */
209/* 2.4 GHz */
210extern const u8 iwl_eeprom_band_1[14];
211
212#define ADDRESS_MSK 0x0000FFFF
213#define INDIRECT_TYPE_MSK 0x000F0000
214#define INDIRECT_HOST 0x00010000
215#define INDIRECT_GENERAL 0x00020000
216#define INDIRECT_REGULATORY 0x00030000
217#define INDIRECT_CALIBRATION 0x00040000
218#define INDIRECT_PROCESS_ADJST 0x00050000
219#define INDIRECT_OTHERS 0x00060000
220#define INDIRECT_TXP_LIMIT 0x00070000
221#define INDIRECT_TXP_LIMIT_SIZE 0x00080000
222#define INDIRECT_ADDRESS 0x00100000
223
224/* General */
225#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
226#define EEPROM_SUBSYSTEM_ID (2*0x0A) /* 2 bytes */
227#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
228#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
229#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
230#define EEPROM_VERSION (2*0x44) /* 2 bytes */
231#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
232#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
233#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
234#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
235
236/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
237#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
238#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
239#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
240#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
241#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
242#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
243
244#define EEPROM_RF_CONFIG_TYPE_MAX 0x3
245
246#define EEPROM_REGULATORY_BAND_NO_HT40 (0)
247
248struct iwl_eeprom_ops {
249 const u32 regulatory_bands[7];
250 bool enhanced_txpower;
251};
252
253
254int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev);
255void iwl_eeprom_free(struct iwl_priv *priv);
256int iwl_eeprom_check_version(struct iwl_priv *priv);
257int iwl_eeprom_init_hw_params(struct iwl_priv *priv);
258u16 iwl_eeprom_calib_version(struct iwl_priv *priv);
259const u8 *iwl_eeprom_query_addr(struct iwl_priv *priv, size_t offset);
260u16 iwl_eeprom_query16(struct iwl_priv *priv, size_t offset);
261void iwl_eeprom_get_mac(struct iwl_priv *priv, u8 *mac);
262int iwl_init_channel_map(struct iwl_priv *priv);
263void iwl_free_channel_map(struct iwl_priv *priv);
264const struct iwl_channel_info *iwl_get_channel_info(
265 const struct iwl_priv *priv,
266 enum ieee80211_band band, u16 channel);
267void iwl_rf_config(struct iwl_priv *priv);
268
269#endif /* __iwl_eeprom_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 74bce97a8600..806046641747 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -421,6 +421,8 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
421 (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4) 421 (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
422 422
423#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98) 423#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98)
424#define FH_TX_TRB_REG(_chan) (FH_MEM_LOWER_BOUND + 0x958 + (_chan) * 4)
425
424/* Instruct FH to increment the retry count of a packet when 426/* Instruct FH to increment the retry count of a packet when
425 * it is brought from the memory to TX-FIFO 427 * it is brought from the memory to TX-FIFO
426 */ 428 */
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index 081dd34d2387..66c873399aba 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -27,6 +27,7 @@
27 *****************************************************************************/ 27 *****************************************************************************/
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/device.h> 29#include <linux/device.h>
30#include <linux/export.h>
30 31
31#include "iwl-io.h" 32#include "iwl-io.h"
32#include"iwl-csr.h" 33#include"iwl-csr.h"
@@ -52,6 +53,7 @@ void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask)
52 __iwl_set_bit(trans, reg, mask); 53 __iwl_set_bit(trans, reg, mask);
53 spin_unlock_irqrestore(&trans->reg_lock, flags); 54 spin_unlock_irqrestore(&trans->reg_lock, flags);
54} 55}
56EXPORT_SYMBOL_GPL(iwl_set_bit);
55 57
56void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask) 58void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask)
57{ 59{
@@ -61,6 +63,25 @@ void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask)
61 __iwl_clear_bit(trans, reg, mask); 63 __iwl_clear_bit(trans, reg, mask);
62 spin_unlock_irqrestore(&trans->reg_lock, flags); 64 spin_unlock_irqrestore(&trans->reg_lock, flags);
63} 65}
66EXPORT_SYMBOL_GPL(iwl_clear_bit);
67
68void iwl_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
69{
70 unsigned long flags;
71 u32 v;
72
73#ifdef CONFIG_IWLWIFI_DEBUG
74 WARN_ON_ONCE(value & ~mask);
75#endif
76
77 spin_lock_irqsave(&trans->reg_lock, flags);
78 v = iwl_read32(trans, reg);
79 v &= ~mask;
80 v |= value;
81 iwl_write32(trans, reg, v);
82 spin_unlock_irqrestore(&trans->reg_lock, flags);
83}
84EXPORT_SYMBOL_GPL(iwl_set_bits_mask);
64 85
65int iwl_poll_bit(struct iwl_trans *trans, u32 addr, 86int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
66 u32 bits, u32 mask, int timeout) 87 u32 bits, u32 mask, int timeout)
@@ -76,6 +97,7 @@ int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
76 97
77 return -ETIMEDOUT; 98 return -ETIMEDOUT;
78} 99}
100EXPORT_SYMBOL_GPL(iwl_poll_bit);
79 101
80int iwl_grab_nic_access_silent(struct iwl_trans *trans) 102int iwl_grab_nic_access_silent(struct iwl_trans *trans)
81{ 103{
@@ -117,6 +139,7 @@ int iwl_grab_nic_access_silent(struct iwl_trans *trans)
117 139
118 return 0; 140 return 0;
119} 141}
142EXPORT_SYMBOL_GPL(iwl_grab_nic_access_silent);
120 143
121bool iwl_grab_nic_access(struct iwl_trans *trans) 144bool iwl_grab_nic_access(struct iwl_trans *trans)
122{ 145{
@@ -130,6 +153,7 @@ bool iwl_grab_nic_access(struct iwl_trans *trans)
130 153
131 return true; 154 return true;
132} 155}
156EXPORT_SYMBOL_GPL(iwl_grab_nic_access);
133 157
134void iwl_release_nic_access(struct iwl_trans *trans) 158void iwl_release_nic_access(struct iwl_trans *trans)
135{ 159{
@@ -144,6 +168,7 @@ void iwl_release_nic_access(struct iwl_trans *trans)
144 */ 168 */
145 mmiowb(); 169 mmiowb();
146} 170}
171EXPORT_SYMBOL_GPL(iwl_release_nic_access);
147 172
148u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg) 173u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
149{ 174{
@@ -158,6 +183,7 @@ u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
158 183
159 return value; 184 return value;
160} 185}
186EXPORT_SYMBOL_GPL(iwl_read_direct32);
161 187
162void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value) 188void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value)
163{ 189{
@@ -170,6 +196,7 @@ void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value)
170 } 196 }
171 spin_unlock_irqrestore(&trans->reg_lock, flags); 197 spin_unlock_irqrestore(&trans->reg_lock, flags);
172} 198}
199EXPORT_SYMBOL_GPL(iwl_write_direct32);
173 200
174int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask, 201int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
175 int timeout) 202 int timeout)
@@ -185,6 +212,7 @@ int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
185 212
186 return -ETIMEDOUT; 213 return -ETIMEDOUT;
187} 214}
215EXPORT_SYMBOL_GPL(iwl_poll_direct_bit);
188 216
189static inline u32 __iwl_read_prph(struct iwl_trans *trans, u32 reg) 217static inline u32 __iwl_read_prph(struct iwl_trans *trans, u32 reg)
190{ 218{
@@ -211,6 +239,7 @@ u32 iwl_read_prph(struct iwl_trans *trans, u32 reg)
211 spin_unlock_irqrestore(&trans->reg_lock, flags); 239 spin_unlock_irqrestore(&trans->reg_lock, flags);
212 return val; 240 return val;
213} 241}
242EXPORT_SYMBOL_GPL(iwl_read_prph);
214 243
215void iwl_write_prph(struct iwl_trans *trans, u32 addr, u32 val) 244void iwl_write_prph(struct iwl_trans *trans, u32 addr, u32 val)
216{ 245{
@@ -223,6 +252,7 @@ void iwl_write_prph(struct iwl_trans *trans, u32 addr, u32 val)
223 } 252 }
224 spin_unlock_irqrestore(&trans->reg_lock, flags); 253 spin_unlock_irqrestore(&trans->reg_lock, flags);
225} 254}
255EXPORT_SYMBOL_GPL(iwl_write_prph);
226 256
227void iwl_set_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask) 257void iwl_set_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask)
228{ 258{
@@ -236,6 +266,7 @@ void iwl_set_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask)
236 } 266 }
237 spin_unlock_irqrestore(&trans->reg_lock, flags); 267 spin_unlock_irqrestore(&trans->reg_lock, flags);
238} 268}
269EXPORT_SYMBOL_GPL(iwl_set_bits_prph);
239 270
240void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 reg, 271void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 reg,
241 u32 bits, u32 mask) 272 u32 bits, u32 mask)
@@ -250,6 +281,7 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 reg,
250 } 281 }
251 spin_unlock_irqrestore(&trans->reg_lock, flags); 282 spin_unlock_irqrestore(&trans->reg_lock, flags);
252} 283}
284EXPORT_SYMBOL_GPL(iwl_set_bits_mask_prph);
253 285
254void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask) 286void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask)
255{ 287{
@@ -264,9 +296,10 @@ void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask)
264 } 296 }
265 spin_unlock_irqrestore(&trans->reg_lock, flags); 297 spin_unlock_irqrestore(&trans->reg_lock, flags);
266} 298}
299EXPORT_SYMBOL_GPL(iwl_clear_bits_prph);
267 300
268void _iwl_read_targ_mem_words(struct iwl_trans *trans, u32 addr, 301void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
269 void *buf, int words) 302 void *buf, int dwords)
270{ 303{
271 unsigned long flags; 304 unsigned long flags;
272 int offs; 305 int offs;
@@ -275,24 +308,26 @@ void _iwl_read_targ_mem_words(struct iwl_trans *trans, u32 addr,
275 spin_lock_irqsave(&trans->reg_lock, flags); 308 spin_lock_irqsave(&trans->reg_lock, flags);
276 if (likely(iwl_grab_nic_access(trans))) { 309 if (likely(iwl_grab_nic_access(trans))) {
277 iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr); 310 iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
278 for (offs = 0; offs < words; offs++) 311 for (offs = 0; offs < dwords; offs++)
279 vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT); 312 vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
280 iwl_release_nic_access(trans); 313 iwl_release_nic_access(trans);
281 } 314 }
282 spin_unlock_irqrestore(&trans->reg_lock, flags); 315 spin_unlock_irqrestore(&trans->reg_lock, flags);
283} 316}
317EXPORT_SYMBOL_GPL(_iwl_read_targ_mem_dwords);
284 318
285u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr) 319u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr)
286{ 320{
287 u32 value; 321 u32 value;
288 322
289 _iwl_read_targ_mem_words(trans, addr, &value, 1); 323 _iwl_read_targ_mem_dwords(trans, addr, &value, 1);
290 324
291 return value; 325 return value;
292} 326}
327EXPORT_SYMBOL_GPL(iwl_read_targ_mem);
293 328
294int _iwl_write_targ_mem_words(struct iwl_trans *trans, u32 addr, 329int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
295 void *buf, int words) 330 void *buf, int dwords)
296{ 331{
297 unsigned long flags; 332 unsigned long flags;
298 int offs, result = 0; 333 int offs, result = 0;
@@ -301,7 +336,7 @@ int _iwl_write_targ_mem_words(struct iwl_trans *trans, u32 addr,
301 spin_lock_irqsave(&trans->reg_lock, flags); 336 spin_lock_irqsave(&trans->reg_lock, flags);
302 if (likely(iwl_grab_nic_access(trans))) { 337 if (likely(iwl_grab_nic_access(trans))) {
303 iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr); 338 iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
304 for (offs = 0; offs < words; offs++) 339 for (offs = 0; offs < dwords; offs++)
305 iwl_write32(trans, HBUS_TARG_MEM_WDAT, vals[offs]); 340 iwl_write32(trans, HBUS_TARG_MEM_WDAT, vals[offs]);
306 iwl_release_nic_access(trans); 341 iwl_release_nic_access(trans);
307 } else 342 } else
@@ -310,8 +345,10 @@ int _iwl_write_targ_mem_words(struct iwl_trans *trans, u32 addr,
310 345
311 return result; 346 return result;
312} 347}
348EXPORT_SYMBOL_GPL(_iwl_write_targ_mem_dwords);
313 349
314int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val) 350int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val)
315{ 351{
316 return _iwl_write_targ_mem_words(trans, addr, &val, 1); 352 return _iwl_write_targ_mem_dwords(trans, addr, &val, 1);
317} 353}
354EXPORT_SYMBOL_GPL(iwl_write_targ_mem);
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index abb3250164ba..50d3819739d1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -54,6 +54,8 @@ static inline u32 iwl_read32(struct iwl_trans *trans, u32 ofs)
54void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask); 54void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask);
55void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask); 55void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask);
56 56
57void iwl_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value);
58
57int iwl_poll_bit(struct iwl_trans *trans, u32 addr, 59int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
58 u32 bits, u32 mask, int timeout); 60 u32 bits, u32 mask, int timeout);
59int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask, 61int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
@@ -74,18 +76,18 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 reg,
74 u32 bits, u32 mask); 76 u32 bits, u32 mask);
75void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask); 77void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask);
76 78
77void _iwl_read_targ_mem_words(struct iwl_trans *trans, u32 addr, 79void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
78 void *buf, int words); 80 void *buf, int dwords);
79 81
80#define iwl_read_targ_mem_words(trans, addr, buf, bufsize) \ 82#define iwl_read_targ_mem_bytes(trans, addr, buf, bufsize) \
81 do { \ 83 do { \
82 BUILD_BUG_ON((bufsize) % sizeof(u32)); \ 84 BUILD_BUG_ON((bufsize) % sizeof(u32)); \
83 _iwl_read_targ_mem_words(trans, addr, buf, \ 85 _iwl_read_targ_mem_dwords(trans, addr, buf, \
84 (bufsize) / sizeof(u32));\ 86 (bufsize) / sizeof(u32));\
85 } while (0) 87 } while (0)
86 88
87int _iwl_write_targ_mem_words(struct iwl_trans *trans, u32 addr, 89int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
88 void *buf, int words); 90 void *buf, int dwords);
89 91
90u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr); 92u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr);
91int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val); 93int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val);
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
index 0066b899fe5c..c61f2070f15a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
@@ -61,6 +61,7 @@
61 * 61 *
62 *****************************************************************************/ 62 *****************************************************************************/
63#include <linux/sched.h> 63#include <linux/sched.h>
64#include <linux/export.h>
64 65
65#include "iwl-notif-wait.h" 66#include "iwl-notif-wait.h"
66 67
@@ -71,6 +72,7 @@ void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_wait)
71 INIT_LIST_HEAD(&notif_wait->notif_waits); 72 INIT_LIST_HEAD(&notif_wait->notif_waits);
72 init_waitqueue_head(&notif_wait->notif_waitq); 73 init_waitqueue_head(&notif_wait->notif_waitq);
73} 74}
75EXPORT_SYMBOL_GPL(iwl_notification_wait_init);
74 76
75void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait, 77void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
76 struct iwl_rx_packet *pkt) 78 struct iwl_rx_packet *pkt)
@@ -115,20 +117,20 @@ void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
115 if (triggered) 117 if (triggered)
116 wake_up_all(&notif_wait->notif_waitq); 118 wake_up_all(&notif_wait->notif_waitq);
117} 119}
120EXPORT_SYMBOL_GPL(iwl_notification_wait_notify);
118 121
119void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait) 122void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
120{ 123{
121 unsigned long flags;
122 struct iwl_notification_wait *wait_entry; 124 struct iwl_notification_wait *wait_entry;
123 125
124 spin_lock_irqsave(&notif_wait->notif_wait_lock, flags); 126 spin_lock(&notif_wait->notif_wait_lock);
125 list_for_each_entry(wait_entry, &notif_wait->notif_waits, list) 127 list_for_each_entry(wait_entry, &notif_wait->notif_waits, list)
126 wait_entry->aborted = true; 128 wait_entry->aborted = true;
127 spin_unlock_irqrestore(&notif_wait->notif_wait_lock, flags); 129 spin_unlock(&notif_wait->notif_wait_lock);
128 130
129 wake_up_all(&notif_wait->notif_waitq); 131 wake_up_all(&notif_wait->notif_waitq);
130} 132}
131 133EXPORT_SYMBOL_GPL(iwl_abort_notification_waits);
132 134
133void 135void
134iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait, 136iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
@@ -152,6 +154,7 @@ iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
152 list_add(&wait_entry->list, &notif_wait->notif_waits); 154 list_add(&wait_entry->list, &notif_wait->notif_waits);
153 spin_unlock_bh(&notif_wait->notif_wait_lock); 155 spin_unlock_bh(&notif_wait->notif_wait_lock);
154} 156}
157EXPORT_SYMBOL_GPL(iwl_init_notification_wait);
155 158
156int iwl_wait_notification(struct iwl_notif_wait_data *notif_wait, 159int iwl_wait_notification(struct iwl_notif_wait_data *notif_wait,
157 struct iwl_notification_wait *wait_entry, 160 struct iwl_notification_wait *wait_entry,
@@ -175,6 +178,7 @@ int iwl_wait_notification(struct iwl_notif_wait_data *notif_wait,
175 return -ETIMEDOUT; 178 return -ETIMEDOUT;
176 return 0; 179 return 0;
177} 180}
181EXPORT_SYMBOL_GPL(iwl_wait_notification);
178 182
179void iwl_remove_notification(struct iwl_notif_wait_data *notif_wait, 183void iwl_remove_notification(struct iwl_notif_wait_data *notif_wait,
180 struct iwl_notification_wait *wait_entry) 184 struct iwl_notification_wait *wait_entry)
@@ -183,3 +187,4 @@ void iwl_remove_notification(struct iwl_notif_wait_data *notif_wait,
183 list_del(&wait_entry->list); 187 list_del(&wait_entry->list);
184 spin_unlock_bh(&notif_wait->notif_wait_lock); 188 spin_unlock_bh(&notif_wait->notif_wait_lock);
185} 189}
190EXPORT_SYMBOL_GPL(iwl_remove_notification);
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index 4ef742b28e08..64886f95664f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -111,22 +111,25 @@ struct iwl_cfg;
111 * May sleep 111 * May sleep
112 * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the 112 * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
113 * HCMD the this Rx responds to. 113 * HCMD the this Rx responds to.
114 * Must be atomic. 114 * Must be atomic and called with BH disabled.
115 * @queue_full: notifies that a HW queue is full. 115 * @queue_full: notifies that a HW queue is full.
116 * Must be atomic 116 * Must be atomic and called with BH disabled.
117 * @queue_not_full: notifies that a HW queue is not full any more. 117 * @queue_not_full: notifies that a HW queue is not full any more.
118 * Must be atomic 118 * Must be atomic and called with BH disabled.
119 * @hw_rf_kill:notifies of a change in the HW rf kill switch. True means that 119 * @hw_rf_kill:notifies of a change in the HW rf kill switch. True means that
120 * the radio is killed. Must be atomic. 120 * the radio is killed. Must be atomic.
121 * @free_skb: allows the transport layer to free skbs that haven't been 121 * @free_skb: allows the transport layer to free skbs that haven't been
122 * reclaimed by the op_mode. This can happen when the driver is freed and 122 * reclaimed by the op_mode. This can happen when the driver is freed and
123 * there are Tx packets pending in the transport layer. 123 * there are Tx packets pending in the transport layer.
124 * Must be atomic 124 * Must be atomic
125 * @nic_error: error notification. Must be atomic 125 * @nic_error: error notification. Must be atomic and must be called with BH
126 * @cmd_queue_full: Called when the command queue gets full. Must be atomic. 126 * disabled.
127 * @cmd_queue_full: Called when the command queue gets full. Must be atomic and
128 * called with BH disabled.
127 * @nic_config: configure NIC, called before firmware is started. 129 * @nic_config: configure NIC, called before firmware is started.
128 * May sleep 130 * May sleep
129 * @wimax_active: invoked when WiMax becomes active. Must be atomic. 131 * @wimax_active: invoked when WiMax becomes active. Must be atomic and called
132 * with BH disabled.
130 */ 133 */
131struct iwl_op_mode_ops { 134struct iwl_op_mode_ops {
132 struct iwl_op_mode *(*start)(struct iwl_trans *trans, 135 struct iwl_op_mode *(*start)(struct iwl_trans *trans,
@@ -145,6 +148,9 @@ struct iwl_op_mode_ops {
145 void (*wimax_active)(struct iwl_op_mode *op_mode); 148 void (*wimax_active)(struct iwl_op_mode *op_mode);
146}; 149};
147 150
151int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops);
152void iwl_opmode_deregister(const char *name);
153
148/** 154/**
149 * struct iwl_op_mode - operational mode 155 * struct iwl_op_mode - operational mode
150 * 156 *
@@ -162,7 +168,6 @@ struct iwl_op_mode {
162static inline void iwl_op_mode_stop(struct iwl_op_mode *op_mode) 168static inline void iwl_op_mode_stop(struct iwl_op_mode *op_mode)
163{ 169{
164 might_sleep(); 170 might_sleep();
165
166 op_mode->ops->stop(op_mode); 171 op_mode->ops->stop(op_mode);
167} 172}
168 173
@@ -218,9 +223,4 @@ static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
218 op_mode->ops->wimax_active(op_mode); 223 op_mode->ops->wimax_active(op_mode);
219} 224}
220 225
221/*****************************************************
222* Op mode layers implementations
223******************************************************/
224extern const struct iwl_op_mode_ops iwl_dvm_ops;
225
226#endif /* __iwl_op_mode_h__ */ 226#endif /* __iwl_op_mode_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index dfd54662e3e6..9253ef1dba72 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -187,7 +187,7 @@
187#define SCD_QUEUE_STTS_REG_POS_ACTIVE (3) 187#define SCD_QUEUE_STTS_REG_POS_ACTIVE (3)
188#define SCD_QUEUE_STTS_REG_POS_WSL (4) 188#define SCD_QUEUE_STTS_REG_POS_WSL (4)
189#define SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19) 189#define SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19)
190#define SCD_QUEUE_STTS_REG_MSK (0x00FF0000) 190#define SCD_QUEUE_STTS_REG_MSK (0x017F0000)
191 191
192#define SCD_QUEUE_CTX_REG1_CREDIT_POS (8) 192#define SCD_QUEUE_CTX_REG1_CREDIT_POS (8)
193#define SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00) 193#define SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00)
diff --git a/drivers/net/wireless/iwlwifi/iwl-test.c b/drivers/net/wireless/iwlwifi/iwl-test.c
new file mode 100644
index 000000000000..81e8c7126d72
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-test.c
@@ -0,0 +1,856 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#include <linux/export.h>
65#include <net/netlink.h>
66
67#include "iwl-io.h"
68#include "iwl-fh.h"
69#include "iwl-prph.h"
70#include "iwl-trans.h"
71#include "iwl-test.h"
72#include "iwl-csr.h"
73#include "iwl-testmode.h"
74
75/*
76 * Periphery registers absolute lower bound. This is used in order to
77 * differentiate registery access through HBUS_TARG_PRPH_* and
78 * HBUS_TARG_MEM_* accesses.
79 */
80#define IWL_ABS_PRPH_START (0xA00000)
81
/*
 * The TLVs used in the gnl message policy between the kernel module and
 * user space application. iwl_testmode_gnl_msg_policy is to be carried
 * through the NL80211_CMD_TESTMODE channel regulated by nl80211.
 * See iwl-testmode.h
 */
static
struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
	/* Mandatory command selector for every testmode message. */
	[IWL_TM_ATTR_COMMAND] = { .type = NLA_U32, },

	/* Raw firmware host-command id and payload. */
	[IWL_TM_ATTR_UCODE_CMD_ID] = { .type = NLA_U8, },
	[IWL_TM_ATTR_UCODE_CMD_DATA] = { .type = NLA_UNSPEC, },

	/* Direct register access: offset plus 8/32-bit values. */
	[IWL_TM_ATTR_REG_OFFSET] = { .type = NLA_U32, },
	[IWL_TM_ATTR_REG_VALUE8] = { .type = NLA_U8, },
	[IWL_TM_ATTR_REG_VALUE32] = { .type = NLA_U32, },

	/* Replies and spontaneous RX packets sent back to user space. */
	[IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, },
	[IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, },

	[IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, },

	/* Firmware trace buffer: DMA address, dump chunks, size. */
	[IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, },
	[IWL_TM_ATTR_TRACE_DUMP] = { .type = NLA_UNSPEC, },
	[IWL_TM_ATTR_TRACE_SIZE] = { .type = NLA_U32, },

	[IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, },

	[IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, },

	/* Indirect memory (periphery/SRAM) access parameters. */
	[IWL_TM_ATTR_MEM_ADDR] = { .type = NLA_U32, },
	[IWL_TM_ATTR_BUFFER_SIZE] = { .type = NLA_U32, },
	[IWL_TM_ATTR_BUFFER_DUMP] = { .type = NLA_UNSPEC, },

	/* Device / firmware identification queries. */
	[IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, },
	[IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, },
	[IWL_TM_ATTR_FW_TYPE] = { .type = NLA_U32, },
	[IWL_TM_ATTR_FW_INST_SIZE] = { .type = NLA_U32, },
	[IWL_TM_ATTR_FW_DATA_SIZE] = { .type = NLA_U32, },

	/* Flag: forward spontaneous RX frames to user space. */
	[IWL_TM_ATTR_ENABLE_NOTIFICATION] = {.type = NLA_FLAG, },
};
124
125static inline void iwl_test_trace_clear(struct iwl_test *tst)
126{
127 memset(&tst->trace, 0, sizeof(struct iwl_test_trace));
128}
129
/*
 * Stop firmware tracing: release the coherent DMA buffer (if one was
 * successfully allocated) and clear all trace state.  No-op when tracing
 * was never enabled.
 */
static void iwl_test_trace_stop(struct iwl_test *tst)
{
	if (!tst->trace.enabled)
		return;

	/* Both addresses must be valid before handing them to the DMA API. */
	if (tst->trace.cpu_addr && tst->trace.dma_addr)
		dma_free_coherent(tst->trans->dev,
				  tst->trace.tsize,
				  tst->trace.cpu_addr,
				  tst->trace.dma_addr);

	iwl_test_trace_clear(tst);
}
143
144static inline void iwl_test_mem_clear(struct iwl_test *tst)
145{
146 memset(&tst->mem, 0, sizeof(struct iwl_test_mem));
147}
148
149static inline void iwl_test_mem_stop(struct iwl_test *tst)
150{
151 if (!tst->mem.in_read)
152 return;
153
154 iwl_test_mem_clear(tst);
155}
156
157/*
158 * Initializes the test object
159 * During the lifetime of the test object it is assumed that the transport is
160 * started. The test object should be stopped before the transport is stopped.
161 */
162void iwl_test_init(struct iwl_test *tst, struct iwl_trans *trans,
163 struct iwl_test_ops *ops)
164{
165 tst->trans = trans;
166 tst->ops = ops;
167
168 iwl_test_trace_clear(tst);
169 iwl_test_mem_clear(tst);
170}
171EXPORT_SYMBOL_GPL(iwl_test_init);
172
/*
 * Stop the test object: abort any in-progress memory read and tear down
 * the trace buffer.  Must run before the transport is stopped, since
 * iwl_test_trace_stop() frees DMA memory through tst->trans->dev.
 */
void iwl_test_free(struct iwl_test *tst)
{
	iwl_test_mem_stop(tst);
	iwl_test_trace_stop(tst);
}
EXPORT_SYMBOL_GPL(iwl_test_free);
182
/* Forward a host command to the op_mode via the registered ops vtable. */
static inline int iwl_test_send_cmd(struct iwl_test *tst,
				    struct iwl_host_cmd *cmd)
{
	return tst->ops->send_cmd(tst->trans->op_mode, cmd);
}
188
/* Ask the op_mode whether addr is a legal target-memory (SRAM) address. */
static inline bool iwl_test_valid_hw_addr(struct iwl_test *tst, u32 addr)
{
	return tst->ops->valid_hw_addr(addr);
}
193
/* Fetch the raw firmware version number from the op_mode. */
static inline u32 iwl_test_fw_ver(struct iwl_test *tst)
{
	return tst->ops->get_fw_ver(tst->trans->op_mode);
}
198
/* Allocate a reply skb of the given length via the op_mode callback. */
static inline struct sk_buff*
iwl_test_alloc_reply(struct iwl_test *tst, int len)
{
	return tst->ops->alloc_reply(tst->trans->op_mode, len);
}
204
/* Send a filled reply skb back to user space via the op_mode callback. */
static inline int iwl_test_reply(struct iwl_test *tst, struct sk_buff *skb)
{
	return tst->ops->reply(tst->trans->op_mode, skb);
}
209
/* Allocate an event (multicast) skb of the given length via the op_mode. */
static inline struct sk_buff*
iwl_test_alloc_event(struct iwl_test *tst, int len)
{
	return tst->ops->alloc_event(tst->trans->op_mode, len);
}
215
216static inline void
217iwl_test_event(struct iwl_test *tst, struct sk_buff *skb)
218{
219 return tst->ops->event(tst->trans->op_mode, skb);
220}
221
/*
 * This function handles the user application commands to the fw. The fw
 * commands are sent in a synchronuous manner. In case that the user requested
 * to get commands response, it is send to the user.
 *
 * Requires IWL_TM_ATTR_UCODE_CMD_ID and IWL_TM_ATTR_UCODE_CMD_DATA;
 * IWL_TM_ATTR_UCODE_CMD_SKB (flag) additionally requests the response
 * packet, which is copied out of the RX page and returned as a netlink
 * reply.
 */
static int iwl_test_fw_cmd(struct iwl_test *tst, struct nlattr **tb)
{
	struct iwl_host_cmd cmd;
	struct iwl_rx_packet *pkt;
	struct sk_buff *skb;
	void *reply_buf;
	u32 reply_len;
	int ret;
	bool cmd_want_skb;

	memset(&cmd, 0, sizeof(struct iwl_host_cmd));

	if (!tb[IWL_TM_ATTR_UCODE_CMD_ID] ||
	    !tb[IWL_TM_ATTR_UCODE_CMD_DATA]) {
		IWL_ERR(tst->trans, "Missing fw command mandatory fields\n");
		return -ENOMSG;
	}

	/* Synchronous command; CMD_WANT_SKB keeps the response for us. */
	cmd.flags = CMD_ON_DEMAND | CMD_SYNC;
	cmd_want_skb = nla_get_flag(tb[IWL_TM_ATTR_UCODE_CMD_SKB]);
	if (cmd_want_skb)
		cmd.flags |= CMD_WANT_SKB;

	/* NOCOPY: the netlink attribute data is used in place as payload. */
	cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]);
	cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
	cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
	cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
	IWL_DEBUG_INFO(tst->trans, "test fw cmd=0x%x, flags 0x%x, len %d\n",
		       cmd.id, cmd.flags, cmd.len[0]);

	ret = iwl_test_send_cmd(tst, &cmd);
	if (ret) {
		IWL_ERR(tst->trans, "Failed to send hcmd\n");
		return ret;
	}
	if (!cmd_want_skb)
		return ret;

	/* Handling return of SKB to the user */
	pkt = cmd.resp_pkt;
	if (!pkt) {
		IWL_ERR(tst->trans, "HCMD received a null response packet\n");
		return ret;
	}

	/* +20 leaves headroom for the netlink attribute headers. */
	reply_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
	skb = iwl_test_alloc_reply(tst, reply_len + 20);
	reply_buf = kmalloc(reply_len, GFP_KERNEL);
	if (!skb || !reply_buf) {
		kfree_skb(skb);
		kfree(reply_buf);
		return -ENOMEM;
	}

	/* The reply is in a page, that we cannot send to user space. */
	memcpy(reply_buf, &(pkt->hdr), reply_len);
	iwl_free_resp(&cmd);

	if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
			IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
	    nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf))
		goto nla_put_failure;
	return iwl_test_reply(tst, skb);

nla_put_failure:
	IWL_DEBUG_INFO(tst->trans, "Failed creating NL attributes\n");
	kfree(reply_buf);
	kfree_skb(skb);
	return -ENOMSG;
}
297
/*
 * Handles the user application commands for register access.
 *
 * Supports direct 32-bit read, 32-bit write and 8-bit write, bounded to
 * offsets below FH_MEM_UPPER_BOUND (FH/CSR/HBUS space).  A read replies
 * to user space with IWL_TM_ATTR_REG_VALUE32.
 */
static int iwl_test_reg(struct iwl_test *tst, struct nlattr **tb)
{
	u32 ofs, val32, cmd;
	u8 val8;
	struct sk_buff *skb;
	int status = 0;
	struct iwl_trans *trans = tst->trans;

	if (!tb[IWL_TM_ATTR_REG_OFFSET]) {
		IWL_ERR(trans, "Missing reg offset\n");
		return -ENOMSG;
	}

	ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]);
	IWL_DEBUG_INFO(trans, "test reg access cmd offset=0x%x\n", ofs);

	cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);

	/*
	 * Allow access only to FH/CSR/HBUS in direct mode.
	 * Since we don't have the upper bounds for the CSR and HBUS segments,
	 * we will use only the upper bound of FH for sanity check.
	 */
	if (ofs >= FH_MEM_UPPER_BOUND) {
		IWL_ERR(trans, "offset out of segment (0x0 - 0x%x)\n",
			FH_MEM_UPPER_BOUND);
		return -EINVAL;
	}

	switch (cmd) {
	case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
		val32 = iwl_read_direct32(tst->trans, ofs);
		IWL_DEBUG_INFO(trans, "32 value to read 0x%x\n", val32);

		/* 20 bytes of headroom for the single u32 attribute. */
		skb = iwl_test_alloc_reply(tst, 20);
		if (!skb) {
			IWL_ERR(trans, "Memory allocation fail\n");
			return -ENOMEM;
		}
		if (nla_put_u32(skb, IWL_TM_ATTR_REG_VALUE32, val32))
			goto nla_put_failure;
		status = iwl_test_reply(tst, skb);
		if (status < 0)
			IWL_ERR(trans, "Error sending msg : %d\n", status);
		break;

	case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
		if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
			IWL_ERR(trans, "Missing value to write\n");
			return -ENOMSG;
		} else {
			val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
			IWL_DEBUG_INFO(trans, "32b write val=0x%x\n", val32);
			iwl_write_direct32(tst->trans, ofs, val32);
		}
		break;

	case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
		if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
			IWL_ERR(trans, "Missing value to write\n");
			return -ENOMSG;
		} else {
			val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
			IWL_DEBUG_INFO(trans, "8b write val=0x%x\n", val8);
			iwl_write8(tst->trans, ofs, val8);
		}
		break;

	default:
		IWL_ERR(trans, "Unknown test register cmd ID\n");
		return -ENOMSG;
	}

	return status;

nla_put_failure:
	kfree_skb(skb);
	return -EMSGSIZE;
}
380
/*
 * Handles the request to start FW tracing. Allocates of the trace buffer
 * and sends a reply to user space with the address of the allocated buffer.
 *
 * The requested size (default TRACE_BUFF_SIZE_DEF) is clamped to
 * [TRACE_BUFF_SIZE_MIN, TRACE_BUFF_SIZE_MAX]; TRACE_BUFF_PADD extra bytes
 * are allocated so the buffer handed to firmware can be 0x100-aligned.
 */
static int iwl_test_trace_begin(struct iwl_test *tst, struct nlattr **tb)
{
	struct sk_buff *skb;
	int status = 0;

	/* Only one trace session at a time. */
	if (tst->trace.enabled)
		return -EBUSY;

	if (!tb[IWL_TM_ATTR_TRACE_SIZE])
		tst->trace.size = TRACE_BUFF_SIZE_DEF;
	else
		tst->trace.size =
			nla_get_u32(tb[IWL_TM_ATTR_TRACE_SIZE]);

	if (!tst->trace.size)
		return -EINVAL;

	if (tst->trace.size < TRACE_BUFF_SIZE_MIN ||
	    tst->trace.size > TRACE_BUFF_SIZE_MAX)
		return -EINVAL;

	tst->trace.tsize = tst->trace.size + TRACE_BUFF_PADD;
	tst->trace.cpu_addr = dma_alloc_coherent(tst->trans->dev,
						 tst->trace.tsize,
						 &tst->trace.dma_addr,
						 GFP_KERNEL);
	if (!tst->trace.cpu_addr)
		return -ENOMEM;

	tst->trace.enabled = true;
	tst->trace.trace_addr = (u8 *)PTR_ALIGN(tst->trace.cpu_addr, 0x100);

	/* Fill with a recognizable pattern so unwritten areas stand out. */
	memset(tst->trace.trace_addr, 0x03B, tst->trace.size);

	skb = iwl_test_alloc_reply(tst, sizeof(tst->trace.dma_addr) + 20);
	if (!skb) {
		IWL_ERR(tst->trans, "Memory allocation fail\n");
		iwl_test_trace_stop(tst);
		return -ENOMEM;
	}

	if (nla_put(skb, IWL_TM_ATTR_TRACE_ADDR,
		    sizeof(tst->trace.dma_addr),
		    (u64 *)&tst->trace.dma_addr))
		goto nla_put_failure;

	status = iwl_test_reply(tst, skb);
	if (status < 0)
		IWL_ERR(tst->trans, "Error sending msg : %d\n", status);

	tst->trace.nchunks = DIV_ROUND_UP(tst->trace.size,
					  DUMP_CHUNK_SIZE);

	return status;

nla_put_failure:
	kfree_skb(skb);
	/*
	 * NOTE(review): this command check is always true here — this
	 * function is only reached for IWL_TM_CMD_APP2DEV_BEGIN_TRACE —
	 * so the trace is always torn down on failure; presumably a
	 * leftover from shared error handling.
	 */
	if (nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) ==
	    IWL_TM_CMD_APP2DEV_BEGIN_TRACE)
		iwl_test_trace_stop(tst);
	return -EMSGSIZE;
}
447
/*
 * Handles indirect read from the periphery or the SRAM. The read is performed
 * to a temporary buffer. The user space application should later issue a dump
 *
 * size must be a multiple of 4.  Periphery addresses (>= IWL_ABS_PRPH_START,
 * below IWL_ABS_PRPH_START + PRPH_END) are read word-by-word through the
 * HBUS auto-increment window under reg_lock + NIC access; anything else is
 * treated as target memory (SRAM).
 * NOTE(review): unlike the write path, the SRAM branch does not call
 * iwl_test_valid_hw_addr() — any non-periphery address is read as-is.
 */
static int iwl_test_indirect_read(struct iwl_test *tst, u32 addr, u32 size)
{
	struct iwl_trans *trans = tst->trans;
	unsigned long flags;
	int i;

	if (size & 0x3)
		return -EINVAL;

	tst->mem.size = size;
	tst->mem.addr = kmalloc(tst->mem.size, GFP_KERNEL);
	if (tst->mem.addr == NULL)
		return -ENOMEM;

	/* Hard-coded periphery absolute address */
	if (IWL_ABS_PRPH_START <= addr &&
	    addr < IWL_ABS_PRPH_START + PRPH_END) {
		spin_lock_irqsave(&trans->reg_lock, flags);
		iwl_grab_nic_access(trans);
		/* (3 << 24) selects auto-increment read mode. */
		iwl_write32(trans, HBUS_TARG_PRPH_RADDR,
			    addr | (3 << 24));
		for (i = 0; i < size; i += 4)
			*(u32 *)(tst->mem.addr + i) =
				iwl_read32(trans, HBUS_TARG_PRPH_RDAT);
		iwl_release_nic_access(trans);
		spin_unlock_irqrestore(&trans->reg_lock, flags);
	} else { /* target memory (SRAM) */
		_iwl_read_targ_mem_dwords(trans, addr,
					  tst->mem.addr,
					  tst->mem.size / 4);
	}

	/* Prepare chunked dump state for the follow-up dump command. */
	tst->mem.nchunks =
		DIV_ROUND_UP(tst->mem.size, DUMP_CHUNK_SIZE);
	tst->mem.in_read = true;
	return 0;

}
490
/*
 * Handles indirect write to the periphery or SRAM. The is performed to a
 * temporary buffer.
 *
 * Periphery writes of 1-3 bytes use a single HBUS transaction encoding the
 * length in bits 24+; >= 4 bytes must be DWORD-multiple and go through
 * iwl_write_prph().  Other addresses must pass iwl_test_valid_hw_addr()
 * and are written as SRAM dwords.
 */
static int iwl_test_indirect_write(struct iwl_test *tst, u32 addr,
	u32 size, unsigned char *buf)
{
	struct iwl_trans *trans = tst->trans;
	u32 val, i;
	unsigned long flags;

	if (IWL_ABS_PRPH_START <= addr &&
	    addr < IWL_ABS_PRPH_START + PRPH_END) {
		/* Periphery writes can be 1-3 bytes long, or DWORDs */
		if (size < 4) {
			memcpy(&val, buf, size);
			spin_lock_irqsave(&trans->reg_lock, flags);
			iwl_grab_nic_access(trans);
			iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
				    (addr & 0x0000FFFF) |
				    ((size - 1) << 24));
			iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
			iwl_release_nic_access(trans);
			/* needed after consecutive writes w/o read */
			mmiowb();
			spin_unlock_irqrestore(&trans->reg_lock, flags);
		} else {
			if (size % 4)
				return -EINVAL;
			for (i = 0; i < size; i += 4)
				iwl_write_prph(trans, addr+i,
					       *(u32 *)(buf+i));
		}
	} else if (iwl_test_valid_hw_addr(tst, addr)) {
		_iwl_write_targ_mem_dwords(trans, addr, buf, size / 4);
	} else {
		return -EINVAL;
	}
	return 0;
}
531
532/*
533 * Handles the user application commands for indirect read/write
534 * to/from the periphery or the SRAM.
535 */
536static int iwl_test_indirect_mem(struct iwl_test *tst, struct nlattr **tb)
537{
538 u32 addr, size, cmd;
539 unsigned char *buf;
540
541 /* Both read and write should be blocked, for atomicity */
542 if (tst->mem.in_read)
543 return -EBUSY;
544
545 cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
546 if (!tb[IWL_TM_ATTR_MEM_ADDR]) {
547 IWL_ERR(tst->trans, "Error finding memory offset address\n");
548 return -ENOMSG;
549 }
550 addr = nla_get_u32(tb[IWL_TM_ATTR_MEM_ADDR]);
551 if (!tb[IWL_TM_ATTR_BUFFER_SIZE]) {
552 IWL_ERR(tst->trans, "Error finding size for memory reading\n");
553 return -ENOMSG;
554 }
555 size = nla_get_u32(tb[IWL_TM_ATTR_BUFFER_SIZE]);
556
557 if (cmd == IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ) {
558 return iwl_test_indirect_read(tst, addr, size);
559 } else {
560 if (!tb[IWL_TM_ATTR_BUFFER_DUMP])
561 return -EINVAL;
562 buf = (unsigned char *)nla_data(tb[IWL_TM_ATTR_BUFFER_DUMP]);
563 return iwl_test_indirect_write(tst, addr, size, buf);
564 }
565}
566
/*
 * Enable notifications to user space
 *
 * Records the NLA_FLAG attribute; when set, spontaneous RX frames are
 * forwarded to user space by iwl_test_rx().
 */
static int iwl_test_notifications(struct iwl_test *tst,
				  struct nlattr **tb)
{
	tst->notify = nla_get_flag(tb[IWL_TM_ATTR_ENABLE_NOTIFICATION]);
	return 0;
}
576
577/*
578 * Handles the request to get the device id
579 */
580static int iwl_test_get_dev_id(struct iwl_test *tst, struct nlattr **tb)
581{
582 u32 devid = tst->trans->hw_id;
583 struct sk_buff *skb;
584 int status;
585
586 IWL_DEBUG_INFO(tst->trans, "hw version: 0x%x\n", devid);
587
588 skb = iwl_test_alloc_reply(tst, 20);
589 if (!skb) {
590 IWL_ERR(tst->trans, "Memory allocation fail\n");
591 return -ENOMEM;
592 }
593
594 if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid))
595 goto nla_put_failure;
596 status = iwl_test_reply(tst, skb);
597 if (status < 0)
598 IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
599
600 return 0;
601
602nla_put_failure:
603 kfree_skb(skb);
604 return -EMSGSIZE;
605}
606
607/*
608 * Handles the request to get the FW version
609 */
610static int iwl_test_get_fw_ver(struct iwl_test *tst, struct nlattr **tb)
611{
612 struct sk_buff *skb;
613 int status;
614 u32 ver = iwl_test_fw_ver(tst);
615
616 IWL_DEBUG_INFO(tst->trans, "uCode version raw: 0x%x\n", ver);
617
618 skb = iwl_test_alloc_reply(tst, 20);
619 if (!skb) {
620 IWL_ERR(tst->trans, "Memory allocation fail\n");
621 return -ENOMEM;
622 }
623
624 if (nla_put_u32(skb, IWL_TM_ATTR_FW_VERSION, ver))
625 goto nla_put_failure;
626
627 status = iwl_test_reply(tst, skb);
628 if (status < 0)
629 IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
630
631 return 0;
632
633nla_put_failure:
634 kfree_skb(skb);
635 return -EMSGSIZE;
636}
637
/*
 * Parse the netlink message and validate that the IWL_TM_ATTR_CMD exists
 *
 * Fills tb[] according to iwl_testmode_gnl_msg_policy.  The maxtype of
 * IWL_TM_ATTR_MAX - 1 matches the policy array sized IWL_TM_ATTR_MAX.
 * Returns 0 on success, the nla_parse error, or -ENOMSG when the
 * mandatory command attribute is missing.
 */
int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb,
		   void *data, int len)
{
	int result;

	result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
			   iwl_testmode_gnl_msg_policy);
	if (result) {
		IWL_ERR(tst->trans, "Fail parse gnl msg: %d\n", result);
		return result;
	}

	/* IWL_TM_ATTR_COMMAND is absolutely mandatory */
	if (!tb[IWL_TM_ATTR_COMMAND]) {
		IWL_ERR(tst->trans, "Missing testmode command type\n");
		return -ENOMSG;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(iwl_test_parse);
661
/*
 * Handle test commands.
 * Returns 1 for unknown commands (not handled by the test object); negative
 * value in case of error.
 *
 * Pure dispatch on IWL_TM_ATTR_COMMAND; tb must already have passed
 * iwl_test_parse().
 */
int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb)
{
	int result;

	switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
	case IWL_TM_CMD_APP2DEV_UCODE:
		IWL_DEBUG_INFO(tst->trans, "test cmd to uCode\n");
		result = iwl_test_fw_cmd(tst, tb);
		break;

	case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
	case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
	case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
		IWL_DEBUG_INFO(tst->trans, "test cmd to register\n");
		result = iwl_test_reg(tst, tb);
		break;

	case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
		IWL_DEBUG_INFO(tst->trans, "test uCode trace cmd to driver\n");
		result = iwl_test_trace_begin(tst, tb);
		break;

	case IWL_TM_CMD_APP2DEV_END_TRACE:
		iwl_test_trace_stop(tst);
		result = 0;
		break;

	case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
	case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
		IWL_DEBUG_INFO(tst->trans, "test indirect memory cmd\n");
		result = iwl_test_indirect_mem(tst, tb);
		break;

	case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
		IWL_DEBUG_INFO(tst->trans, "test notifications cmd\n");
		result = iwl_test_notifications(tst, tb);
		break;

	case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
		IWL_DEBUG_INFO(tst->trans, "test get FW ver cmd\n");
		result = iwl_test_get_fw_ver(tst, tb);
		break;

	case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
		IWL_DEBUG_INFO(tst->trans, "test Get device ID cmd\n");
		result = iwl_test_get_dev_id(tst, tb);
		break;

	default:
		/* Not ours; the caller may route it elsewhere. */
		IWL_DEBUG_INFO(tst->trans, "Unknown test command\n");
		result = 1;
		break;
	}
	return result;
}
EXPORT_SYMBOL_GPL(iwl_test_handle_cmd);
723
/*
 * Dump one DUMP_CHUNK_SIZE chunk of the firmware trace buffer per netlink
 * dump callback invocation; cb->args[4] tracks the current chunk index.
 * Returns -ENOENT once all chunks have been delivered.
 */
static int iwl_test_trace_dump(struct iwl_test *tst, struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	int idx, length;

	if (!tst->trace.enabled || !tst->trace.trace_addr)
		return -EFAULT;

	idx = cb->args[4];
	if (idx >= tst->trace.nchunks)
		return -ENOENT;

	/* Last chunk may be shorter than DUMP_CHUNK_SIZE. */
	length = DUMP_CHUNK_SIZE;
	if (((idx + 1) == tst->trace.nchunks) &&
	    (tst->trace.size % DUMP_CHUNK_SIZE))
		length = tst->trace.size %
			DUMP_CHUNK_SIZE;

	if (nla_put(skb, IWL_TM_ATTR_TRACE_DUMP, length,
		    tst->trace.trace_addr + (DUMP_CHUNK_SIZE * idx)))
		goto nla_put_failure;

	cb->args[4] = ++idx;
	return 0;

 nla_put_failure:
	return -ENOBUFS;
}
752
/*
 * Dump one DUMP_CHUNK_SIZE chunk of the indirect-read buffer per netlink
 * dump callback invocation; cb->args[4] tracks the current chunk index.
 * Clears the read state and returns -ENOENT after the final chunk.
 */
static int iwl_test_buffer_dump(struct iwl_test *tst, struct sk_buff *skb,
				struct netlink_callback *cb)
{
	int idx, length;

	if (!tst->mem.in_read)
		return -EFAULT;

	idx = cb->args[4];
	if (idx >= tst->mem.nchunks) {
		/* All chunks delivered; release the temporary buffer state. */
		iwl_test_mem_stop(tst);
		return -ENOENT;
	}

	/* Last chunk may be shorter than DUMP_CHUNK_SIZE. */
	length = DUMP_CHUNK_SIZE;
	if (((idx + 1) == tst->mem.nchunks) &&
	    (tst->mem.size % DUMP_CHUNK_SIZE))
		length = tst->mem.size % DUMP_CHUNK_SIZE;

	if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, length,
		    tst->mem.addr + (DUMP_CHUNK_SIZE * idx)))
		goto nla_put_failure;

	cb->args[4] = ++idx;
	return 0;

 nla_put_failure:
	return -ENOBUFS;
}
782
/*
 * Handle dump commands.
 * Returns 1 for unknown commands (not handled by the test object); negative
 * value in case of error.
 */
int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb,
		  struct netlink_callback *cb)
{
	int result;

	switch (cmd) {
	case IWL_TM_CMD_APP2DEV_READ_TRACE:
		IWL_DEBUG_INFO(tst->trans, "uCode trace cmd\n");
		result = iwl_test_trace_dump(tst, skb, cb);
		break;

	case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP:
		IWL_DEBUG_INFO(tst->trans, "testmode sram dump cmd\n");
		result = iwl_test_buffer_dump(tst, skb, cb);
		break;

	default:
		result = 1;
		break;
	}
	return result;
}
EXPORT_SYMBOL_GPL(iwl_test_dump);
811
/*
 * Multicast a spontaneous messages from the device to the user space.
 *
 * Copies the RX packet (including its len_n_flags word) into an event skb
 * tagged IWL_TM_CMD_DEV2APP_UCODE_RX_PKT and hands it to the op_mode for
 * delivery.  Failures are logged and the frame is dropped.
 */
static void iwl_test_send_rx(struct iwl_test *tst,
			     struct iwl_rx_cmd_buffer *rxb)
{
	struct sk_buff *skb;
	struct iwl_rx_packet *data;
	int length;

	data = rxb_addr(rxb);
	length = le32_to_cpu(data->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;

	/* the length doesn't include len_n_flags field, so add it manually */
	length += sizeof(__le32);

	/* +20 leaves headroom for the netlink attribute headers. */
	skb = iwl_test_alloc_event(tst, length + 20);
	if (skb == NULL) {
		IWL_ERR(tst->trans, "Out of memory for message to user\n");
		return;
	}

	if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
			IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
	    nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, length, data))
		goto nla_put_failure;

	iwl_test_event(tst, skb);
	return;

nla_put_failure:
	kfree_skb(skb);
	IWL_ERR(tst->trans, "Ouch, overran buffer, check allocation!\n");
}
846
847/*
848 * Called whenever a Rx frames is recevied from the device. If notifications to
849 * the user space are requested, sends the frames to the user.
850 */
851void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb)
852{
853 if (tst->notify)
854 iwl_test_send_rx(tst, rxb);
855}
856EXPORT_SYMBOL_GPL(iwl_test_rx);
diff --git a/drivers/net/wireless/iwlwifi/iwl-test.h b/drivers/net/wireless/iwlwifi/iwl-test.h
new file mode 100644
index 000000000000..e13ffa8acc02
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-test.h
@@ -0,0 +1,161 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#ifndef __IWL_TEST_H__
65#define __IWL_TEST_H__
66
67#include <linux/types.h>
68#include "iwl-trans.h"
69
/* State of a user-requested uCode trace buffer (DMA-coherent allocation). */
70struct iwl_test_trace {
71 u32 size; /* usable trace buffer size — presumably excludes padding; TODO confirm vs tsize */
72 u32 tsize; /* total allocated size, presumably size + alignment padding */
73 u32 nchunks; /* number of chunks when dumping the trace to user space */
74 u8 *cpu_addr; /* CPU address returned by the DMA allocation */
75 u8 *trace_addr; /* aligned start of trace data within cpu_addr */
76 dma_addr_t dma_addr; /* device-visible DMA address of the buffer */
77 bool enabled; /* true while a trace buffer is allocated */
78};
79
/* State of a user-requested indirect memory read (SRAM/periphery dump). */
80struct iwl_test_mem {
81 u32 size; /* size in bytes of the buffer at addr */
82 u32 nchunks; /* number of chunks when dumping the buffer to user space */
83 u8 *addr; /* kernel buffer holding data read from the device */
84 bool in_read; /* true while a read/dump is in progress — presumably guards reuse; TODO confirm */
85};
86
87/*
88 * struct iwl_test_ops: callbacks to the op mode
89 *
90 * The structure defines the callbacks that the op_mode should handle,
91 * in order to handle logic that is out of the scope of iwl_test. The
92 * op_mode must set all the callbacks.
93 *
94 * @send_cmd: handler that is used by the test object to request the
95 * op_mode to send a command to the fw.
96 *
97 * @valid_hw_addr: handler that is used by the test object to request the
98 * op_mode to check if the given address is a valid address.
99 *
100 * @get_fw_ver: handler used to get the FW version.
101 *
102 * @alloc_reply: handler used by the test object to request the op_mode
103 * to allocate an skb for sending a reply to the user, and initialize
104 * the skb. It is assumed that the test object only fills the required
105 * attributes.
106 *
107 * @reply: handler used by the test object to request the op_mode to reply
108 * to a request. The skb is an skb previously allocated by the
109 * alloc_reply callback.
110 *
111 * @alloc_event: handler used by the test object to request the op_mode
112 * to allocate an skb for sending an event, and initialize
113 * the skb. It is assumed that the test object only fills the required
114 * attributes.
115 *
116 * @event: handler used by the test object to request the op_mode to send
117 * an event. The skb is an skb previously allocated by the
118 * alloc_event callback.
119 */
120struct iwl_test_ops {
121 int (*send_cmd)(struct iwl_op_mode *op_modes,
122 struct iwl_host_cmd *cmd);
123 bool (*valid_hw_addr)(u32 addr);
124 u32 (*get_fw_ver)(struct iwl_op_mode *op_mode);
125
126 struct sk_buff *(*alloc_reply)(struct iwl_op_mode *op_mode, int len);
127 int (*reply)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
128 struct sk_buff* (*alloc_event)(struct iwl_op_mode *op_mode, int len);
129 void (*event)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
130};
131
/* Core state of the shared testmode layer. */
132struct iwl_test {
133 struct iwl_trans *trans; /* transport used for register/memory access */
134 struct iwl_test_ops *ops; /* op_mode callbacks; all must be set */
135 struct iwl_test_trace trace; /* uCode trace buffer state */
136 struct iwl_test_mem mem; /* indirect memory read state */
137 bool notify; /* forward spontaneous Rx packets to user space */
138};
139
140void iwl_test_init(struct iwl_test *tst, struct iwl_trans *trans,
141 struct iwl_test_ops *ops);
142
143void iwl_test_free(struct iwl_test *tst);
144
145int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb,
146 void *data, int len);
147
148int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb);
149
150int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb,
151 struct netlink_callback *cb);
152
153void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb);
154
/* Enable or disable forwarding of spontaneous Rx packets to user space
 * (consumed by iwl_test_rx). */
155static inline void iwl_test_enable_notifications(struct iwl_test *tst,
156 bool enable)
157{
158 tst->notify = enable;
159}
160
161#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.c b/drivers/net/wireless/iwlwifi/iwl-testmode.c
deleted file mode 100644
index 060aac3e22f1..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-testmode.c
+++ /dev/null
@@ -1,1114 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#include <linux/init.h>
64#include <linux/kernel.h>
65#include <linux/module.h>
66#include <linux/dma-mapping.h>
67#include <net/net_namespace.h>
68#include <linux/netdevice.h>
69#include <net/cfg80211.h>
70#include <net/mac80211.h>
71#include <net/netlink.h>
72
73#include "iwl-dev.h"
74#include "iwl-debug.h"
75#include "iwl-io.h"
76#include "iwl-agn.h"
77#include "iwl-testmode.h"
78#include "iwl-trans.h"
79#include "iwl-fh.h"
80#include "iwl-prph.h"
81
82
83/* Periphery registers absolute lower bound. This is used in order to
84 * differentiate register access through HBUS_TARG_PRPH_* and
85 * HBUS_TARG_MEM_* accesses.
86 */
87#define IWL_TM_ABS_PRPH_START (0xA00000)
88
89/* The TLVs used in the gnl message policy between the kernel module and
90 * user space application. iwl_testmode_gnl_msg_policy is to be carried
91 * through the NL80211_CMD_TESTMODE channel regulated by nl80211.
92 * See iwl-testmode.h for the attribute definitions.
93 */
94static
95struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
96 [IWL_TM_ATTR_COMMAND] = { .type = NLA_U32, },
97
98 [IWL_TM_ATTR_UCODE_CMD_ID] = { .type = NLA_U8, },
99 [IWL_TM_ATTR_UCODE_CMD_DATA] = { .type = NLA_UNSPEC, },
100
101 [IWL_TM_ATTR_REG_OFFSET] = { .type = NLA_U32, },
102 [IWL_TM_ATTR_REG_VALUE8] = { .type = NLA_U8, },
103 [IWL_TM_ATTR_REG_VALUE32] = { .type = NLA_U32, },
104
105 [IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, },
106 [IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, },
107
108 [IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, },
109
110 [IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, },
111 [IWL_TM_ATTR_TRACE_DUMP] = { .type = NLA_UNSPEC, },
112 [IWL_TM_ATTR_TRACE_SIZE] = { .type = NLA_U32, },
113
114 [IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, },
115
116 [IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, },
117
118 [IWL_TM_ATTR_MEM_ADDR] = { .type = NLA_U32, },
119 [IWL_TM_ATTR_BUFFER_SIZE] = { .type = NLA_U32, },
120 [IWL_TM_ATTR_BUFFER_DUMP] = { .type = NLA_UNSPEC, },
121
122 [IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, },
123 [IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, },
124 [IWL_TM_ATTR_FW_TYPE] = { .type = NLA_U32, },
125 [IWL_TM_ATTR_FW_INST_SIZE] = { .type = NLA_U32, },
126 [IWL_TM_ATTR_FW_DATA_SIZE] = { .type = NLA_U32, },
127
128 [IWL_TM_ATTR_ENABLE_NOTIFICATION] = {.type = NLA_FLAG, },
129};
130
131/*
132 * See the struct iwl_rx_packet in iwl-commands.h for the format of the
133 * received events from the device.
134 */
/* Returns the Rx packet payload length in bytes, or 0 if there is no packet. */
135static inline int get_event_length(struct iwl_rx_cmd_buffer *rxb)
136{
137 struct iwl_rx_packet *pkt = rxb_addr(rxb);
138 if (pkt)
139 return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
140 else
141 return 0;
142}
143
144
145/*
146 * This function multicasts the spontaneous messages from the device to the
147 * user space. It is invoked whenever there is a received message
148 * from the device. This function is called within the ISR of the rx handlers
149 * in iwlagn driver.
150 *
151 * The parsing of the message content is left to the user space application;
152 * the message content is treated as opaque raw data and is encapsulated
153 * with IWL_TM_ATTR_UCODE_RX_PKT multicasting to the user space.
154 *
155 * @priv: the instance of iwlwifi device
156 * @rxb: pointer to rx data content received by the ISR
157 *
158 * See the message policies and TLVs in iwl_testmode_gnl_msg_policy[].
159 * For the messages multicasting to the user application, the mandatory
160 * TLV fields are :
161 * IWL_TM_ATTR_COMMAND must be IWL_TM_CMD_DEV2APP_UCODE_RX_PKT
162 * IWL_TM_ATTR_UCODE_RX_PKT for carrying the message content
163 */
164
165static void iwl_testmode_ucode_rx_pkt(struct iwl_priv *priv,
166 struct iwl_rx_cmd_buffer *rxb)
167{
168 struct ieee80211_hw *hw = priv->hw;
169 struct sk_buff *skb;
170 void *data;
171 int length;
172
173 data = (void *)rxb_addr(rxb);
174 length = get_event_length(rxb);
175
176 if (!data || length == 0)
177 return;
178
 /* 20 extra bytes presumably cover netlink attribute overhead — TODO confirm */
179 skb = cfg80211_testmode_alloc_event_skb(hw->wiphy, 20 + length,
180 GFP_ATOMIC);
181 if (skb == NULL) {
182 IWL_ERR(priv,
183 "Run out of memory for messages to user space ?\n");
184 return;
185 }
186 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
187 /* the length doesn't include len_n_flags field, so add it manually */
188 nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, length + sizeof(__le32), data))
189 goto nla_put_failure;
190 cfg80211_testmode_event(skb, GFP_ATOMIC);
191 return;
192
193nla_put_failure:
194 kfree_skb(skb);
195 IWL_ERR(priv, "Ouch, overran buffer, check allocation!\n");
196}
197
/* Reset all testmode state; no Rx hook, no trace, no pending memory read. */
198void iwl_testmode_init(struct iwl_priv *priv)
199{
200 priv->pre_rx_handler = NULL;
201 priv->testmode_trace.trace_enabled = false;
202 priv->testmode_mem.read_in_progress = false;
203}
204
/* Free the indirect-read buffer (if a read is pending) and reset its state. */
205static void iwl_mem_cleanup(struct iwl_priv *priv)
206{
207 if (priv->testmode_mem.read_in_progress) {
208 kfree(priv->testmode_mem.buff_addr);
209 priv->testmode_mem.buff_addr = NULL;
210 priv->testmode_mem.buff_size = 0;
211 priv->testmode_mem.num_chunks = 0;
212 priv->testmode_mem.read_in_progress = false;
213 }
214}
215
/* Release the DMA-coherent trace buffer (if allocated) and reset the
 * trace state so a new trace can be started. */
216static void iwl_trace_cleanup(struct iwl_priv *priv)
217{
218 if (priv->testmode_trace.trace_enabled) {
219 if (priv->testmode_trace.cpu_addr &&
220 priv->testmode_trace.dma_addr)
221 dma_free_coherent(priv->trans->dev,
222 priv->testmode_trace.total_size,
223 priv->testmode_trace.cpu_addr,
224 priv->testmode_trace.dma_addr);
225 priv->testmode_trace.trace_enabled = false;
226 priv->testmode_trace.cpu_addr = NULL;
227 priv->testmode_trace.trace_addr = NULL;
228 priv->testmode_trace.dma_addr = 0;
229 priv->testmode_trace.buff_size = 0;
230 priv->testmode_trace.total_size = 0;
231 }
232}
233
234
/* Free all testmode resources: trace buffer and indirect-read buffer. */
235void iwl_testmode_cleanup(struct iwl_priv *priv)
236{
237 iwl_trace_cleanup(priv);
238 iwl_mem_cleanup(priv);
239}
240
241
242/*
243 * This function handles the user application commands to the ucode.
244 *
245 * It retrieves the mandatory fields IWL_TM_ATTR_UCODE_CMD_ID and
246 * IWL_TM_ATTR_UCODE_CMD_DATA and calls to the handler to send the
247 * host command to the ucode.
248 *
249 * If any mandatory field is missing, -ENOMSG is replied to the user space
250 * application; otherwise, waits for the host command to be sent and checks
251 * the return code. In case of error, it is returned, otherwise a reply is
252 * allocated and the reply RX packet
253 * is returned.
254 *
255 * @hw: ieee80211_hw object that represents the device
256 * @tb: gnl message fields from the user space
257 */
258static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb)
259{
260 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
261 struct iwl_host_cmd cmd;
262 struct iwl_rx_packet *pkt;
263 struct sk_buff *skb;
264 void *reply_buf;
265 u32 reply_len;
266 int ret;
267 bool cmd_want_skb;
268
269 memset(&cmd, 0, sizeof(struct iwl_host_cmd));
270
271 if (!tb[IWL_TM_ATTR_UCODE_CMD_ID] ||
272 !tb[IWL_TM_ATTR_UCODE_CMD_DATA]) {
273 IWL_ERR(priv, "Missing ucode command mandatory fields\n");
274 return -ENOMSG;
275 }
276
277 cmd.flags = CMD_ON_DEMAND | CMD_SYNC;
278 cmd_want_skb = nla_get_flag(tb[IWL_TM_ATTR_UCODE_CMD_SKB]);
279 if (cmd_want_skb)
280 cmd.flags |= CMD_WANT_SKB;
281
282 cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]);
283 cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
284 cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
285 cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
286 IWL_DEBUG_INFO(priv, "testmode ucode command ID 0x%x, flags 0x%x,"
287 " len %d\n", cmd.id, cmd.flags, cmd.len[0]);
288
289 ret = iwl_dvm_send_cmd(priv, &cmd);
290 if (ret) {
291 IWL_ERR(priv, "Failed to send hcmd\n");
292 return ret;
293 }
294 if (!cmd_want_skb)
295 return ret;
296
297 /* Handling return of SKB to the user */
298 pkt = cmd.resp_pkt;
299 if (!pkt) {
300 IWL_ERR(priv, "HCMD received a null response packet\n");
301 return ret;
302 }
303
304 reply_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
305 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, reply_len + 20);
306 reply_buf = kmalloc(reply_len, GFP_KERNEL);
307 if (!skb || !reply_buf) {
308 kfree_skb(skb);
309 kfree(reply_buf);
310 return -ENOMEM;
311 }
312
313 /* The reply is in a page, that we cannot send to user space. */
314 memcpy(reply_buf, &(pkt->hdr), reply_len);
315 iwl_free_resp(&cmd);
316
317 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
318 nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf))
319 goto nla_put_failure;
 /* nla_put() copied reply_buf into the skb; free the bounce buffer.
 * cfg80211_testmode_reply() consumes the skb. Fix: the original
 * leaked reply_buf on this path. */
 ret = cfg80211_testmode_reply(skb);
 kfree(reply_buf);
 return ret;
321
322nla_put_failure:
 /* Fix: the original leaked both the skb and reply_buf here. */
 kfree_skb(skb);
 kfree(reply_buf);
323 IWL_DEBUG_INFO(priv, "Failed creating NL attributes\n");
324 return -ENOMSG;
325}
326
327
328/*
329 * This function handles the user application commands for register access.
330 *
331 * It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the
332 * handlers respectively.
333 *
334 * If it's an unknown command ID, -ENOSYS is returned; or -ENOMSG if the
335 * mandatory fields (IWL_TM_ATTR_REG_OFFSET, IWL_TM_ATTR_REG_VALUE32,
336 * IWL_TM_ATTR_REG_VALUE8) are missing; Otherwise 0 is replied indicating
337 * the success of the command execution.
338 *
339 * If IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_REG_READ32, the register read
340 * value is returned with IWL_TM_ATTR_REG_VALUE32.
341 *
342 * @hw: ieee80211_hw object that represents the device
343 * @tb: gnl message fields from the user space
344 */
345static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
346{
347 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
348 u32 ofs, val32, cmd;
349 u8 val8;
350 struct sk_buff *skb;
351 int status = 0;
352
353 if (!tb[IWL_TM_ATTR_REG_OFFSET]) {
354 IWL_ERR(priv, "Missing register offset\n");
355 return -ENOMSG;
356 }
357 ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]);
358 IWL_INFO(priv, "testmode register access command offset 0x%x\n", ofs);
359
360 /* Allow access only to FH/CSR/HBUS in direct mode.
361 Since we don't have the upper bounds for the CSR and HBUS segments,
362 we will use only the upper bound of FH for sanity check. */
363 cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
364 if ((cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32 ||
365 cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32 ||
366 cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8) &&
367 (ofs >= FH_MEM_UPPER_BOUND)) {
368 IWL_ERR(priv, "offset out of segment (0x0 - 0x%x)\n",
369 FH_MEM_UPPER_BOUND);
370 return -EINVAL;
371 }
372
373 switch (cmd) {
374 case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
375 val32 = iwl_read_direct32(priv->trans, ofs);
376 IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
377
378 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
379 if (!skb) {
380 IWL_ERR(priv, "Memory allocation fail\n");
381 return -ENOMEM;
382 }
383 if (nla_put_u32(skb, IWL_TM_ATTR_REG_VALUE32, val32))
384 goto nla_put_failure;
 /* cfg80211_testmode_reply() consumes the skb */
385 status = cfg80211_testmode_reply(skb);
386 if (status < 0)
387 IWL_ERR(priv, "Error sending msg : %d\n", status);
388 break;
389 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
390 if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
391 IWL_ERR(priv, "Missing value to write\n");
392 return -ENOMSG;
393 } else {
394 val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
395 IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
396 iwl_write_direct32(priv->trans, ofs, val32);
397 }
398 break;
399 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
400 if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
401 IWL_ERR(priv, "Missing value to write\n");
402 return -ENOMSG;
403 } else {
404 val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
405 IWL_INFO(priv, "8bit value to write 0x%x\n", val8);
406 iwl_write8(priv->trans, ofs, val8);
407 }
408 break;
409 default:
410 IWL_ERR(priv, "Unknown testmode register command ID\n");
411 return -ENOSYS;
412 }
413
414 return status;
415
416nla_put_failure:
417 kfree_skb(skb);
418 return -EMSGSIZE;
419}
420
421
/* Run the init (calibration) firmware flow and wait up to 2 seconds for the
 * CALIBRATION_COMPLETE_NOTIFICATION from the uCode. Returns 0 on success. */
422static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
423{
424 struct iwl_notification_wait calib_wait;
425 static const u8 calib_complete[] = {
426 CALIBRATION_COMPLETE_NOTIFICATION
427 };
428 int ret;
429
430 iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
431 calib_complete, ARRAY_SIZE(calib_complete),
432 NULL, NULL);
433 ret = iwl_init_alive_start(priv);
434 if (ret) {
435 IWL_ERR(priv, "Fail init calibration: %d\n", ret);
436 goto cfg_init_calib_error;
437 }
438
439 ret = iwl_wait_notification(&priv->notif_wait, &calib_wait, 2 * HZ);
440 if (ret)
441 IWL_ERR(priv, "Error detecting"
442 " CALIBRATION_COMPLETE_NOTIFICATION: %d\n", ret);
443 return ret;
444
445cfg_init_calib_error:
 /* on early failure the registered wait entry must be removed manually */
446 iwl_remove_notification(&priv->notif_wait, &calib_wait);
447 return ret;
448}
449
450/*
451 * This function handles the user application commands for driver.
452 *
453 * It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the
454 * handlers respectively.
455 *
456 * If it's an unknown command ID, -ENOSYS is replied; otherwise, the returned
457 * value of the actual command execution is replied to the user application.
458 *
459 * If there's any message responding to the user space, IWL_TM_ATTR_SYNC_RSP
460 * is used for carry the message while IWL_TM_ATTR_COMMAND must set to
461 * IWL_TM_CMD_DEV2APP_SYNC_RSP.
462 *
463 * @hw: ieee80211_hw object that represents the device
464 * @tb: gnl message fields from the user space
465 */
466static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
467{
468 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
469 struct iwl_trans *trans = priv->trans;
470 struct sk_buff *skb;
471 unsigned char *rsp_data_ptr = NULL;
472 int status = 0, rsp_data_len = 0;
473 u32 devid, inst_size = 0, data_size = 0;
474 const struct fw_img *img;
475
476 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
477 case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
478 rsp_data_ptr = (unsigned char *)priv->cfg->name;
479 rsp_data_len = strlen(priv->cfg->name);
480 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
481 rsp_data_len + 20);
482 if (!skb) {
483 IWL_ERR(priv, "Memory allocation fail\n");
484 return -ENOMEM;
485 }
486 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
487 IWL_TM_CMD_DEV2APP_SYNC_RSP) ||
488 nla_put(skb, IWL_TM_ATTR_SYNC_RSP,
489 rsp_data_len, rsp_data_ptr))
490 goto nla_put_failure;
491 status = cfg80211_testmode_reply(skb);
492 if (status < 0)
493 IWL_ERR(priv, "Error sending msg : %d\n", status);
494 break;
495
496 case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
497 status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_INIT);
498 if (status)
499 IWL_ERR(priv, "Error loading init ucode: %d\n", status);
500 break;
501
502 case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
503 iwl_testmode_cfg_init_calib(priv);
504 priv->ucode_loaded = false;
505 iwl_trans_stop_device(trans);
506 break;
507
508 case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
509 status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR);
510 if (status) {
511 IWL_ERR(priv,
512 "Error loading runtime ucode: %d\n", status);
513 break;
514 }
515 status = iwl_alive_start(priv);
516 if (status)
517 IWL_ERR(priv,
518 "Error starting the device: %d\n", status);
519 break;
520
521 case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
522 iwl_scan_cancel_timeout(priv, 200);
523 priv->ucode_loaded = false;
524 iwl_trans_stop_device(trans);
525 status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_WOWLAN);
526 if (status) {
527 IWL_ERR(priv,
528 "Error loading WOWLAN ucode: %d\n", status);
529 break;
530 }
531 status = iwl_alive_start(priv);
532 if (status)
533 IWL_ERR(priv,
534 "Error starting the device: %d\n", status);
535 break;
536
537 case IWL_TM_CMD_APP2DEV_GET_EEPROM:
538 if (priv->eeprom) {
539 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
540 priv->cfg->base_params->eeprom_size + 20);
541 if (!skb) {
542 IWL_ERR(priv, "Memory allocation fail\n");
543 return -ENOMEM;
544 }
545 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
546 IWL_TM_CMD_DEV2APP_EEPROM_RSP) ||
547 nla_put(skb, IWL_TM_ATTR_EEPROM,
548 priv->cfg->base_params->eeprom_size,
549 priv->eeprom))
550 goto nla_put_failure;
551 status = cfg80211_testmode_reply(skb);
552 if (status < 0)
553 IWL_ERR(priv, "Error sending msg : %d\n",
554 status);
555 } else
556 return -EFAULT;
557 break;
558
559 case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
560 if (!tb[IWL_TM_ATTR_FIXRATE]) {
561 IWL_ERR(priv, "Missing fixrate setting\n");
562 return -ENOMSG;
563 }
564 priv->tm_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]);
565 break;
566
567 case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
568 IWL_INFO(priv, "uCode version raw: 0x%x\n",
569 priv->fw->ucode_ver);
570
571 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
572 if (!skb) {
573 IWL_ERR(priv, "Memory allocation fail\n");
574 return -ENOMEM;
575 }
576 if (nla_put_u32(skb, IWL_TM_ATTR_FW_VERSION,
577 priv->fw->ucode_ver))
578 goto nla_put_failure;
579 status = cfg80211_testmode_reply(skb);
580 if (status < 0)
581 IWL_ERR(priv, "Error sending msg : %d\n", status);
582 break;
583
584 case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
585 devid = priv->trans->hw_id;
586 IWL_INFO(priv, "hw version: 0x%x\n", devid);
587
588 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
589 if (!skb) {
590 IWL_ERR(priv, "Memory allocation fail\n");
591 return -ENOMEM;
592 }
593 if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid))
594 goto nla_put_failure;
595 status = cfg80211_testmode_reply(skb);
596 if (status < 0)
597 IWL_ERR(priv, "Error sending msg : %d\n", status);
598 break;
599
600 case IWL_TM_CMD_APP2DEV_GET_FW_INFO:
 /* Fix: check ucode_loaded BEFORE allocating the skb — the
 * original allocated first and leaked the skb on the early
 * -EINVAL return. Also fixed the double-negative message. */
 if (!priv->ucode_loaded) {
 IWL_ERR(priv, "No uCode has been loaded\n");
 return -EINVAL;
 }
 img = &priv->fw->img[priv->cur_ucode];
 inst_size = img->sec[IWL_UCODE_SECTION_INST].len;
 data_size = img->sec[IWL_UCODE_SECTION_DATA].len;
601 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20 + 8);
602 if (!skb) {
603 IWL_ERR(priv, "Memory allocation fail\n");
604 return -ENOMEM;
605 }
614 if (nla_put_u32(skb, IWL_TM_ATTR_FW_TYPE, priv->cur_ucode) ||
615 nla_put_u32(skb, IWL_TM_ATTR_FW_INST_SIZE, inst_size) ||
616 nla_put_u32(skb, IWL_TM_ATTR_FW_DATA_SIZE, data_size))
617 goto nla_put_failure;
618 status = cfg80211_testmode_reply(skb);
619 if (status < 0)
620 IWL_ERR(priv, "Error sending msg : %d\n", status);
621 break;
622
623 default:
624 IWL_ERR(priv, "Unknown testmode driver command ID\n");
625 return -ENOSYS;
626 }
627 return status;
628
629nla_put_failure:
630 kfree_skb(skb);
631 return -EMSGSIZE;
632}
633
634
635/*
636 * This function handles the user application commands for uCode trace.
637 *
638 * It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the
639 * handlers respectively.
640 *
641 * If it's an unknown command ID, -ENOSYS is replied; otherwise, the returned
642 * value of the actual command execution is replied to the user application.
643 *
644 * @hw: ieee80211_hw object that represents the device
645 * @tb: gnl message fields from the user space
646 */
647static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
648{
649 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
650 struct sk_buff *skb;
651 int status = 0;
652 struct device *dev = priv->trans->dev;
653
654 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
655 case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
656 if (priv->testmode_trace.trace_enabled)
657 return -EBUSY;
658
659 if (!tb[IWL_TM_ATTR_TRACE_SIZE])
660 priv->testmode_trace.buff_size = TRACE_BUFF_SIZE_DEF;
661 else
662 priv->testmode_trace.buff_size =
663 nla_get_u32(tb[IWL_TM_ATTR_TRACE_SIZE]);
664 if (!priv->testmode_trace.buff_size)
665 return -EINVAL;
666 if (priv->testmode_trace.buff_size < TRACE_BUFF_SIZE_MIN ||
667 priv->testmode_trace.buff_size > TRACE_BUFF_SIZE_MAX)
668 return -EINVAL;
669
 /* over-allocate so the buffer can be aligned to 0x100 below */
670 priv->testmode_trace.total_size =
671 priv->testmode_trace.buff_size + TRACE_BUFF_PADD;
672 priv->testmode_trace.cpu_addr =
673 dma_alloc_coherent(dev,
674 priv->testmode_trace.total_size,
675 &priv->testmode_trace.dma_addr,
676 GFP_KERNEL);
677 if (!priv->testmode_trace.cpu_addr)
678 return -ENOMEM;
679 priv->testmode_trace.trace_enabled = true;
680 priv->testmode_trace.trace_addr = (u8 *)PTR_ALIGN(
681 priv->testmode_trace.cpu_addr, 0x100);
682 memset(priv->testmode_trace.trace_addr, 0x03B,
683 priv->testmode_trace.buff_size);
684 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
685 sizeof(priv->testmode_trace.dma_addr) + 20);
686 if (!skb) {
687 IWL_ERR(priv, "Memory allocation fail\n");
688 iwl_trace_cleanup(priv);
689 return -ENOMEM;
690 }
691 if (nla_put(skb, IWL_TM_ATTR_TRACE_ADDR,
692 sizeof(priv->testmode_trace.dma_addr),
693 (u64 *)&priv->testmode_trace.dma_addr))
694 goto nla_put_failure;
695 status = cfg80211_testmode_reply(skb);
696 if (status < 0) {
697 IWL_ERR(priv, "Error sending msg : %d\n", status);
698 }
699 priv->testmode_trace.num_chunks =
700 DIV_ROUND_UP(priv->testmode_trace.buff_size,
701 DUMP_CHUNK_SIZE);
702 break;
703
704 case IWL_TM_CMD_APP2DEV_END_TRACE:
705 iwl_trace_cleanup(priv);
706 break;
707 default:
708 IWL_ERR(priv, "Unknown testmode trace command ID\n");
709 return -ENOSYS;
710 }
711 return status;
712
713nla_put_failure:
714 kfree_skb(skb);
 /* a failed BEGIN_TRACE must also release the DMA buffer just allocated */
715 if (nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) ==
716 IWL_TM_CMD_APP2DEV_BEGIN_TRACE)
717 iwl_trace_cleanup(priv);
718 return -EMSGSIZE;
719}
720
/* Netlink dump callback: returns the trace buffer to user space one
 * DUMP_CHUNK_SIZE chunk per call, tracking progress in cb->args[4].
 * Returns -ENOENT when all chunks have been delivered. */
721static int iwl_testmode_trace_dump(struct ieee80211_hw *hw,
722 struct sk_buff *skb,
723 struct netlink_callback *cb)
724{
725 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
726 int idx, length;
727
728 if (priv->testmode_trace.trace_enabled &&
729 priv->testmode_trace.trace_addr) {
730 idx = cb->args[4];
731 if (idx >= priv->testmode_trace.num_chunks)
732 return -ENOENT;
733 length = DUMP_CHUNK_SIZE;
 /* the last chunk may be shorter than DUMP_CHUNK_SIZE */
734 if (((idx + 1) == priv->testmode_trace.num_chunks) &&
735 (priv->testmode_trace.buff_size % DUMP_CHUNK_SIZE))
736 length = priv->testmode_trace.buff_size %
737 DUMP_CHUNK_SIZE;
738
739 if (nla_put(skb, IWL_TM_ATTR_TRACE_DUMP, length,
740 priv->testmode_trace.trace_addr +
741 (DUMP_CHUNK_SIZE * idx)))
742 goto nla_put_failure;
743 idx++;
744 cb->args[4] = idx;
745 return 0;
746 } else
747 return -EFAULT;
748
749 nla_put_failure:
750 return -ENOBUFS;
751}
752
753/*
754 * This function handles the user application switch of ucode ownership.
755 *
756 * It retrieves the mandatory field IWL_TM_ATTR_UCODE_OWNER and
757 * decides who is the current owner of the uCode.
758 *
759 * If the current owner is OWNERSHIP_TM, then the only host commands
760 * that can be delivered to the uCode are those from testmode; all other
761 * host commands will be dropped.
762 *
763 * The default driver is the owner of uCode in normal operational mode.
764 *
765 * @hw: ieee80211_hw object that represents the device
766 * @tb: gnl message fields from the user space
767 */
768static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
769{
770 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
771 u8 owner;
772
773 if (!tb[IWL_TM_ATTR_UCODE_OWNER]) {
774 IWL_ERR(priv, "Missing ucode owner\n");
775 return -ENOMSG;
776 }
777
778 owner = nla_get_u8(tb[IWL_TM_ATTR_UCODE_OWNER]);
779 if (owner == IWL_OWNERSHIP_DRIVER) {
780 priv->ucode_owner = owner;
781 priv->pre_rx_handler = NULL;
782 } else if (owner == IWL_OWNERSHIP_TM) {
 /* route raw Rx packets to user space while testmode owns the uCode */
783 priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt;
784 priv->ucode_owner = owner;
785 } else {
786 IWL_ERR(priv, "Invalid owner\n");
787 return -EINVAL;
788 }
789 return 0;
790}
791
/*
 * Read @size bytes of device memory at absolute address @addr into a
 * freshly allocated kernel buffer (priv->testmode_mem), to be handed
 * to user space in DUMP_CHUNK_SIZE pieces by
 * iwl_testmode_buffer_dump().
 *
 * Addresses in [IWL_TM_ABS_PRPH_START, IWL_TM_ABS_PRPH_START +
 * PRPH_END) are read through the HBUS_TARG_PRPH_* indirection
 * registers; anything else is treated as target memory (SRAM).
 *
 * Returns 0 on success, -EINVAL if @size is not a multiple of 4,
 * -ENOMEM if the buffer allocation fails.
 */
static int iwl_testmode_indirect_read(struct iwl_priv *priv, u32 addr, u32 size)
{
	struct iwl_trans *trans = priv->trans;
	unsigned long flags;
	int i;

	/* the device is only addressable in whole DWORDs */
	if (size & 0x3)
		return -EINVAL;
	priv->testmode_mem.buff_size = size;
	priv->testmode_mem.buff_addr =
		kmalloc(priv->testmode_mem.buff_size, GFP_KERNEL);
	if (priv->testmode_mem.buff_addr == NULL)
		return -ENOMEM;

	/* Hard-coded periphery absolute address */
	if (IWL_TM_ABS_PRPH_START <= addr &&
	    addr < IWL_TM_ABS_PRPH_START + PRPH_END) {
		spin_lock_irqsave(&trans->reg_lock, flags);
		iwl_grab_nic_access(trans);
		/* NOTE(review): (3 << 24) in RADDR presumably selects
		 * auto-incrementing reads (matches the prph read helper
		 * convention) - confirm against the HW documentation */
		iwl_write32(trans, HBUS_TARG_PRPH_RADDR,
			addr | (3 << 24));
		/* each RDAT read returns the next DWORD */
		for (i = 0; i < size; i += 4)
			*(u32 *)(priv->testmode_mem.buff_addr + i) =
				iwl_read32(trans, HBUS_TARG_PRPH_RDAT);
		iwl_release_nic_access(trans);
		spin_unlock_irqrestore(&trans->reg_lock, flags);
	} else { /* target memory (SRAM) */
		_iwl_read_targ_mem_words(trans, addr,
			priv->testmode_mem.buff_addr,
			priv->testmode_mem.buff_size / 4);
	}

	/* number of netlink chunks the dump callback will deliver */
	priv->testmode_mem.num_chunks =
		DIV_ROUND_UP(priv->testmode_mem.buff_size, DUMP_CHUNK_SIZE);
	/* blocks further reads/writes until the dump has been drained */
	priv->testmode_mem.read_in_progress = true;
	return 0;

}
830
/*
 * Write @size bytes from @buf to the device at absolute address @addr.
 *
 * Periphery range: 1-3 byte writes go through a single
 * HBUS_TARG_PRPH_WDAT access with the byte count encoded in the top
 * bits of WADDR; longer writes must be DWORD aligned and are issued
 * one DWORD at a time via iwl_write_prph(). Addresses falling in the
 * data or instruction SRAM ranges are written as target memory.
 *
 * Returns 0 on success, -EINVAL for a misaligned size or an address
 * outside the recognized ranges.
 *
 * NOTE(review): the caller must guarantee that @buf holds at least
 * @size bytes - the attribute length is not re-checked here.
 */
static int iwl_testmode_indirect_write(struct iwl_priv *priv, u32 addr,
	u32 size, unsigned char *buf)
{
	struct iwl_trans *trans = priv->trans;
	u32 val, i;
	unsigned long flags;

	if (IWL_TM_ABS_PRPH_START <= addr &&
	    addr < IWL_TM_ABS_PRPH_START + PRPH_END) {
		/* Periphery writes can be 1-3 bytes long, or DWORDs */
		if (size < 4) {
			/* only the low 'size' bytes of val are meaningful */
			memcpy(&val, buf, size);
			spin_lock_irqsave(&trans->reg_lock, flags);
			iwl_grab_nic_access(trans);
			/* byte count goes in bits 24+ of WADDR */
			iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
				    (addr & 0x0000FFFF) |
				    ((size - 1) << 24));
			iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
			iwl_release_nic_access(trans);
			/* needed after consecutive writes w/o read */
			mmiowb();
			spin_unlock_irqrestore(&trans->reg_lock, flags);
		} else {
			if (size % 4)
				return -EINVAL;
			for (i = 0; i < size; i += 4)
				iwl_write_prph(trans, addr+i,
					*(u32 *)(buf+i));
		}
	} else if (iwlagn_hw_valid_rtc_data_addr(addr) ||
		  (IWLAGN_RTC_INST_LOWER_BOUND <= addr &&
		   addr < IWLAGN_RTC_INST_UPPER_BOUND)) {
		_iwl_write_targ_mem_words(trans, addr, buf, size/4);
	} else
		return -EINVAL;
	return 0;
}
868
869/*
870 * This function handles the user application commands for SRAM data dump
871 *
872 * It retrieves the mandatory fields IWL_TM_ATTR_SRAM_ADDR and
873 * IWL_TM_ATTR_SRAM_SIZE to decide the memory area for SRAM data reading
874 *
875 * Several error will be retured, -EBUSY if the SRAM data retrieved by
876 * previous command has not been delivered to userspace, or -ENOMSG if
877 * the mandatory fields (IWL_TM_ATTR_SRAM_ADDR,IWL_TM_ATTR_SRAM_SIZE)
878 * are missing, or -ENOMEM if the buffer allocation fails.
879 *
880 * Otherwise 0 is replied indicating the success of the SRAM reading.
881 *
882 * @hw: ieee80211_hw object that represents the device
883 * @tb: gnl message fields from the user space
884 */
885static int iwl_testmode_indirect_mem(struct ieee80211_hw *hw,
886 struct nlattr **tb)
887{
888 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
889 u32 addr, size, cmd;
890 unsigned char *buf;
891
892 /* Both read and write should be blocked, for atomicity */
893 if (priv->testmode_mem.read_in_progress)
894 return -EBUSY;
895
896 cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
897 if (!tb[IWL_TM_ATTR_MEM_ADDR]) {
898 IWL_ERR(priv, "Error finding memory offset address\n");
899 return -ENOMSG;
900 }
901 addr = nla_get_u32(tb[IWL_TM_ATTR_MEM_ADDR]);
902 if (!tb[IWL_TM_ATTR_BUFFER_SIZE]) {
903 IWL_ERR(priv, "Error finding size for memory reading\n");
904 return -ENOMSG;
905 }
906 size = nla_get_u32(tb[IWL_TM_ATTR_BUFFER_SIZE]);
907
908 if (cmd == IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ)
909 return iwl_testmode_indirect_read(priv, addr, size);
910 else {
911 if (!tb[IWL_TM_ATTR_BUFFER_DUMP])
912 return -EINVAL;
913 buf = (unsigned char *) nla_data(tb[IWL_TM_ATTR_BUFFER_DUMP]);
914 return iwl_testmode_indirect_write(priv, addr, size, buf);
915 }
916}
917
918static int iwl_testmode_buffer_dump(struct ieee80211_hw *hw,
919 struct sk_buff *skb,
920 struct netlink_callback *cb)
921{
922 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
923 int idx, length;
924
925 if (priv->testmode_mem.read_in_progress) {
926 idx = cb->args[4];
927 if (idx >= priv->testmode_mem.num_chunks) {
928 iwl_mem_cleanup(priv);
929 return -ENOENT;
930 }
931 length = DUMP_CHUNK_SIZE;
932 if (((idx + 1) == priv->testmode_mem.num_chunks) &&
933 (priv->testmode_mem.buff_size % DUMP_CHUNK_SIZE))
934 length = priv->testmode_mem.buff_size %
935 DUMP_CHUNK_SIZE;
936
937 if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, length,
938 priv->testmode_mem.buff_addr +
939 (DUMP_CHUNK_SIZE * idx)))
940 goto nla_put_failure;
941 idx++;
942 cb->args[4] = idx;
943 return 0;
944 } else
945 return -EFAULT;
946
947 nla_put_failure:
948 return -ENOBUFS;
949}
950
951static int iwl_testmode_notifications(struct ieee80211_hw *hw,
952 struct nlattr **tb)
953{
954 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
955 bool enable;
956
957 enable = nla_get_flag(tb[IWL_TM_ATTR_ENABLE_NOTIFICATION]);
958 if (enable)
959 priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt;
960 else
961 priv->pre_rx_handler = NULL;
962 return 0;
963}
964

/* The testmode gnl message handler that takes the gnl message from the
 * user space and parses it per the policy iwl_testmode_gnl_msg_policy,
 * then invokes the corresponding handler.
 *
 * This function is invoked when a user space application sends a gnl
 * message through the testmode tunnel NL80211_CMD_TESTMODE regulated
 * by nl80211.
 *
 * It retrieves the mandatory field, IWL_TM_ATTR_COMMAND, before
 * dispatching it to the corresponding handler.
 *
 * If IWL_TM_ATTR_COMMAND is missing, -ENOMSG is replied to the user
 * application; -ENOSYS is replied if the command is unknown;
 * otherwise the dispatched handler's result is returned.
 *
 * @hw: ieee80211_hw object that represents the device
 * @data: pointer to user space message
 * @len: length in byte of @data
 */
int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
{
	struct nlattr *tb[IWL_TM_ATTR_MAX];
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	int result;

	result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
			iwl_testmode_gnl_msg_policy);
	if (result != 0) {
		IWL_ERR(priv, "Error parsing the gnl message : %d\n", result);
		return result;
	}

	/* IWL_TM_ATTR_COMMAND is absolutely mandatory */
	if (!tb[IWL_TM_ATTR_COMMAND]) {
		IWL_ERR(priv, "Missing testmode command type\n");
		return -ENOMSG;
	}
	/* in case multiple accesses to the device happens */
	mutex_lock(&priv->mutex);

	switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
	case IWL_TM_CMD_APP2DEV_UCODE:
		IWL_DEBUG_INFO(priv, "testmode cmd to uCode\n");
		result = iwl_testmode_ucode(hw, tb);
		break;
	case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
	case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
	case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
		IWL_DEBUG_INFO(priv, "testmode cmd to register\n");
		result = iwl_testmode_reg(hw, tb);
		break;
	case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
	case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
	case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
	case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
	case IWL_TM_CMD_APP2DEV_GET_EEPROM:
	case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
	case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
	case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
	case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
	case IWL_TM_CMD_APP2DEV_GET_FW_INFO:
		IWL_DEBUG_INFO(priv, "testmode cmd to driver\n");
		result = iwl_testmode_driver(hw, tb);
		break;

	case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
	case IWL_TM_CMD_APP2DEV_END_TRACE:
	case IWL_TM_CMD_APP2DEV_READ_TRACE:
		IWL_DEBUG_INFO(priv, "testmode uCode trace cmd to driver\n");
		result = iwl_testmode_trace(hw, tb);
		break;

	case IWL_TM_CMD_APP2DEV_OWNERSHIP:
		IWL_DEBUG_INFO(priv, "testmode change uCode ownership\n");
		result = iwl_testmode_ownership(hw, tb);
		break;

	case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
	case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
		IWL_DEBUG_INFO(priv, "testmode indirect memory cmd "
			       "to driver\n");
		result = iwl_testmode_indirect_mem(hw, tb);
		break;

	case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
		IWL_DEBUG_INFO(priv, "testmode notifications cmd "
			       "to driver\n");
		result = iwl_testmode_notifications(hw, tb);
		break;

	default:
		IWL_ERR(priv, "Unknown testmode command\n");
		result = -ENOSYS;
		break;
	}

	mutex_unlock(&priv->mutex);
	return result;
}
1065
/*
 * Testmode dump entry point (invoked repeatedly for one
 * NL80211_CMD_TESTMODE dump). On the first call the command is parsed
 * from the message and cached in cb->args[3], offset by one so that a
 * stored value of 0 still means "not yet parsed"; subsequent calls of
 * the same dump reuse the cached command instead of re-parsing.
 *
 * Only READ_TRACE and INDIRECT_BUFFER_DUMP are dumpable; any other
 * command yields -EINVAL. Otherwise the chunk handler's result is
 * returned (-ENOENT terminates the dump).
 */
int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
			     struct netlink_callback *cb,
			     void *data, int len)
{
	struct nlattr *tb[IWL_TM_ATTR_MAX];
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	int result;
	u32 cmd;

	if (cb->args[3]) {
		/* offset by 1 since commands start at 0 */
		cmd = cb->args[3] - 1;
	} else {
		result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
				iwl_testmode_gnl_msg_policy);
		if (result) {
			IWL_ERR(priv,
				"Error parsing the gnl message : %d\n", result);
			return result;
		}

		/* IWL_TM_ATTR_COMMAND is absolutely mandatory */
		if (!tb[IWL_TM_ATTR_COMMAND]) {
			IWL_ERR(priv, "Missing testmode command type\n");
			return -ENOMSG;
		}
		/* cache the command for the follow-up invocations */
		cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
		cb->args[3] = cmd + 1;
	}

	/* in case multiple accesses to the device happens */
	mutex_lock(&priv->mutex);
	switch (cmd) {
	case IWL_TM_CMD_APP2DEV_READ_TRACE:
		IWL_DEBUG_INFO(priv, "uCode trace cmd to driver\n");
		result = iwl_testmode_trace_dump(hw, skb, cb);
		break;
	case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP:
		IWL_DEBUG_INFO(priv, "testmode sram dump cmd to driver\n");
		result = iwl_testmode_buffer_dump(hw, skb, cb);
		break;
	default:
		result = -EINVAL;
		break;
	}

	mutex_unlock(&priv->mutex);
	return result;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 79a1e7ae4995..867d8e194da4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -154,6 +154,9 @@ struct iwl_cmd_header {
154 __le16 sequence; 154 __le16 sequence;
155} __packed; 155} __packed;
156 156
157/* iwl_cmd_header flags value */
158#define IWL_CMD_FAILED_MSK 0x40
159
157 160
158#define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */ 161#define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */
159#define FH_RSCSR_FRAME_INVALID 0x55550000 162#define FH_RSCSR_FRAME_INVALID 0x55550000
@@ -280,21 +283,24 @@ static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
280 283
281#define MAX_NO_RECLAIM_CMDS 6 284#define MAX_NO_RECLAIM_CMDS 6
282 285
286#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
287
283/* 288/*
284 * Maximum number of HW queues the transport layer 289 * Maximum number of HW queues the transport layer
285 * currently supports 290 * currently supports
286 */ 291 */
287#define IWL_MAX_HW_QUEUES 32 292#define IWL_MAX_HW_QUEUES 32
293#define IWL_INVALID_STATION 255
294#define IWL_MAX_TID_COUNT 8
295#define IWL_FRAME_LIMIT 64
288 296
289/** 297/**
290 * struct iwl_trans_config - transport configuration 298 * struct iwl_trans_config - transport configuration
291 * 299 *
292 * @op_mode: pointer to the upper layer. 300 * @op_mode: pointer to the upper layer.
293 * @queue_to_fifo: queue to FIFO mapping to set up by
294 * default
295 * @n_queue_to_fifo: number of queues to set up
296 * @cmd_queue: the index of the command queue. 301 * @cmd_queue: the index of the command queue.
297 * Must be set before start_fw. 302 * Must be set before start_fw.
303 * @cmd_fifo: the fifo for host commands
298 * @no_reclaim_cmds: Some devices erroneously don't set the 304 * @no_reclaim_cmds: Some devices erroneously don't set the
299 * SEQ_RX_FRAME bit on some notifications, this is the 305 * SEQ_RX_FRAME bit on some notifications, this is the
300 * list of such notifications to filter. Max length is 306 * list of such notifications to filter. Max length is
@@ -309,10 +315,9 @@ static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
309 */ 315 */
310struct iwl_trans_config { 316struct iwl_trans_config {
311 struct iwl_op_mode *op_mode; 317 struct iwl_op_mode *op_mode;
312 const u8 *queue_to_fifo;
313 u8 n_queue_to_fifo;
314 318
315 u8 cmd_queue; 319 u8 cmd_queue;
320 u8 cmd_fifo;
316 const u8 *no_reclaim_cmds; 321 const u8 *no_reclaim_cmds;
317 int n_no_reclaim_cmds; 322 int n_no_reclaim_cmds;
318 323
@@ -350,10 +355,10 @@ struct iwl_trans;
350 * Must be atomic 355 * Must be atomic
351 * @reclaim: free packet until ssn. Returns a list of freed packets. 356 * @reclaim: free packet until ssn. Returns a list of freed packets.
352 * Must be atomic 357 * Must be atomic
353 * @tx_agg_setup: setup a tx queue for AMPDU - will be called once the HW is 358 * @txq_enable: setup a queue. To setup an AC queue, use the
354 * ready and a successful ADDBA response has been received. 359 * iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
355 * May sleep 360 * this one. The op_mode must not configure the HCMD queue. May sleep.
356 * @tx_agg_disable: de-configure a Tx queue to send AMPDUs 361 * @txq_disable: de-configure a Tx queue to send AMPDUs
357 * Must be atomic 362 * Must be atomic
358 * @wait_tx_queue_empty: wait until all tx queues are empty 363 * @wait_tx_queue_empty: wait until all tx queues are empty
359 * May sleep 364 * May sleep
@@ -386,9 +391,9 @@ struct iwl_trans_ops {
386 void (*reclaim)(struct iwl_trans *trans, int queue, int ssn, 391 void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
387 struct sk_buff_head *skbs); 392 struct sk_buff_head *skbs);
388 393
389 void (*tx_agg_setup)(struct iwl_trans *trans, int queue, int fifo, 394 void (*txq_enable)(struct iwl_trans *trans, int queue, int fifo,
390 int sta_id, int tid, int frame_limit, u16 ssn); 395 int sta_id, int tid, int frame_limit, u16 ssn);
391 void (*tx_agg_disable)(struct iwl_trans *trans, int queue); 396 void (*txq_disable)(struct iwl_trans *trans, int queue);
392 397
393 int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir); 398 int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
394 int (*wait_tx_queue_empty)(struct iwl_trans *trans); 399 int (*wait_tx_queue_empty)(struct iwl_trans *trans);
@@ -428,6 +433,11 @@ enum iwl_trans_state {
428 * @hw_id_str: a string with info about HW ID. Set during transport allocation. 433 * @hw_id_str: a string with info about HW ID. Set during transport allocation.
429 * @pm_support: set to true in start_hw if link pm is supported 434 * @pm_support: set to true in start_hw if link pm is supported
430 * @wait_command_queue: the wait_queue for SYNC host commands 435 * @wait_command_queue: the wait_queue for SYNC host commands
436 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
437 * The user should use iwl_trans_{alloc,free}_tx_cmd.
438 * @dev_cmd_headroom: room needed for the transport's private use before the
439 * device_cmd for Tx - for internal use only
440 * The user should use iwl_trans_{alloc,free}_tx_cmd.
431 */ 441 */
432struct iwl_trans { 442struct iwl_trans {
433 const struct iwl_trans_ops *ops; 443 const struct iwl_trans_ops *ops;
@@ -445,6 +455,10 @@ struct iwl_trans {
445 455
446 wait_queue_head_t wait_command_queue; 456 wait_queue_head_t wait_command_queue;
447 457
458 /* The following fields are internal only */
459 struct kmem_cache *dev_cmd_pool;
460 size_t dev_cmd_headroom;
461
448 /* pointer to trans specific struct */ 462 /* pointer to trans specific struct */
449 /*Ensure that this pointer will always be aligned to sizeof pointer */ 463 /*Ensure that this pointer will always be aligned to sizeof pointer */
450 char trans_specific[0] __aligned(sizeof(void *)); 464 char trans_specific[0] __aligned(sizeof(void *));
@@ -483,9 +497,9 @@ static inline void iwl_trans_fw_alive(struct iwl_trans *trans)
483{ 497{
484 might_sleep(); 498 might_sleep();
485 499
486 trans->ops->fw_alive(trans);
487
488 trans->state = IWL_TRANS_FW_ALIVE; 500 trans->state = IWL_TRANS_FW_ALIVE;
501
502 trans->ops->fw_alive(trans);
489} 503}
490 504
491static inline int iwl_trans_start_fw(struct iwl_trans *trans, 505static inline int iwl_trans_start_fw(struct iwl_trans *trans,
@@ -520,6 +534,26 @@ static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
520 return trans->ops->send_cmd(trans, cmd); 534 return trans->ops->send_cmd(trans, cmd);
521} 535}
522 536
537static inline struct iwl_device_cmd *
538iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
539{
540 u8 *dev_cmd_ptr = kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
541
542 if (unlikely(dev_cmd_ptr == NULL))
543 return NULL;
544
545 return (struct iwl_device_cmd *)
546 (dev_cmd_ptr + trans->dev_cmd_headroom);
547}
548
549static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
550 struct iwl_device_cmd *dev_cmd)
551{
552 u8 *dev_cmd_ptr = (u8 *)dev_cmd - trans->dev_cmd_headroom;
553
554 kmem_cache_free(trans->dev_cmd_pool, dev_cmd_ptr);
555}
556
523static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb, 557static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
524 struct iwl_device_cmd *dev_cmd, int queue) 558 struct iwl_device_cmd *dev_cmd, int queue)
525{ 559{
@@ -538,27 +572,34 @@ static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
538 trans->ops->reclaim(trans, queue, ssn, skbs); 572 trans->ops->reclaim(trans, queue, ssn, skbs);
539} 573}
540 574
541static inline void iwl_trans_tx_agg_disable(struct iwl_trans *trans, int queue) 575static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue)
542{ 576{
543 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, 577 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
544 "%s bad state = %d", __func__, trans->state); 578 "%s bad state = %d", __func__, trans->state);
545 579
546 trans->ops->tx_agg_disable(trans, queue); 580 trans->ops->txq_disable(trans, queue);
547} 581}
548 582
549static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans, int queue, 583static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
550 int fifo, int sta_id, int tid, 584 int fifo, int sta_id, int tid,
551 int frame_limit, u16 ssn) 585 int frame_limit, u16 ssn)
552{ 586{
553 might_sleep(); 587 might_sleep();
554 588
555 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, 589 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
556 "%s bad state = %d", __func__, trans->state); 590 "%s bad state = %d", __func__, trans->state);
557 591
558 trans->ops->tx_agg_setup(trans, queue, fifo, sta_id, tid, 592 trans->ops->txq_enable(trans, queue, fifo, sta_id, tid,
559 frame_limit, ssn); 593 frame_limit, ssn);
560} 594}
561 595
596static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue,
597 int fifo)
598{
599 iwl_trans_txq_enable(trans, queue, fifo, IWL_INVALID_STATION,
600 IWL_MAX_TID_COUNT, IWL_FRAME_LIMIT, 0);
601}
602
562static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans) 603static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
563{ 604{
564 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, 605 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/pcie/1000.c
index 2629a6602dfa..81b83f484f08 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/pcie/1000.c
@@ -27,9 +27,9 @@
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/stringify.h> 28#include <linux/stringify.h>
29#include "iwl-config.h" 29#include "iwl-config.h"
30#include "iwl-cfg.h"
31#include "iwl-csr.h" 30#include "iwl-csr.h"
32#include "iwl-agn-hw.h" 31#include "iwl-agn-hw.h"
32#include "cfg.h"
33 33
34/* Highest firmware API version supported */ 34/* Highest firmware API version supported */
35#define IWL1000_UCODE_API_MAX 5 35#define IWL1000_UCODE_API_MAX 5
@@ -64,13 +64,26 @@ static const struct iwl_base_params iwl1000_base_params = {
64 .support_ct_kill_exit = true, 64 .support_ct_kill_exit = true,
65 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, 65 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
66 .chain_noise_scale = 1000, 66 .chain_noise_scale = 1000,
67 .wd_timeout = IWL_WATCHHDOG_DISABLED, 67 .wd_timeout = IWL_WATCHDOG_DISABLED,
68 .max_event_log_size = 128, 68 .max_event_log_size = 128,
69}; 69};
70 70
71static const struct iwl_ht_params iwl1000_ht_params = { 71static const struct iwl_ht_params iwl1000_ht_params = {
72 .ht_greenfield_support = true, 72 .ht_greenfield_support = true,
73 .use_rts_for_aggregation = true, /* use rts/cts protection */ 73 .use_rts_for_aggregation = true, /* use rts/cts protection */
74 .ht40_bands = BIT(IEEE80211_BAND_2GHZ),
75};
76
77static const struct iwl_eeprom_params iwl1000_eeprom_params = {
78 .regulatory_bands = {
79 EEPROM_REG_BAND_1_CHANNELS,
80 EEPROM_REG_BAND_2_CHANNELS,
81 EEPROM_REG_BAND_3_CHANNELS,
82 EEPROM_REG_BAND_4_CHANNELS,
83 EEPROM_REG_BAND_5_CHANNELS,
84 EEPROM_REG_BAND_24_HT40_CHANNELS,
85 EEPROM_REGULATORY_BAND_NO_HT40,
86 }
74}; 87};
75 88
76#define IWL_DEVICE_1000 \ 89#define IWL_DEVICE_1000 \
@@ -84,6 +97,7 @@ static const struct iwl_ht_params iwl1000_ht_params = {
84 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \ 97 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
85 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ 98 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
86 .base_params = &iwl1000_base_params, \ 99 .base_params = &iwl1000_base_params, \
100 .eeprom_params = &iwl1000_eeprom_params, \
87 .led_mode = IWL_LED_BLINK 101 .led_mode = IWL_LED_BLINK
88 102
89const struct iwl_cfg iwl1000_bgn_cfg = { 103const struct iwl_cfg iwl1000_bgn_cfg = {
@@ -108,6 +122,7 @@ const struct iwl_cfg iwl1000_bg_cfg = {
108 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \ 122 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
109 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ 123 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
110 .base_params = &iwl1000_base_params, \ 124 .base_params = &iwl1000_base_params, \
125 .eeprom_params = &iwl1000_eeprom_params, \
111 .led_mode = IWL_LED_RF_STATE, \ 126 .led_mode = IWL_LED_RF_STATE, \
112 .rx_with_siso_diversity = true 127 .rx_with_siso_diversity = true
113 128
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/pcie/2000.c
index 8133105ac645..fd4e78f56fa6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/pcie/2000.c
@@ -27,9 +27,9 @@
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/stringify.h> 28#include <linux/stringify.h>
29#include "iwl-config.h" 29#include "iwl-config.h"
30#include "iwl-cfg.h"
31#include "iwl-agn-hw.h" 30#include "iwl-agn-hw.h"
32#include "iwl-commands.h" /* needed for BT for now */ 31#include "cfg.h"
32#include "dvm/commands.h" /* needed for BT for now */
33 33
34/* Highest firmware API version supported */ 34/* Highest firmware API version supported */
35#define IWL2030_UCODE_API_MAX 6 35#define IWL2030_UCODE_API_MAX 6
@@ -104,6 +104,7 @@ static const struct iwl_base_params iwl2030_base_params = {
104static const struct iwl_ht_params iwl2000_ht_params = { 104static const struct iwl_ht_params iwl2000_ht_params = {
105 .ht_greenfield_support = true, 105 .ht_greenfield_support = true,
106 .use_rts_for_aggregation = true, /* use rts/cts protection */ 106 .use_rts_for_aggregation = true, /* use rts/cts protection */
107 .ht40_bands = BIT(IEEE80211_BAND_2GHZ),
107}; 108};
108 109
109static const struct iwl_bt_params iwl2030_bt_params = { 110static const struct iwl_bt_params iwl2030_bt_params = {
@@ -116,6 +117,19 @@ static const struct iwl_bt_params iwl2030_bt_params = {
116 .bt_session_2 = true, 117 .bt_session_2 = true,
117}; 118};
118 119
120static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
121 .regulatory_bands = {
122 EEPROM_REG_BAND_1_CHANNELS,
123 EEPROM_REG_BAND_2_CHANNELS,
124 EEPROM_REG_BAND_3_CHANNELS,
125 EEPROM_REG_BAND_4_CHANNELS,
126 EEPROM_REG_BAND_5_CHANNELS,
127 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
128 EEPROM_REGULATORY_BAND_NO_HT40,
129 },
130 .enhanced_txpower = true,
131};
132
119#define IWL_DEVICE_2000 \ 133#define IWL_DEVICE_2000 \
120 .fw_name_pre = IWL2000_FW_PRE, \ 134 .fw_name_pre = IWL2000_FW_PRE, \
121 .ucode_api_max = IWL2000_UCODE_API_MAX, \ 135 .ucode_api_max = IWL2000_UCODE_API_MAX, \
@@ -127,6 +141,7 @@ static const struct iwl_bt_params iwl2030_bt_params = {
127 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ 141 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
128 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 142 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
129 .base_params = &iwl2000_base_params, \ 143 .base_params = &iwl2000_base_params, \
144 .eeprom_params = &iwl20x0_eeprom_params, \
130 .need_temp_offset_calib = true, \ 145 .need_temp_offset_calib = true, \
131 .temp_offset_v2 = true, \ 146 .temp_offset_v2 = true, \
132 .led_mode = IWL_LED_RF_STATE 147 .led_mode = IWL_LED_RF_STATE
@@ -155,6 +170,7 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = {
155 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 170 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
156 .base_params = &iwl2030_base_params, \ 171 .base_params = &iwl2030_base_params, \
157 .bt_params = &iwl2030_bt_params, \ 172 .bt_params = &iwl2030_bt_params, \
173 .eeprom_params = &iwl20x0_eeprom_params, \
158 .need_temp_offset_calib = true, \ 174 .need_temp_offset_calib = true, \
159 .temp_offset_v2 = true, \ 175 .temp_offset_v2 = true, \
160 .led_mode = IWL_LED_RF_STATE, \ 176 .led_mode = IWL_LED_RF_STATE, \
@@ -177,6 +193,7 @@ const struct iwl_cfg iwl2030_2bgn_cfg = {
177 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ 193 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
178 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 194 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
179 .base_params = &iwl2000_base_params, \ 195 .base_params = &iwl2000_base_params, \
196 .eeprom_params = &iwl20x0_eeprom_params, \
180 .need_temp_offset_calib = true, \ 197 .need_temp_offset_calib = true, \
181 .temp_offset_v2 = true, \ 198 .temp_offset_v2 = true, \
182 .led_mode = IWL_LED_RF_STATE, \ 199 .led_mode = IWL_LED_RF_STATE, \
@@ -207,6 +224,7 @@ const struct iwl_cfg iwl105_bgn_d_cfg = {
207 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 224 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
208 .base_params = &iwl2030_base_params, \ 225 .base_params = &iwl2030_base_params, \
209 .bt_params = &iwl2030_bt_params, \ 226 .bt_params = &iwl2030_bt_params, \
227 .eeprom_params = &iwl20x0_eeprom_params, \
210 .need_temp_offset_calib = true, \ 228 .need_temp_offset_calib = true, \
211 .temp_offset_v2 = true, \ 229 .temp_offset_v2 = true, \
212 .led_mode = IWL_LED_RF_STATE, \ 230 .led_mode = IWL_LED_RF_STATE, \
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/pcie/5000.c
index 8e26bc825f23..d1665fa6d15a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/pcie/5000.c
@@ -27,9 +27,9 @@
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/stringify.h> 28#include <linux/stringify.h>
29#include "iwl-config.h" 29#include "iwl-config.h"
30#include "iwl-cfg.h"
31#include "iwl-agn-hw.h" 30#include "iwl-agn-hw.h"
32#include "iwl-csr.h" 31#include "iwl-csr.h"
32#include "cfg.h"
33 33
34/* Highest firmware API version supported */ 34/* Highest firmware API version supported */
35#define IWL5000_UCODE_API_MAX 5 35#define IWL5000_UCODE_API_MAX 5
@@ -62,13 +62,26 @@ static const struct iwl_base_params iwl5000_base_params = {
62 .led_compensation = 51, 62 .led_compensation = 51,
63 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 63 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
64 .chain_noise_scale = 1000, 64 .chain_noise_scale = 1000,
65 .wd_timeout = IWL_WATCHHDOG_DISABLED, 65 .wd_timeout = IWL_WATCHDOG_DISABLED,
66 .max_event_log_size = 512, 66 .max_event_log_size = 512,
67 .no_idle_support = true, 67 .no_idle_support = true,
68}; 68};
69 69
70static const struct iwl_ht_params iwl5000_ht_params = { 70static const struct iwl_ht_params iwl5000_ht_params = {
71 .ht_greenfield_support = true, 71 .ht_greenfield_support = true,
72 .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
73};
74
75static const struct iwl_eeprom_params iwl5000_eeprom_params = {
76 .regulatory_bands = {
77 EEPROM_REG_BAND_1_CHANNELS,
78 EEPROM_REG_BAND_2_CHANNELS,
79 EEPROM_REG_BAND_3_CHANNELS,
80 EEPROM_REG_BAND_4_CHANNELS,
81 EEPROM_REG_BAND_5_CHANNELS,
82 EEPROM_REG_BAND_24_HT40_CHANNELS,
83 EEPROM_REG_BAND_52_HT40_CHANNELS
84 },
72}; 85};
73 86
74#define IWL_DEVICE_5000 \ 87#define IWL_DEVICE_5000 \
@@ -82,6 +95,7 @@ static const struct iwl_ht_params iwl5000_ht_params = {
82 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, \ 95 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, \
83 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, \ 96 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, \
84 .base_params = &iwl5000_base_params, \ 97 .base_params = &iwl5000_base_params, \
98 .eeprom_params = &iwl5000_eeprom_params, \
85 .led_mode = IWL_LED_BLINK 99 .led_mode = IWL_LED_BLINK
86 100
87const struct iwl_cfg iwl5300_agn_cfg = { 101const struct iwl_cfg iwl5300_agn_cfg = {
@@ -128,6 +142,7 @@ const struct iwl_cfg iwl5350_agn_cfg = {
128 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, 142 .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
129 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, 143 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
130 .base_params = &iwl5000_base_params, 144 .base_params = &iwl5000_base_params,
145 .eeprom_params = &iwl5000_eeprom_params,
131 .ht_params = &iwl5000_ht_params, 146 .ht_params = &iwl5000_ht_params,
132 .led_mode = IWL_LED_BLINK, 147 .led_mode = IWL_LED_BLINK,
133 .internal_wimax_coex = true, 148 .internal_wimax_coex = true,
@@ -144,6 +159,7 @@ const struct iwl_cfg iwl5350_agn_cfg = {
144 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, \ 159 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, \
145 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \ 160 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \
146 .base_params = &iwl5000_base_params, \ 161 .base_params = &iwl5000_base_params, \
162 .eeprom_params = &iwl5000_eeprom_params, \
147 .no_xtal_calib = true, \ 163 .no_xtal_calib = true, \
148 .led_mode = IWL_LED_BLINK, \ 164 .led_mode = IWL_LED_BLINK, \
149 .internal_wimax_coex = true 165 .internal_wimax_coex = true
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/pcie/6000.c
index e5e8ada4aaf6..4a57624afc40 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/pcie/6000.c
@@ -27,9 +27,9 @@
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/stringify.h> 28#include <linux/stringify.h>
29#include "iwl-config.h" 29#include "iwl-config.h"
30#include "iwl-cfg.h"
31#include "iwl-agn-hw.h" 30#include "iwl-agn-hw.h"
32#include "iwl-commands.h" /* needed for BT for now */ 31#include "cfg.h"
32#include "dvm/commands.h" /* needed for BT for now */
33 33
34/* Highest firmware API version supported */ 34/* Highest firmware API version supported */
35#define IWL6000_UCODE_API_MAX 6 35#define IWL6000_UCODE_API_MAX 6
@@ -127,6 +127,7 @@ static const struct iwl_base_params iwl6000_g2_base_params = {
127static const struct iwl_ht_params iwl6000_ht_params = { 127static const struct iwl_ht_params iwl6000_ht_params = {
128 .ht_greenfield_support = true, 128 .ht_greenfield_support = true,
129 .use_rts_for_aggregation = true, /* use rts/cts protection */ 129 .use_rts_for_aggregation = true, /* use rts/cts protection */
130 .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
130}; 131};
131 132
132static const struct iwl_bt_params iwl6000_bt_params = { 133static const struct iwl_bt_params iwl6000_bt_params = {
@@ -138,6 +139,19 @@ static const struct iwl_bt_params iwl6000_bt_params = {
138 .bt_sco_disable = true, 139 .bt_sco_disable = true,
139}; 140};
140 141
142static const struct iwl_eeprom_params iwl6000_eeprom_params = {
143 .regulatory_bands = {
144 EEPROM_REG_BAND_1_CHANNELS,
145 EEPROM_REG_BAND_2_CHANNELS,
146 EEPROM_REG_BAND_3_CHANNELS,
147 EEPROM_REG_BAND_4_CHANNELS,
148 EEPROM_REG_BAND_5_CHANNELS,
149 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
150 EEPROM_REG_BAND_52_HT40_CHANNELS
151 },
152 .enhanced_txpower = true,
153};
154
141#define IWL_DEVICE_6005 \ 155#define IWL_DEVICE_6005 \
142 .fw_name_pre = IWL6005_FW_PRE, \ 156 .fw_name_pre = IWL6005_FW_PRE, \
143 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \ 157 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
@@ -149,6 +163,7 @@ static const struct iwl_bt_params iwl6000_bt_params = {
149 .eeprom_ver = EEPROM_6005_EEPROM_VERSION, \ 163 .eeprom_ver = EEPROM_6005_EEPROM_VERSION, \
150 .eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \ 164 .eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
151 .base_params = &iwl6000_g2_base_params, \ 165 .base_params = &iwl6000_g2_base_params, \
166 .eeprom_params = &iwl6000_eeprom_params, \
152 .need_temp_offset_calib = true, \ 167 .need_temp_offset_calib = true, \
153 .led_mode = IWL_LED_RF_STATE 168 .led_mode = IWL_LED_RF_STATE
154 169
@@ -204,6 +219,7 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
204 .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ 219 .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
205 .base_params = &iwl6000_g2_base_params, \ 220 .base_params = &iwl6000_g2_base_params, \
206 .bt_params = &iwl6000_bt_params, \ 221 .bt_params = &iwl6000_bt_params, \
222 .eeprom_params = &iwl6000_eeprom_params, \
207 .need_temp_offset_calib = true, \ 223 .need_temp_offset_calib = true, \
208 .led_mode = IWL_LED_RF_STATE, \ 224 .led_mode = IWL_LED_RF_STATE, \
209 .adv_pm = true \ 225 .adv_pm = true \
@@ -242,6 +258,7 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
242 .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ 258 .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
243 .base_params = &iwl6000_g2_base_params, \ 259 .base_params = &iwl6000_g2_base_params, \
244 .bt_params = &iwl6000_bt_params, \ 260 .bt_params = &iwl6000_bt_params, \
261 .eeprom_params = &iwl6000_eeprom_params, \
245 .need_temp_offset_calib = true, \ 262 .need_temp_offset_calib = true, \
246 .led_mode = IWL_LED_RF_STATE, \ 263 .led_mode = IWL_LED_RF_STATE, \
247 .adv_pm = true 264 .adv_pm = true
@@ -292,6 +309,7 @@ const struct iwl_cfg iwl130_bg_cfg = {
292 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, \ 309 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, \
293 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, \ 310 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, \
294 .base_params = &iwl6000_base_params, \ 311 .base_params = &iwl6000_base_params, \
312 .eeprom_params = &iwl6000_eeprom_params, \
295 .led_mode = IWL_LED_BLINK 313 .led_mode = IWL_LED_BLINK
296 314
297const struct iwl_cfg iwl6000i_2agn_cfg = { 315const struct iwl_cfg iwl6000i_2agn_cfg = {
@@ -322,6 +340,7 @@ const struct iwl_cfg iwl6000i_2bg_cfg = {
322 .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \ 340 .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \
323 .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \ 341 .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \
324 .base_params = &iwl6050_base_params, \ 342 .base_params = &iwl6050_base_params, \
343 .eeprom_params = &iwl6000_eeprom_params, \
325 .led_mode = IWL_LED_BLINK, \ 344 .led_mode = IWL_LED_BLINK, \
326 .internal_wimax_coex = true 345 .internal_wimax_coex = true
327 346
@@ -346,6 +365,7 @@ const struct iwl_cfg iwl6050_2abg_cfg = {
346 .eeprom_ver = EEPROM_6150_EEPROM_VERSION, \ 365 .eeprom_ver = EEPROM_6150_EEPROM_VERSION, \
347 .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \ 366 .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \
348 .base_params = &iwl6050_base_params, \ 367 .base_params = &iwl6050_base_params, \
368 .eeprom_params = &iwl6000_eeprom_params, \
349 .led_mode = IWL_LED_BLINK, \ 369 .led_mode = IWL_LED_BLINK, \
350 .internal_wimax_coex = true 370 .internal_wimax_coex = true
351 371
@@ -372,6 +392,7 @@ const struct iwl_cfg iwl6000_3agn_cfg = {
372 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 392 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
373 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, 393 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
374 .base_params = &iwl6000_base_params, 394 .base_params = &iwl6000_base_params,
395 .eeprom_params = &iwl6000_eeprom_params,
375 .ht_params = &iwl6000_ht_params, 396 .ht_params = &iwl6000_ht_params,
376 .led_mode = IWL_LED_BLINK, 397 .led_mode = IWL_LED_BLINK,
377}; 398};
diff --git a/drivers/net/wireless/iwlwifi/iwl-cfg.h b/drivers/net/wireless/iwlwifi/pcie/cfg.h
index 82152311d73b..82152311d73b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-cfg.h
+++ b/drivers/net/wireless/iwlwifi/pcie/cfg.h
diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 0c8a1c2d8847..f4c3500b68c6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-pci.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -68,10 +68,11 @@
68#include <linux/pci-aspm.h> 68#include <linux/pci-aspm.h>
69 69
70#include "iwl-trans.h" 70#include "iwl-trans.h"
71#include "iwl-cfg.h"
72#include "iwl-drv.h" 71#include "iwl-drv.h"
73#include "iwl-trans.h" 72#include "iwl-trans.h"
74#include "iwl-trans-pcie-int.h" 73
74#include "cfg.h"
75#include "internal.h"
75 76
76#define IWL_PCI_DEVICE(dev, subdev, cfg) \ 77#define IWL_PCI_DEVICE(dev, subdev, cfg) \
77 .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \ 78 .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index e959207c630a..d9694c58208c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -269,10 +269,9 @@ struct iwl_trans_pcie {
269 wait_queue_head_t ucode_write_waitq; 269 wait_queue_head_t ucode_write_waitq;
270 unsigned long status; 270 unsigned long status;
271 u8 cmd_queue; 271 u8 cmd_queue;
272 u8 cmd_fifo;
272 u8 n_no_reclaim_cmds; 273 u8 n_no_reclaim_cmds;
273 u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS]; 274 u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
274 u8 setup_q_to_fifo[IWL_MAX_HW_QUEUES];
275 u8 n_q_to_fifo;
276 275
277 bool rx_buf_size_8k; 276 bool rx_buf_size_8k;
278 u32 rx_page_order; 277 u32 rx_page_order;
@@ -313,7 +312,7 @@ void iwl_bg_rx_replenish(struct work_struct *data);
313void iwl_irq_tasklet(struct iwl_trans *trans); 312void iwl_irq_tasklet(struct iwl_trans *trans);
314void iwlagn_rx_replenish(struct iwl_trans *trans); 313void iwlagn_rx_replenish(struct iwl_trans *trans);
315void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans, 314void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
316 struct iwl_rx_queue *q); 315 struct iwl_rx_queue *q);
317 316
318/***************************************************** 317/*****************************************************
319* ICT 318* ICT
@@ -328,7 +327,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data);
328* TX / HCMD 327* TX / HCMD
329******************************************************/ 328******************************************************/
330void iwl_txq_update_write_ptr(struct iwl_trans *trans, 329void iwl_txq_update_write_ptr(struct iwl_trans *trans,
331 struct iwl_tx_queue *txq); 330 struct iwl_tx_queue *txq);
332int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans, 331int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
333 struct iwl_tx_queue *txq, 332 struct iwl_tx_queue *txq,
334 dma_addr_t addr, u16 len, u8 reset); 333 dma_addr_t addr, u16 len, u8 reset);
@@ -337,17 +336,13 @@ int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
337void iwl_tx_cmd_complete(struct iwl_trans *trans, 336void iwl_tx_cmd_complete(struct iwl_trans *trans,
338 struct iwl_rx_cmd_buffer *rxb, int handler_status); 337 struct iwl_rx_cmd_buffer *rxb, int handler_status);
339void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans, 338void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
340 struct iwl_tx_queue *txq, 339 struct iwl_tx_queue *txq,
341 u16 byte_cnt); 340 u16 byte_cnt);
342void iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int queue); 341void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
343void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index); 342 int sta_id, int tid, int frame_limit, u16 ssn);
344void iwl_trans_tx_queue_set_status(struct iwl_trans *trans, 343void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue);
345 struct iwl_tx_queue *txq, 344void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
346 int tx_fifo_id, bool active); 345 enum dma_data_direction dma_dir);
347void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int queue, int fifo,
348 int sta_id, int tid, int frame_limit, u16 ssn);
349void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
350 enum dma_data_direction dma_dir);
351int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index, 346int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
352 struct sk_buff_head *skbs); 347 struct sk_buff_head *skbs);
353int iwl_queue_space(const struct iwl_queue *q); 348int iwl_queue_space(const struct iwl_queue *q);
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 08517d3c80bb..39a6ca1f009c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -32,7 +32,7 @@
32 32
33#include "iwl-prph.h" 33#include "iwl-prph.h"
34#include "iwl-io.h" 34#include "iwl-io.h"
35#include "iwl-trans-pcie-int.h" 35#include "internal.h"
36#include "iwl-op-mode.h" 36#include "iwl-op-mode.h"
37 37
38#ifdef CONFIG_IWLWIFI_IDI 38#ifdef CONFIG_IWLWIFI_IDI
@@ -130,7 +130,7 @@ static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
130 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue 130 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
131 */ 131 */
132void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans, 132void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
133 struct iwl_rx_queue *q) 133 struct iwl_rx_queue *q)
134{ 134{
135 unsigned long flags; 135 unsigned long flags;
136 u32 reg; 136 u32 reg;
@@ -201,9 +201,7 @@ static inline __le32 iwlagn_dma_addr2rbd_ptr(dma_addr_t dma_addr)
201 */ 201 */
202static void iwlagn_rx_queue_restock(struct iwl_trans *trans) 202static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
203{ 203{
204 struct iwl_trans_pcie *trans_pcie = 204 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
205 IWL_TRANS_GET_PCIE_TRANS(trans);
206
207 struct iwl_rx_queue *rxq = &trans_pcie->rxq; 205 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
208 struct list_head *element; 206 struct list_head *element;
209 struct iwl_rx_mem_buffer *rxb; 207 struct iwl_rx_mem_buffer *rxb;
@@ -253,9 +251,7 @@ static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
253 */ 251 */
254static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority) 252static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
255{ 253{
256 struct iwl_trans_pcie *trans_pcie = 254 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
257 IWL_TRANS_GET_PCIE_TRANS(trans);
258
259 struct iwl_rx_queue *rxq = &trans_pcie->rxq; 255 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
260 struct list_head *element; 256 struct list_head *element;
261 struct iwl_rx_mem_buffer *rxb; 257 struct iwl_rx_mem_buffer *rxb;
@@ -278,8 +274,7 @@ static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
278 gfp_mask |= __GFP_COMP; 274 gfp_mask |= __GFP_COMP;
279 275
280 /* Alloc a new receive buffer */ 276 /* Alloc a new receive buffer */
281 page = alloc_pages(gfp_mask, 277 page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
282 trans_pcie->rx_page_order);
283 if (!page) { 278 if (!page) {
284 if (net_ratelimit()) 279 if (net_ratelimit())
285 IWL_DEBUG_INFO(trans, "alloc_pages failed, " 280 IWL_DEBUG_INFO(trans, "alloc_pages failed, "
@@ -315,9 +310,10 @@ static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
315 BUG_ON(rxb->page); 310 BUG_ON(rxb->page);
316 rxb->page = page; 311 rxb->page = page;
317 /* Get physical address of the RB */ 312 /* Get physical address of the RB */
318 rxb->page_dma = dma_map_page(trans->dev, page, 0, 313 rxb->page_dma =
319 PAGE_SIZE << trans_pcie->rx_page_order, 314 dma_map_page(trans->dev, page, 0,
320 DMA_FROM_DEVICE); 315 PAGE_SIZE << trans_pcie->rx_page_order,
316 DMA_FROM_DEVICE);
321 /* dma address must be no more than 36 bits */ 317 /* dma address must be no more than 36 bits */
322 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); 318 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
323 /* and also 256 byte aligned! */ 319 /* and also 256 byte aligned! */
@@ -465,8 +461,8 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
465 if (rxb->page != NULL) { 461 if (rxb->page != NULL) {
466 rxb->page_dma = 462 rxb->page_dma =
467 dma_map_page(trans->dev, rxb->page, 0, 463 dma_map_page(trans->dev, rxb->page, 0,
468 PAGE_SIZE << trans_pcie->rx_page_order, 464 PAGE_SIZE << trans_pcie->rx_page_order,
469 DMA_FROM_DEVICE); 465 DMA_FROM_DEVICE);
470 list_add_tail(&rxb->list, &rxq->rx_free); 466 list_add_tail(&rxb->list, &rxq->rx_free);
471 rxq->free_count++; 467 rxq->free_count++;
472 } else 468 } else
@@ -497,7 +493,7 @@ static void iwl_rx_handle(struct iwl_trans *trans)
497 493
498 /* Rx interrupt, but nothing sent from uCode */ 494 /* Rx interrupt, but nothing sent from uCode */
499 if (i == r) 495 if (i == r)
500 IWL_DEBUG_RX(trans, "r = %d, i = %d\n", r, i); 496 IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
501 497
502 /* calculate total frames need to be restock after handling RX */ 498 /* calculate total frames need to be restock after handling RX */
503 total_empty = r - rxq->write_actual; 499 total_empty = r - rxq->write_actual;
@@ -513,8 +509,8 @@ static void iwl_rx_handle(struct iwl_trans *trans)
513 rxb = rxq->queue[i]; 509 rxb = rxq->queue[i];
514 rxq->queue[i] = NULL; 510 rxq->queue[i] = NULL;
515 511
516 IWL_DEBUG_RX(trans, "rxbuf: r = %d, i = %d (%p)\n", rxb); 512 IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
517 513 r, i, rxb);
518 iwl_rx_handle_rxbuf(trans, rxb); 514 iwl_rx_handle_rxbuf(trans, rxb);
519 515
520 i = (i + 1) & RX_QUEUE_MASK; 516 i = (i + 1) & RX_QUEUE_MASK;
@@ -546,12 +542,12 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
546 /* W/A for WiFi/WiMAX coex and WiMAX own the RF */ 542 /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
547 if (trans->cfg->internal_wimax_coex && 543 if (trans->cfg->internal_wimax_coex &&
548 (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) & 544 (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
549 APMS_CLK_VAL_MRB_FUNC_MODE) || 545 APMS_CLK_VAL_MRB_FUNC_MODE) ||
550 (iwl_read_prph(trans, APMG_PS_CTRL_REG) & 546 (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
551 APMG_PS_CTRL_VAL_RESET_REQ))) { 547 APMG_PS_CTRL_VAL_RESET_REQ))) {
552 struct iwl_trans_pcie *trans_pcie; 548 struct iwl_trans_pcie *trans_pcie =
549 IWL_TRANS_GET_PCIE_TRANS(trans);
553 550
554 trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
555 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); 551 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
556 iwl_op_mode_wimax_active(trans->op_mode); 552 iwl_op_mode_wimax_active(trans->op_mode);
557 wake_up(&trans->wait_command_queue); 553 wake_up(&trans->wait_command_queue);
@@ -567,6 +563,8 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
567/* tasklet for iwlagn interrupt */ 563/* tasklet for iwlagn interrupt */
568void iwl_irq_tasklet(struct iwl_trans *trans) 564void iwl_irq_tasklet(struct iwl_trans *trans)
569{ 565{
566 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
567 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
570 u32 inta = 0; 568 u32 inta = 0;
571 u32 handled = 0; 569 u32 handled = 0;
572 unsigned long flags; 570 unsigned long flags;
@@ -575,10 +573,6 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
575 u32 inta_mask; 573 u32 inta_mask;
576#endif 574#endif
577 575
578 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
579 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
580
581
582 spin_lock_irqsave(&trans_pcie->irq_lock, flags); 576 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
583 577
584 /* Ack/clear/reset pending uCode interrupts. 578 /* Ack/clear/reset pending uCode interrupts.
@@ -593,7 +587,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
593 * interrupt coalescing can still be achieved. 587 * interrupt coalescing can still be achieved.
594 */ 588 */
595 iwl_write32(trans, CSR_INT, 589 iwl_write32(trans, CSR_INT,
596 trans_pcie->inta | ~trans_pcie->inta_mask); 590 trans_pcie->inta | ~trans_pcie->inta_mask);
597 591
598 inta = trans_pcie->inta; 592 inta = trans_pcie->inta;
599 593
@@ -602,7 +596,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
602 /* just for debug */ 596 /* just for debug */
603 inta_mask = iwl_read32(trans, CSR_INT_MASK); 597 inta_mask = iwl_read32(trans, CSR_INT_MASK);
604 IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n", 598 IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
605 inta, inta_mask); 599 inta, inta_mask);
606 } 600 }
607#endif 601#endif
608 602
@@ -651,7 +645,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
651 645
652 hw_rfkill = iwl_is_rfkill_set(trans); 646 hw_rfkill = iwl_is_rfkill_set(trans);
653 IWL_WARN(trans, "RF_KILL bit toggled to %s.\n", 647 IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
654 hw_rfkill ? "disable radio" : "enable radio"); 648 hw_rfkill ? "disable radio" : "enable radio");
655 649
656 isr_stats->rfkill++; 650 isr_stats->rfkill++;
657 651
@@ -693,7 +687,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
693 * Rx "responses" (frame-received notification), and other 687 * Rx "responses" (frame-received notification), and other
694 * notifications from uCode come through here*/ 688 * notifications from uCode come through here*/
695 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX | 689 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
696 CSR_INT_BIT_RX_PERIODIC)) { 690 CSR_INT_BIT_RX_PERIODIC)) {
697 IWL_DEBUG_ISR(trans, "Rx interrupt\n"); 691 IWL_DEBUG_ISR(trans, "Rx interrupt\n");
698 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { 692 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
699 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); 693 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
@@ -733,7 +727,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
733 */ 727 */
734 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) 728 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
735 iwl_write8(trans, CSR_INT_PERIODIC_REG, 729 iwl_write8(trans, CSR_INT_PERIODIC_REG,
736 CSR_INT_PERIODIC_ENA); 730 CSR_INT_PERIODIC_ENA);
737 731
738 isr_stats->rx++; 732 isr_stats->rx++;
739 } 733 }
@@ -782,8 +776,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
782/* Free dram table */ 776/* Free dram table */
783void iwl_free_isr_ict(struct iwl_trans *trans) 777void iwl_free_isr_ict(struct iwl_trans *trans)
784{ 778{
785 struct iwl_trans_pcie *trans_pcie = 779 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
786 IWL_TRANS_GET_PCIE_TRANS(trans);
787 780
788 if (trans_pcie->ict_tbl) { 781 if (trans_pcie->ict_tbl) {
789 dma_free_coherent(trans->dev, ICT_SIZE, 782 dma_free_coherent(trans->dev, ICT_SIZE,
@@ -802,8 +795,7 @@ void iwl_free_isr_ict(struct iwl_trans *trans)
802 */ 795 */
803int iwl_alloc_isr_ict(struct iwl_trans *trans) 796int iwl_alloc_isr_ict(struct iwl_trans *trans)
804{ 797{
805 struct iwl_trans_pcie *trans_pcie = 798 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
806 IWL_TRANS_GET_PCIE_TRANS(trans);
807 799
808 trans_pcie->ict_tbl = 800 trans_pcie->ict_tbl =
809 dma_alloc_coherent(trans->dev, ICT_SIZE, 801 dma_alloc_coherent(trans->dev, ICT_SIZE,
@@ -837,10 +829,9 @@ int iwl_alloc_isr_ict(struct iwl_trans *trans)
837 */ 829 */
838void iwl_reset_ict(struct iwl_trans *trans) 830void iwl_reset_ict(struct iwl_trans *trans)
839{ 831{
832 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
840 u32 val; 833 u32 val;
841 unsigned long flags; 834 unsigned long flags;
842 struct iwl_trans_pcie *trans_pcie =
843 IWL_TRANS_GET_PCIE_TRANS(trans);
844 835
845 if (!trans_pcie->ict_tbl) 836 if (!trans_pcie->ict_tbl)
846 return; 837 return;
@@ -868,9 +859,7 @@ void iwl_reset_ict(struct iwl_trans *trans)
868/* Device is going down disable ict interrupt usage */ 859/* Device is going down disable ict interrupt usage */
869void iwl_disable_ict(struct iwl_trans *trans) 860void iwl_disable_ict(struct iwl_trans *trans)
870{ 861{
871 struct iwl_trans_pcie *trans_pcie = 862 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
872 IWL_TRANS_GET_PCIE_TRANS(trans);
873
874 unsigned long flags; 863 unsigned long flags;
875 864
876 spin_lock_irqsave(&trans_pcie->irq_lock, flags); 865 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
@@ -878,23 +867,19 @@ void iwl_disable_ict(struct iwl_trans *trans)
878 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); 867 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
879} 868}
880 869
870/* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */
881static irqreturn_t iwl_isr(int irq, void *data) 871static irqreturn_t iwl_isr(int irq, void *data)
882{ 872{
883 struct iwl_trans *trans = data; 873 struct iwl_trans *trans = data;
884 struct iwl_trans_pcie *trans_pcie; 874 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
885 u32 inta, inta_mask; 875 u32 inta, inta_mask;
886 unsigned long flags;
887#ifdef CONFIG_IWLWIFI_DEBUG 876#ifdef CONFIG_IWLWIFI_DEBUG
888 u32 inta_fh; 877 u32 inta_fh;
889#endif 878#endif
890 if (!trans)
891 return IRQ_NONE;
892 879
893 trace_iwlwifi_dev_irq(trans->dev); 880 lockdep_assert_held(&trans_pcie->irq_lock);
894 881
895 trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 882 trace_iwlwifi_dev_irq(trans->dev);
896
897 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
898 883
899 /* Disable (but don't clear!) interrupts here to avoid 884 /* Disable (but don't clear!) interrupts here to avoid
900 * back-to-back ISRs and sporadic interrupts from our NIC. 885 * back-to-back ISRs and sporadic interrupts from our NIC.
@@ -918,7 +903,7 @@ static irqreturn_t iwl_isr(int irq, void *data)
918 /* Hardware disappeared. It might have already raised 903 /* Hardware disappeared. It might have already raised
919 * an interrupt */ 904 * an interrupt */
920 IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta); 905 IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
921 goto unplugged; 906 return IRQ_HANDLED;
922 } 907 }
923 908
924#ifdef CONFIG_IWLWIFI_DEBUG 909#ifdef CONFIG_IWLWIFI_DEBUG
@@ -934,21 +919,16 @@ static irqreturn_t iwl_isr(int irq, void *data)
934 if (likely(inta)) 919 if (likely(inta))
935 tasklet_schedule(&trans_pcie->irq_tasklet); 920 tasklet_schedule(&trans_pcie->irq_tasklet);
936 else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) && 921 else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
937 !trans_pcie->inta) 922 !trans_pcie->inta)
938 iwl_enable_interrupts(trans); 923 iwl_enable_interrupts(trans);
939 924
940 unplugged: 925none:
941 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
942 return IRQ_HANDLED;
943
944 none:
945 /* re-enable interrupts here since we don't have anything to service. */ 926 /* re-enable interrupts here since we don't have anything to service. */
946 /* only Re-enable if disabled by irq and no schedules tasklet. */ 927 /* only Re-enable if disabled by irq and no schedules tasklet. */
947 if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) && 928 if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
948 !trans_pcie->inta) 929 !trans_pcie->inta)
949 iwl_enable_interrupts(trans); 930 iwl_enable_interrupts(trans);
950 931
951 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
952 return IRQ_NONE; 932 return IRQ_NONE;
953} 933}
954 934
@@ -974,15 +954,19 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
974 954
975 trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 955 trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
976 956
957 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
958
977 /* dram interrupt table not set yet, 959 /* dram interrupt table not set yet,
978 * use legacy interrupt. 960 * use legacy interrupt.
979 */ 961 */
980 if (!trans_pcie->use_ict) 962 if (unlikely(!trans_pcie->use_ict)) {
981 return iwl_isr(irq, data); 963 irqreturn_t ret = iwl_isr(irq, data);
964 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
965 return ret;
966 }
982 967
983 trace_iwlwifi_dev_irq(trans->dev); 968 trace_iwlwifi_dev_irq(trans->dev);
984 969
985 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
986 970
987 /* Disable (but don't clear!) interrupts here to avoid 971 /* Disable (but don't clear!) interrupts here to avoid
988 * back-to-back ISRs and sporadic interrupts from our NIC. 972 * back-to-back ISRs and sporadic interrupts from our NIC.
@@ -1036,7 +1020,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
1036 1020
1037 inta = (0xff & val) | ((0xff00 & val) << 16); 1021 inta = (0xff & val) | ((0xff00 & val) << 16);
1038 IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n", 1022 IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
1039 inta, inta_mask, val); 1023 inta, inta_mask, val);
1040 1024
1041 inta &= trans_pcie->inta_mask; 1025 inta &= trans_pcie->inta_mask;
1042 trans_pcie->inta |= inta; 1026 trans_pcie->inta |= inta;
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 79c6b91417f9..09795afccb23 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -70,15 +70,12 @@
70 70
71#include "iwl-drv.h" 71#include "iwl-drv.h"
72#include "iwl-trans.h" 72#include "iwl-trans.h"
73#include "iwl-trans-pcie-int.h"
74#include "iwl-csr.h" 73#include "iwl-csr.h"
75#include "iwl-prph.h" 74#include "iwl-prph.h"
76#include "iwl-eeprom.h"
77#include "iwl-agn-hw.h" 75#include "iwl-agn-hw.h"
76#include "internal.h"
78/* FIXME: need to abstract out TX command (once we know what it looks like) */ 77/* FIXME: need to abstract out TX command (once we know what it looks like) */
79#include "iwl-commands.h" 78#include "dvm/commands.h"
80
81#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
82 79
83#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie) \ 80#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie) \
84 (((1<<trans->cfg->base_params->num_of_queues) - 1) &\ 81 (((1<<trans->cfg->base_params->num_of_queues) - 1) &\
@@ -86,8 +83,7 @@
86 83
87static int iwl_trans_rx_alloc(struct iwl_trans *trans) 84static int iwl_trans_rx_alloc(struct iwl_trans *trans)
88{ 85{
89 struct iwl_trans_pcie *trans_pcie = 86 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
90 IWL_TRANS_GET_PCIE_TRANS(trans);
91 struct iwl_rx_queue *rxq = &trans_pcie->rxq; 87 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
92 struct device *dev = trans->dev; 88 struct device *dev = trans->dev;
93 89
@@ -114,7 +110,7 @@ static int iwl_trans_rx_alloc(struct iwl_trans *trans)
114 110
115err_rb_stts: 111err_rb_stts:
116 dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, 112 dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
117 rxq->bd, rxq->bd_dma); 113 rxq->bd, rxq->bd_dma);
118 memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma)); 114 memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
119 rxq->bd = NULL; 115 rxq->bd = NULL;
120err_bd: 116err_bd:
@@ -123,8 +119,7 @@ err_bd:
123 119
124static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans) 120static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
125{ 121{
126 struct iwl_trans_pcie *trans_pcie = 122 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
127 IWL_TRANS_GET_PCIE_TRANS(trans);
128 struct iwl_rx_queue *rxq = &trans_pcie->rxq; 123 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
129 int i; 124 int i;
130 125
@@ -134,8 +129,8 @@ static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
134 * to an SKB, so we need to unmap and free potential storage */ 129 * to an SKB, so we need to unmap and free potential storage */
135 if (rxq->pool[i].page != NULL) { 130 if (rxq->pool[i].page != NULL) {
136 dma_unmap_page(trans->dev, rxq->pool[i].page_dma, 131 dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
137 PAGE_SIZE << trans_pcie->rx_page_order, 132 PAGE_SIZE << trans_pcie->rx_page_order,
138 DMA_FROM_DEVICE); 133 DMA_FROM_DEVICE);
139 __free_pages(rxq->pool[i].page, 134 __free_pages(rxq->pool[i].page,
140 trans_pcie->rx_page_order); 135 trans_pcie->rx_page_order);
141 rxq->pool[i].page = NULL; 136 rxq->pool[i].page = NULL;
@@ -193,8 +188,7 @@ static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
193 188
194static int iwl_rx_init(struct iwl_trans *trans) 189static int iwl_rx_init(struct iwl_trans *trans)
195{ 190{
196 struct iwl_trans_pcie *trans_pcie = 191 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
197 IWL_TRANS_GET_PCIE_TRANS(trans);
198 struct iwl_rx_queue *rxq = &trans_pcie->rxq; 192 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
199 193
200 int i, err; 194 int i, err;
@@ -236,10 +230,8 @@ static int iwl_rx_init(struct iwl_trans *trans)
236 230
237static void iwl_trans_pcie_rx_free(struct iwl_trans *trans) 231static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
238{ 232{
239 struct iwl_trans_pcie *trans_pcie = 233 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
240 IWL_TRANS_GET_PCIE_TRANS(trans);
241 struct iwl_rx_queue *rxq = &trans_pcie->rxq; 234 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
242
243 unsigned long flags; 235 unsigned long flags;
244 236
245 /*if rxq->bd is NULL, it means that nothing has been allocated, 237 /*if rxq->bd is NULL, it means that nothing has been allocated,
@@ -274,11 +266,11 @@ static int iwl_trans_rx_stop(struct iwl_trans *trans)
274 /* stop Rx DMA */ 266 /* stop Rx DMA */
275 iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); 267 iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
276 return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG, 268 return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
277 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); 269 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
278} 270}
279 271
280static inline int iwlagn_alloc_dma_ptr(struct iwl_trans *trans, 272static int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
281 struct iwl_dma_ptr *ptr, size_t size) 273 struct iwl_dma_ptr *ptr, size_t size)
282{ 274{
283 if (WARN_ON(ptr->addr)) 275 if (WARN_ON(ptr->addr))
284 return -EINVAL; 276 return -EINVAL;
@@ -291,8 +283,8 @@ static inline int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
291 return 0; 283 return 0;
292} 284}
293 285
294static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans, 286static void iwlagn_free_dma_ptr(struct iwl_trans *trans,
295 struct iwl_dma_ptr *ptr) 287 struct iwl_dma_ptr *ptr)
296{ 288{
297 if (unlikely(!ptr->addr)) 289 if (unlikely(!ptr->addr))
298 return; 290 return;
@@ -304,8 +296,13 @@ static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans,
304static void iwl_trans_pcie_queue_stuck_timer(unsigned long data) 296static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
305{ 297{
306 struct iwl_tx_queue *txq = (void *)data; 298 struct iwl_tx_queue *txq = (void *)data;
299 struct iwl_queue *q = &txq->q;
307 struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; 300 struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
308 struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie); 301 struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
302 u32 scd_sram_addr = trans_pcie->scd_base_addr +
303 SCD_TX_STTS_MEM_LOWER_BOUND + (16 * txq->q.id);
304 u8 buf[16];
305 int i;
309 306
310 spin_lock(&txq->lock); 307 spin_lock(&txq->lock);
311 /* check if triggered erroneously */ 308 /* check if triggered erroneously */
@@ -315,26 +312,59 @@ static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
315 } 312 }
316 spin_unlock(&txq->lock); 313 spin_unlock(&txq->lock);
317 314
318
319 IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id, 315 IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
320 jiffies_to_msecs(trans_pcie->wd_timeout)); 316 jiffies_to_msecs(trans_pcie->wd_timeout));
321 IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n", 317 IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
322 txq->q.read_ptr, txq->q.write_ptr); 318 txq->q.read_ptr, txq->q.write_ptr);
323 IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n", 319
324 iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq->q.id)) 320 iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
325 & (TFD_QUEUE_SIZE_MAX - 1), 321
326 iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq->q.id))); 322 iwl_print_hex_error(trans, buf, sizeof(buf));
323
324 for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
325 IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
326 iwl_read_direct32(trans, FH_TX_TRB_REG(i)));
327
328 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
329 u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
330 u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
331 bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
332 u32 tbl_dw =
333 iwl_read_targ_mem(trans,
334 trans_pcie->scd_base_addr +
335 SCD_TRANS_TBL_OFFSET_QUEUE(i));
336
337 if (i & 0x1)
338 tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
339 else
340 tbl_dw = tbl_dw & 0x0000FFFF;
341
342 IWL_ERR(trans,
343 "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
344 i, active ? "" : "in", fifo, tbl_dw,
345 iwl_read_prph(trans,
346 SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1),
347 iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
348 }
349
350 for (i = q->read_ptr; i != q->write_ptr;
351 i = iwl_queue_inc_wrap(i, q->n_bd)) {
352 struct iwl_tx_cmd *tx_cmd =
353 (struct iwl_tx_cmd *)txq->entries[i].cmd->payload;
354 IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
355 get_unaligned_le32(&tx_cmd->scratch));
356 }
327 357
328 iwl_op_mode_nic_error(trans->op_mode); 358 iwl_op_mode_nic_error(trans->op_mode);
329} 359}
330 360
331static int iwl_trans_txq_alloc(struct iwl_trans *trans, 361static int iwl_trans_txq_alloc(struct iwl_trans *trans,
332 struct iwl_tx_queue *txq, int slots_num, 362 struct iwl_tx_queue *txq, int slots_num,
333 u32 txq_id) 363 u32 txq_id)
334{ 364{
365 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
335 size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX; 366 size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
336 int i; 367 int i;
337 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
338 368
339 if (WARN_ON(txq->entries || txq->tfds)) 369 if (WARN_ON(txq->entries || txq->tfds))
340 return -EINVAL; 370 return -EINVAL;
@@ -435,7 +465,7 @@ static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
435 465
436 spin_lock_bh(&txq->lock); 466 spin_lock_bh(&txq->lock);
437 while (q->write_ptr != q->read_ptr) { 467 while (q->write_ptr != q->read_ptr) {
438 iwlagn_txq_free_tfd(trans, txq, dma_dir); 468 iwl_txq_free_tfd(trans, txq, dma_dir);
439 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); 469 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
440 } 470 }
441 spin_unlock_bh(&txq->lock); 471 spin_unlock_bh(&txq->lock);
@@ -455,6 +485,7 @@ static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
455 struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; 485 struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
456 struct device *dev = trans->dev; 486 struct device *dev = trans->dev;
457 int i; 487 int i;
488
458 if (WARN_ON(!txq)) 489 if (WARN_ON(!txq))
459 return; 490 return;
460 491
@@ -574,11 +605,11 @@ error:
574} 605}
575static int iwl_tx_init(struct iwl_trans *trans) 606static int iwl_tx_init(struct iwl_trans *trans)
576{ 607{
608 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
577 int ret; 609 int ret;
578 int txq_id, slots_num; 610 int txq_id, slots_num;
579 unsigned long flags; 611 unsigned long flags;
580 bool alloc = false; 612 bool alloc = false;
581 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
582 613
583 if (!trans_pcie->txq) { 614 if (!trans_pcie->txq) {
584 ret = iwl_trans_tx_alloc(trans); 615 ret = iwl_trans_tx_alloc(trans);
@@ -643,10 +674,9 @@ static void iwl_set_pwr_vmain(struct iwl_trans *trans)
643 674
644static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans) 675static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans)
645{ 676{
677 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
646 int pos; 678 int pos;
647 u16 pci_lnk_ctl; 679 u16 pci_lnk_ctl;
648 struct iwl_trans_pcie *trans_pcie =
649 IWL_TRANS_GET_PCIE_TRANS(trans);
650 680
651 struct pci_dev *pci_dev = trans_pcie->pci_dev; 681 struct pci_dev *pci_dev = trans_pcie->pci_dev;
652 682
@@ -700,14 +730,14 @@ static int iwl_apm_init(struct iwl_trans *trans)
700 730
701 /* Disable L0S exit timer (platform NMI Work/Around) */ 731 /* Disable L0S exit timer (platform NMI Work/Around) */
702 iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, 732 iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
703 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); 733 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
704 734
705 /* 735 /*
706 * Disable L0s without affecting L1; 736 * Disable L0s without affecting L1;
707 * don't wait for ICH L0s (ICH bug W/A) 737 * don't wait for ICH L0s (ICH bug W/A)
708 */ 738 */
709 iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, 739 iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
710 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); 740 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
711 741
712 /* Set FH wait threshold to maximum (HW error during stress W/A) */ 742 /* Set FH wait threshold to maximum (HW error during stress W/A) */
713 iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL); 743 iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
@@ -717,7 +747,7 @@ static int iwl_apm_init(struct iwl_trans *trans)
717 * wake device's PCI Express link L1a -> L0s 747 * wake device's PCI Express link L1a -> L0s
718 */ 748 */
719 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 749 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
720 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); 750 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
721 751
722 iwl_apm_config(trans); 752 iwl_apm_config(trans);
723 753
@@ -738,8 +768,8 @@ static int iwl_apm_init(struct iwl_trans *trans)
738 * and accesses to uCode SRAM. 768 * and accesses to uCode SRAM.
739 */ 769 */
740 ret = iwl_poll_bit(trans, CSR_GP_CNTRL, 770 ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
741 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 771 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
742 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); 772 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
743 if (ret < 0) { 773 if (ret < 0) {
744 IWL_DEBUG_INFO(trans, "Failed to init the card\n"); 774 IWL_DEBUG_INFO(trans, "Failed to init the card\n");
745 goto out; 775 goto out;
@@ -773,8 +803,8 @@ static int iwl_apm_stop_master(struct iwl_trans *trans)
773 iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); 803 iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
774 804
775 ret = iwl_poll_bit(trans, CSR_RESET, 805 ret = iwl_poll_bit(trans, CSR_RESET,
776 CSR_RESET_REG_FLAG_MASTER_DISABLED, 806 CSR_RESET_REG_FLAG_MASTER_DISABLED,
777 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); 807 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
778 if (ret) 808 if (ret)
779 IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n"); 809 IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
780 810
@@ -816,8 +846,7 @@ static int iwl_nic_init(struct iwl_trans *trans)
816 iwl_apm_init(trans); 846 iwl_apm_init(trans);
817 847
818 /* Set interrupt coalescing calibration timer to default (512 usecs) */ 848 /* Set interrupt coalescing calibration timer to default (512 usecs) */
819 iwl_write8(trans, CSR_INT_COALESCING, 849 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
820 IWL_HOST_INT_CALIB_TIMEOUT_DEF);
821 850
822 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); 851 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
823 852
@@ -836,8 +865,8 @@ static int iwl_nic_init(struct iwl_trans *trans)
836 865
837 if (trans->cfg->base_params->shadow_reg_enable) { 866 if (trans->cfg->base_params->shadow_reg_enable) {
838 /* enable shadow regs in HW */ 867 /* enable shadow regs in HW */
839 iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 868 iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
840 0x800FFFFF); 869 IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
841 } 870 }
842 871
843 return 0; 872 return 0;
@@ -851,13 +880,13 @@ static int iwl_set_hw_ready(struct iwl_trans *trans)
851 int ret; 880 int ret;
852 881
853 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 882 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
854 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY); 883 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
855 884
856 /* See if we got it */ 885 /* See if we got it */
857 ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, 886 ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
858 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, 887 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
859 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, 888 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
860 HW_READY_TIMEOUT); 889 HW_READY_TIMEOUT);
861 890
862 IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : ""); 891 IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
863 return ret; 892 return ret;
@@ -877,11 +906,11 @@ static int iwl_prepare_card_hw(struct iwl_trans *trans)
877 906
878 /* If HW is not ready, prepare the conditions to check again */ 907 /* If HW is not ready, prepare the conditions to check again */
879 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 908 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
880 CSR_HW_IF_CONFIG_REG_PREPARE); 909 CSR_HW_IF_CONFIG_REG_PREPARE);
881 910
882 ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, 911 ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
883 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 912 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
884 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000); 913 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
885 914
886 if (ret < 0) 915 if (ret < 0)
887 return ret; 916 return ret;
@@ -908,32 +937,33 @@ static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
908 trans_pcie->ucode_write_complete = false; 937 trans_pcie->ucode_write_complete = false;
909 938
910 iwl_write_direct32(trans, 939 iwl_write_direct32(trans,
911 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), 940 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
912 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE); 941 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
913 942
914 iwl_write_direct32(trans, 943 iwl_write_direct32(trans,
915 FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr); 944 FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
945 dst_addr);
916 946
917 iwl_write_direct32(trans, 947 iwl_write_direct32(trans,
918 FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL), 948 FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
919 phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK); 949 phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
920 950
921 iwl_write_direct32(trans, 951 iwl_write_direct32(trans,
922 FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL), 952 FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
923 (iwl_get_dma_hi_addr(phy_addr) 953 (iwl_get_dma_hi_addr(phy_addr)
924 << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt); 954 << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
925 955
926 iwl_write_direct32(trans, 956 iwl_write_direct32(trans,
927 FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL), 957 FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
928 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM | 958 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
929 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX | 959 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
930 FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID); 960 FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
931 961
932 iwl_write_direct32(trans, 962 iwl_write_direct32(trans,
933 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), 963 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
934 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | 964 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
935 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE | 965 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
936 FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); 966 FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
937 967
938 IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n", 968 IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
939 section_num); 969 section_num);
@@ -1016,15 +1046,12 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
1016 1046
1017/* 1047/*
1018 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask 1048 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
1019 * must be called under the irq lock and with MAC access
1020 */ 1049 */
1021static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask) 1050static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
1022{ 1051{
1023 struct iwl_trans_pcie __maybe_unused *trans_pcie = 1052 struct iwl_trans_pcie __maybe_unused *trans_pcie =
1024 IWL_TRANS_GET_PCIE_TRANS(trans); 1053 IWL_TRANS_GET_PCIE_TRANS(trans);
1025 1054
1026 lockdep_assert_held(&trans_pcie->irq_lock);
1027
1028 iwl_write_prph(trans, SCD_TXFACT, mask); 1055 iwl_write_prph(trans, SCD_TXFACT, mask);
1029} 1056}
1030 1057
@@ -1032,11 +1059,12 @@ static void iwl_tx_start(struct iwl_trans *trans)
1032{ 1059{
1033 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1060 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1034 u32 a; 1061 u32 a;
1035 unsigned long flags; 1062 int chan;
1036 int i, chan;
1037 u32 reg_val; 1063 u32 reg_val;
1038 1064
1039 spin_lock_irqsave(&trans_pcie->irq_lock, flags); 1065 /* make sure all queue are not stopped/used */
1066 memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
1067 memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
1040 1068
1041 trans_pcie->scd_base_addr = 1069 trans_pcie->scd_base_addr =
1042 iwl_read_prph(trans, SCD_SRAM_BASE_ADDR); 1070 iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
@@ -1063,64 +1091,26 @@ static void iwl_tx_start(struct iwl_trans *trans)
1063 */ 1091 */
1064 iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); 1092 iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
1065 1093
1094 iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
1095 trans_pcie->cmd_fifo);
1096
1097 /* Activate all Tx DMA/FIFO channels */
1098 iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
1099
1066 /* Enable DMA channel */ 1100 /* Enable DMA channel */
1067 for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++) 1101 for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
1068 iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan), 1102 iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
1069 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | 1103 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1070 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); 1104 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1071 1105
1072 /* Update FH chicken bits */ 1106 /* Update FH chicken bits */
1073 reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG); 1107 reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
1074 iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG, 1108 iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
1075 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); 1109 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1076 1110
1077 iwl_write_prph(trans, SCD_QUEUECHAIN_SEL,
1078 SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie));
1079 iwl_write_prph(trans, SCD_AGGR_SEL, 0);
1080
1081 /* initiate the queues */
1082 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
1083 iwl_write_prph(trans, SCD_QUEUE_RDPTR(i), 0);
1084 iwl_write_direct32(trans, HBUS_TARG_WRPTR, 0 | (i << 8));
1085 iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
1086 SCD_CONTEXT_QUEUE_OFFSET(i), 0);
1087 iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
1088 SCD_CONTEXT_QUEUE_OFFSET(i) +
1089 sizeof(u32),
1090 ((SCD_WIN_SIZE <<
1091 SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1092 SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1093 ((SCD_FRAME_LIMIT <<
1094 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1095 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1096 }
1097
1098 iwl_write_prph(trans, SCD_INTERRUPT_MASK,
1099 IWL_MASK(0, trans->cfg->base_params->num_of_queues));
1100
1101 /* Activate all Tx DMA/FIFO channels */
1102 iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
1103
1104 iwl_trans_set_wr_ptrs(trans, trans_pcie->cmd_queue, 0);
1105
1106 /* make sure all queue are not stopped/used */
1107 memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
1108 memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
1109
1110 for (i = 0; i < trans_pcie->n_q_to_fifo; i++) {
1111 int fifo = trans_pcie->setup_q_to_fifo[i];
1112
1113 set_bit(i, trans_pcie->queue_used);
1114
1115 iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i],
1116 fifo, true);
1117 }
1118
1119 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
1120
1121 /* Enable L1-Active */ 1111 /* Enable L1-Active */
1122 iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, 1112 iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
1123 APMG_PCIDEV_STT_VAL_L1_ACT_DIS); 1113 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1124} 1114}
1125 1115
1126static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans) 1116static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
@@ -1134,9 +1124,9 @@ static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
1134 */ 1124 */
1135static int iwl_trans_tx_stop(struct iwl_trans *trans) 1125static int iwl_trans_tx_stop(struct iwl_trans *trans)
1136{ 1126{
1127 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1137 int ch, txq_id, ret; 1128 int ch, txq_id, ret;
1138 unsigned long flags; 1129 unsigned long flags;
1139 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1140 1130
1141 /* Turn off all Tx DMA fifos */ 1131 /* Turn off all Tx DMA fifos */
1142 spin_lock_irqsave(&trans_pcie->irq_lock, flags); 1132 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
@@ -1148,18 +1138,19 @@ static int iwl_trans_tx_stop(struct iwl_trans *trans)
1148 iwl_write_direct32(trans, 1138 iwl_write_direct32(trans,
1149 FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); 1139 FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
1150 ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG, 1140 ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
1151 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1141 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
1152 1000);
1153 if (ret < 0) 1142 if (ret < 0)
1154 IWL_ERR(trans, "Failing on timeout while stopping" 1143 IWL_ERR(trans,
1155 " DMA channel %d [0x%08x]", ch, 1144 "Failing on timeout while stopping DMA channel %d [0x%08x]\n",
1156 iwl_read_direct32(trans, 1145 ch,
1157 FH_TSSR_TX_STATUS_REG)); 1146 iwl_read_direct32(trans,
1147 FH_TSSR_TX_STATUS_REG));
1158 } 1148 }
1159 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); 1149 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
1160 1150
1161 if (!trans_pcie->txq) { 1151 if (!trans_pcie->txq) {
1162 IWL_WARN(trans, "Stopping tx queues that aren't allocated..."); 1152 IWL_WARN(trans,
1153 "Stopping tx queues that aren't allocated...\n");
1163 return 0; 1154 return 0;
1164 } 1155 }
1165 1156
@@ -1173,8 +1164,8 @@ static int iwl_trans_tx_stop(struct iwl_trans *trans)
1173 1164
1174static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) 1165static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
1175{ 1166{
1176 unsigned long flags;
1177 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1167 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1168 unsigned long flags;
1178 1169
1179 /* tell the device to stop sending interrupts */ 1170 /* tell the device to stop sending interrupts */
1180 spin_lock_irqsave(&trans_pcie->irq_lock, flags); 1171 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
@@ -1204,7 +1195,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
1204 1195
1205 /* Make sure (redundant) we've released our request to stay awake */ 1196 /* Make sure (redundant) we've released our request to stay awake */
1206 iwl_clear_bit(trans, CSR_GP_CNTRL, 1197 iwl_clear_bit(trans, CSR_GP_CNTRL,
1207 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1198 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1208 1199
1209 /* Stop the device, and put it in low power state */ 1200 /* Stop the device, and put it in low power state */
1210 iwl_apm_stop(trans); 1201 iwl_apm_stop(trans);
@@ -1273,13 +1264,27 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1273 1264
1274 spin_lock(&txq->lock); 1265 spin_lock(&txq->lock);
1275 1266
1267 /* In AGG mode, the index in the ring must correspond to the WiFi
1268 * sequence number. This is a HW requirements to help the SCD to parse
1269 * the BA.
1270 * Check here that the packets are in the right place on the ring.
1271 */
1272#ifdef CONFIG_IWLWIFI_DEBUG
1273 wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
1274 WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) &&
1275 ((wifi_seq & 0xff) != q->write_ptr),
1276 "Q: %d WiFi Seq %d tfdNum %d",
1277 txq_id, wifi_seq, q->write_ptr);
1278#endif
1279
1276 /* Set up driver data for this TFD */ 1280 /* Set up driver data for this TFD */
1277 txq->entries[q->write_ptr].skb = skb; 1281 txq->entries[q->write_ptr].skb = skb;
1278 txq->entries[q->write_ptr].cmd = dev_cmd; 1282 txq->entries[q->write_ptr].cmd = dev_cmd;
1279 1283
1280 dev_cmd->hdr.cmd = REPLY_TX; 1284 dev_cmd->hdr.cmd = REPLY_TX;
1281 dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | 1285 dev_cmd->hdr.sequence =
1282 INDEX_TO_SEQ(q->write_ptr))); 1286 cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
1287 INDEX_TO_SEQ(q->write_ptr)));
1283 1288
1284 /* Set up first empty entry in queue's array of Tx/cmd buffers */ 1289 /* Set up first empty entry in queue's array of Tx/cmd buffers */
1285 out_meta = &txq->entries[q->write_ptr].meta; 1290 out_meta = &txq->entries[q->write_ptr].meta;
@@ -1344,7 +1349,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1344 1349
1345 /* take back ownership of DMA buffer to enable update */ 1350 /* take back ownership of DMA buffer to enable update */
1346 dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen, 1351 dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
1347 DMA_BIDIRECTIONAL); 1352 DMA_BIDIRECTIONAL);
1348 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); 1353 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
1349 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); 1354 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
1350 1355
@@ -1356,16 +1361,17 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1356 iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len)); 1361 iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
1357 1362
1358 dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen, 1363 dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
1359 DMA_BIDIRECTIONAL); 1364 DMA_BIDIRECTIONAL);
1360 1365
1361 trace_iwlwifi_dev_tx(trans->dev, 1366 trace_iwlwifi_dev_tx(trans->dev,
1362 &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr], 1367 &txq->tfds[txq->q.write_ptr],
1363 sizeof(struct iwl_tfd), 1368 sizeof(struct iwl_tfd),
1364 &dev_cmd->hdr, firstlen, 1369 &dev_cmd->hdr, firstlen,
1365 skb->data + hdr_len, secondlen); 1370 skb->data + hdr_len, secondlen);
1366 1371
1367 /* start timer if queue currently empty */ 1372 /* start timer if queue currently empty */
1368 if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout) 1373 if (txq->need_update && q->read_ptr == q->write_ptr &&
1374 trans_pcie->wd_timeout)
1369 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout); 1375 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
1370 1376
1371 /* Tell device the write index *just past* this latest filled TFD */ 1377 /* Tell device the write index *just past* this latest filled TFD */
@@ -1395,8 +1401,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1395 1401
1396static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) 1402static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
1397{ 1403{
1398 struct iwl_trans_pcie *trans_pcie = 1404 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1399 IWL_TRANS_GET_PCIE_TRANS(trans);
1400 int err; 1405 int err;
1401 bool hw_rfkill; 1406 bool hw_rfkill;
1402 1407
@@ -1409,7 +1414,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
1409 iwl_alloc_isr_ict(trans); 1414 iwl_alloc_isr_ict(trans);
1410 1415
1411 err = request_irq(trans_pcie->irq, iwl_isr_ict, IRQF_SHARED, 1416 err = request_irq(trans_pcie->irq, iwl_isr_ict, IRQF_SHARED,
1412 DRV_NAME, trans); 1417 DRV_NAME, trans);
1413 if (err) { 1418 if (err) {
1414 IWL_ERR(trans, "Error allocating IRQ %d\n", 1419 IWL_ERR(trans, "Error allocating IRQ %d\n",
1415 trans_pcie->irq); 1420 trans_pcie->irq);
@@ -1422,7 +1427,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
1422 1427
1423 err = iwl_prepare_card_hw(trans); 1428 err = iwl_prepare_card_hw(trans);
1424 if (err) { 1429 if (err) {
1425 IWL_ERR(trans, "Error while preparing HW: %d", err); 1430 IWL_ERR(trans, "Error while preparing HW: %d\n", err);
1426 goto err_free_irq; 1431 goto err_free_irq;
1427 } 1432 }
1428 1433
@@ -1447,9 +1452,9 @@ error:
1447static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans, 1452static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
1448 bool op_mode_leaving) 1453 bool op_mode_leaving)
1449{ 1454{
1455 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1450 bool hw_rfkill; 1456 bool hw_rfkill;
1451 unsigned long flags; 1457 unsigned long flags;
1452 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1453 1458
1454 iwl_apm_stop(trans); 1459 iwl_apm_stop(trans);
1455 1460
@@ -1520,6 +1525,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
1520 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1525 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1521 1526
1522 trans_pcie->cmd_queue = trans_cfg->cmd_queue; 1527 trans_pcie->cmd_queue = trans_cfg->cmd_queue;
1528 trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
1523 if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS)) 1529 if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
1524 trans_pcie->n_no_reclaim_cmds = 0; 1530 trans_pcie->n_no_reclaim_cmds = 0;
1525 else 1531 else
@@ -1528,17 +1534,6 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
1528 memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds, 1534 memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
1529 trans_pcie->n_no_reclaim_cmds * sizeof(u8)); 1535 trans_pcie->n_no_reclaim_cmds * sizeof(u8));
1530 1536
1531 trans_pcie->n_q_to_fifo = trans_cfg->n_queue_to_fifo;
1532
1533 if (WARN_ON(trans_pcie->n_q_to_fifo > IWL_MAX_HW_QUEUES))
1534 trans_pcie->n_q_to_fifo = IWL_MAX_HW_QUEUES;
1535
1536 /* at least the command queue must be mapped */
1537 WARN_ON(!trans_pcie->n_q_to_fifo);
1538
1539 memcpy(trans_pcie->setup_q_to_fifo, trans_cfg->queue_to_fifo,
1540 trans_pcie->n_q_to_fifo * sizeof(u8));
1541
1542 trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k; 1537 trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
1543 if (trans_pcie->rx_buf_size_8k) 1538 if (trans_pcie->rx_buf_size_8k)
1544 trans_pcie->rx_page_order = get_order(8 * 1024); 1539 trans_pcie->rx_page_order = get_order(8 * 1024);
@@ -1553,8 +1548,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
1553 1548
1554void iwl_trans_pcie_free(struct iwl_trans *trans) 1549void iwl_trans_pcie_free(struct iwl_trans *trans)
1555{ 1550{
1556 struct iwl_trans_pcie *trans_pcie = 1551 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1557 IWL_TRANS_GET_PCIE_TRANS(trans);
1558 1552
1559 iwl_trans_pcie_tx_free(trans); 1553 iwl_trans_pcie_tx_free(trans);
1560#ifndef CONFIG_IWLWIFI_IDI 1554#ifndef CONFIG_IWLWIFI_IDI
@@ -1569,6 +1563,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
1569 iounmap(trans_pcie->hw_base); 1563 iounmap(trans_pcie->hw_base);
1570 pci_release_regions(trans_pcie->pci_dev); 1564 pci_release_regions(trans_pcie->pci_dev);
1571 pci_disable_device(trans_pcie->pci_dev); 1565 pci_disable_device(trans_pcie->pci_dev);
1566 kmem_cache_destroy(trans->dev_cmd_pool);
1572 1567
1573 kfree(trans); 1568 kfree(trans);
1574} 1569}
@@ -1816,8 +1811,8 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
1816}; 1811};
1817 1812
1818static ssize_t iwl_dbgfs_tx_queue_read(struct file *file, 1813static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
1819 char __user *user_buf, 1814 char __user *user_buf,
1820 size_t count, loff_t *ppos) 1815 size_t count, loff_t *ppos)
1821{ 1816{
1822 struct iwl_trans *trans = file->private_data; 1817 struct iwl_trans *trans = file->private_data;
1823 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1818 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1853,11 +1848,11 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
1853} 1848}
1854 1849
1855static ssize_t iwl_dbgfs_rx_queue_read(struct file *file, 1850static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
1856 char __user *user_buf, 1851 char __user *user_buf,
1857 size_t count, loff_t *ppos) { 1852 size_t count, loff_t *ppos)
1853{
1858 struct iwl_trans *trans = file->private_data; 1854 struct iwl_trans *trans = file->private_data;
1859 struct iwl_trans_pcie *trans_pcie = 1855 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1860 IWL_TRANS_GET_PCIE_TRANS(trans);
1861 struct iwl_rx_queue *rxq = &trans_pcie->rxq; 1856 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
1862 char buf[256]; 1857 char buf[256];
1863 int pos = 0; 1858 int pos = 0;
@@ -1881,11 +1876,10 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
1881 1876
1882static ssize_t iwl_dbgfs_interrupt_read(struct file *file, 1877static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
1883 char __user *user_buf, 1878 char __user *user_buf,
1884 size_t count, loff_t *ppos) { 1879 size_t count, loff_t *ppos)
1885 1880{
1886 struct iwl_trans *trans = file->private_data; 1881 struct iwl_trans *trans = file->private_data;
1887 struct iwl_trans_pcie *trans_pcie = 1882 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1888 IWL_TRANS_GET_PCIE_TRANS(trans);
1889 struct isr_statistics *isr_stats = &trans_pcie->isr_stats; 1883 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1890 1884
1891 int pos = 0; 1885 int pos = 0;
@@ -1943,8 +1937,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
1943 size_t count, loff_t *ppos) 1937 size_t count, loff_t *ppos)
1944{ 1938{
1945 struct iwl_trans *trans = file->private_data; 1939 struct iwl_trans *trans = file->private_data;
1946 struct iwl_trans_pcie *trans_pcie = 1940 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1947 IWL_TRANS_GET_PCIE_TRANS(trans);
1948 struct isr_statistics *isr_stats = &trans_pcie->isr_stats; 1941 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1949 1942
1950 char buf[8]; 1943 char buf[8];
@@ -1964,8 +1957,8 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
1964} 1957}
1965 1958
1966static ssize_t iwl_dbgfs_csr_write(struct file *file, 1959static ssize_t iwl_dbgfs_csr_write(struct file *file,
1967 const char __user *user_buf, 1960 const char __user *user_buf,
1968 size_t count, loff_t *ppos) 1961 size_t count, loff_t *ppos)
1969{ 1962{
1970 struct iwl_trans *trans = file->private_data; 1963 struct iwl_trans *trans = file->private_data;
1971 char buf[8]; 1964 char buf[8];
@@ -1985,8 +1978,8 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
1985} 1978}
1986 1979
1987static ssize_t iwl_dbgfs_fh_reg_read(struct file *file, 1980static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
1988 char __user *user_buf, 1981 char __user *user_buf,
1989 size_t count, loff_t *ppos) 1982 size_t count, loff_t *ppos)
1990{ 1983{
1991 struct iwl_trans *trans = file->private_data; 1984 struct iwl_trans *trans = file->private_data;
1992 char *buf; 1985 char *buf;
@@ -2012,7 +2005,9 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
2012 if (!trans->op_mode) 2005 if (!trans->op_mode)
2013 return -EAGAIN; 2006 return -EAGAIN;
2014 2007
2008 local_bh_disable();
2015 iwl_op_mode_nic_error(trans->op_mode); 2009 iwl_op_mode_nic_error(trans->op_mode);
2010 local_bh_enable();
2016 2011
2017 return count; 2012 return count;
2018} 2013}
@@ -2029,7 +2024,7 @@ DEBUGFS_WRITE_FILE_OPS(fw_restart);
2029 * 2024 *
2030 */ 2025 */
2031static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans, 2026static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
2032 struct dentry *dir) 2027 struct dentry *dir)
2033{ 2028{
2034 DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR); 2029 DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
2035 DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR); 2030 DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
@@ -2041,9 +2036,10 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
2041} 2036}
2042#else 2037#else
2043static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans, 2038static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
2044 struct dentry *dir) 2039 struct dentry *dir)
2045{ return 0; } 2040{
2046 2041 return 0;
2042}
2047#endif /*CONFIG_IWLWIFI_DEBUGFS */ 2043#endif /*CONFIG_IWLWIFI_DEBUGFS */
2048 2044
2049static const struct iwl_trans_ops trans_ops_pcie = { 2045static const struct iwl_trans_ops trans_ops_pcie = {
@@ -2060,8 +2056,8 @@ static const struct iwl_trans_ops trans_ops_pcie = {
2060 .tx = iwl_trans_pcie_tx, 2056 .tx = iwl_trans_pcie_tx,
2061 .reclaim = iwl_trans_pcie_reclaim, 2057 .reclaim = iwl_trans_pcie_reclaim,
2062 2058
2063 .tx_agg_disable = iwl_trans_pcie_tx_agg_disable, 2059 .txq_disable = iwl_trans_pcie_txq_disable,
2064 .tx_agg_setup = iwl_trans_pcie_tx_agg_setup, 2060 .txq_enable = iwl_trans_pcie_txq_enable,
2065 2061
2066 .dbgfs_register = iwl_trans_pcie_dbgfs_register, 2062 .dbgfs_register = iwl_trans_pcie_dbgfs_register,
2067 2063
@@ -2084,11 +2080,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2084{ 2080{
2085 struct iwl_trans_pcie *trans_pcie; 2081 struct iwl_trans_pcie *trans_pcie;
2086 struct iwl_trans *trans; 2082 struct iwl_trans *trans;
2083 char cmd_pool_name[100];
2087 u16 pci_cmd; 2084 u16 pci_cmd;
2088 int err; 2085 int err;
2089 2086
2090 trans = kzalloc(sizeof(struct iwl_trans) + 2087 trans = kzalloc(sizeof(struct iwl_trans) +
2091 sizeof(struct iwl_trans_pcie), GFP_KERNEL); 2088 sizeof(struct iwl_trans_pcie), GFP_KERNEL);
2092 2089
2093 if (WARN_ON(!trans)) 2090 if (WARN_ON(!trans))
2094 return NULL; 2091 return NULL;
@@ -2104,7 +2101,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2104 /* W/A - seems to solve weird behavior. We need to remove this if we 2101 /* W/A - seems to solve weird behavior. We need to remove this if we
2105 * don't want to stay in L1 all the time. This wastes a lot of power */ 2102 * don't want to stay in L1 all the time. This wastes a lot of power */
2106 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | 2103 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
2107 PCIE_LINK_STATE_CLKPM); 2104 PCIE_LINK_STATE_CLKPM);
2108 2105
2109 if (pci_enable_device(pdev)) { 2106 if (pci_enable_device(pdev)) {
2110 err = -ENODEV; 2107 err = -ENODEV;
@@ -2120,7 +2117,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2120 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2117 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2121 if (!err) 2118 if (!err)
2122 err = pci_set_consistent_dma_mask(pdev, 2119 err = pci_set_consistent_dma_mask(pdev,
2123 DMA_BIT_MASK(32)); 2120 DMA_BIT_MASK(32));
2124 /* both attempts failed: */ 2121 /* both attempts failed: */
2125 if (err) { 2122 if (err) {
2126 dev_printk(KERN_ERR, &pdev->dev, 2123 dev_printk(KERN_ERR, &pdev->dev,
@@ -2131,25 +2128,26 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2131 2128
2132 err = pci_request_regions(pdev, DRV_NAME); 2129 err = pci_request_regions(pdev, DRV_NAME);
2133 if (err) { 2130 if (err) {
2134 dev_printk(KERN_ERR, &pdev->dev, "pci_request_regions failed"); 2131 dev_printk(KERN_ERR, &pdev->dev,
2132 "pci_request_regions failed\n");
2135 goto out_pci_disable_device; 2133 goto out_pci_disable_device;
2136 } 2134 }
2137 2135
2138 trans_pcie->hw_base = pci_ioremap_bar(pdev, 0); 2136 trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
2139 if (!trans_pcie->hw_base) { 2137 if (!trans_pcie->hw_base) {
2140 dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed"); 2138 dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed\n");
2141 err = -ENODEV; 2139 err = -ENODEV;
2142 goto out_pci_release_regions; 2140 goto out_pci_release_regions;
2143 } 2141 }
2144 2142
2145 dev_printk(KERN_INFO, &pdev->dev, 2143 dev_printk(KERN_INFO, &pdev->dev,
2146 "pci_resource_len = 0x%08llx\n", 2144 "pci_resource_len = 0x%08llx\n",
2147 (unsigned long long) pci_resource_len(pdev, 0)); 2145 (unsigned long long) pci_resource_len(pdev, 0));
2148 dev_printk(KERN_INFO, &pdev->dev, 2146 dev_printk(KERN_INFO, &pdev->dev,
2149 "pci_resource_base = %p\n", trans_pcie->hw_base); 2147 "pci_resource_base = %p\n", trans_pcie->hw_base);
2150 2148
2151 dev_printk(KERN_INFO, &pdev->dev, 2149 dev_printk(KERN_INFO, &pdev->dev,
2152 "HW Revision ID = 0x%X\n", pdev->revision); 2150 "HW Revision ID = 0x%X\n", pdev->revision);
2153 2151
2154 /* We disable the RETRY_TIMEOUT register (0x41) to keep 2152 /* We disable the RETRY_TIMEOUT register (0x41) to keep
2155 * PCI Tx retries from interfering with C3 CPU state */ 2153 * PCI Tx retries from interfering with C3 CPU state */
@@ -2158,7 +2156,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2158 err = pci_enable_msi(pdev); 2156 err = pci_enable_msi(pdev);
2159 if (err) 2157 if (err)
2160 dev_printk(KERN_ERR, &pdev->dev, 2158 dev_printk(KERN_ERR, &pdev->dev,
2161 "pci_enable_msi failed(0X%x)", err); 2159 "pci_enable_msi failed(0X%x)\n", err);
2162 2160
2163 trans->dev = &pdev->dev; 2161 trans->dev = &pdev->dev;
2164 trans_pcie->irq = pdev->irq; 2162 trans_pcie->irq = pdev->irq;
@@ -2180,8 +2178,25 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2180 init_waitqueue_head(&trans->wait_command_queue); 2178 init_waitqueue_head(&trans->wait_command_queue);
2181 spin_lock_init(&trans->reg_lock); 2179 spin_lock_init(&trans->reg_lock);
2182 2180
2181 snprintf(cmd_pool_name, sizeof(cmd_pool_name), "iwl_cmd_pool:%s",
2182 dev_name(trans->dev));
2183
2184 trans->dev_cmd_headroom = 0;
2185 trans->dev_cmd_pool =
2186 kmem_cache_create(cmd_pool_name,
2187 sizeof(struct iwl_device_cmd)
2188 + trans->dev_cmd_headroom,
2189 sizeof(void *),
2190 SLAB_HWCACHE_ALIGN,
2191 NULL);
2192
2193 if (!trans->dev_cmd_pool)
2194 goto out_pci_disable_msi;
2195
2183 return trans; 2196 return trans;
2184 2197
2198out_pci_disable_msi:
2199 pci_disable_msi(pdev);
2185out_pci_release_regions: 2200out_pci_release_regions:
2186 pci_release_regions(pdev); 2201 pci_release_regions(pdev);
2187out_pci_disable_device: 2202out_pci_disable_device:
@@ -2190,4 +2205,3 @@ out_no_pci:
2190 kfree(trans); 2205 kfree(trans);
2191 return NULL; 2206 return NULL;
2192} 2207}
2193
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index a8750238ee09..6baf8deef519 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -34,11 +34,10 @@
34#include "iwl-csr.h" 34#include "iwl-csr.h"
35#include "iwl-prph.h" 35#include "iwl-prph.h"
36#include "iwl-io.h" 36#include "iwl-io.h"
37#include "iwl-agn-hw.h"
38#include "iwl-op-mode.h" 37#include "iwl-op-mode.h"
39#include "iwl-trans-pcie-int.h" 38#include "internal.h"
40/* FIXME: need to abstract out TX command (once we know what it looks like) */ 39/* FIXME: need to abstract out TX command (once we know what it looks like) */
41#include "iwl-commands.h" 40#include "dvm/commands.h"
42 41
43#define IWL_TX_CRC_SIZE 4 42#define IWL_TX_CRC_SIZE 4
44#define IWL_TX_DELIMITER_SIZE 4 43#define IWL_TX_DELIMITER_SIZE 4
@@ -47,12 +46,11 @@
47 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array 46 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
48 */ 47 */
49void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans, 48void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
50 struct iwl_tx_queue *txq, 49 struct iwl_tx_queue *txq,
51 u16 byte_cnt) 50 u16 byte_cnt)
52{ 51{
53 struct iwlagn_scd_bc_tbl *scd_bc_tbl; 52 struct iwlagn_scd_bc_tbl *scd_bc_tbl;
54 struct iwl_trans_pcie *trans_pcie = 53 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
55 IWL_TRANS_GET_PCIE_TRANS(trans);
56 int write_ptr = txq->q.write_ptr; 54 int write_ptr = txq->q.write_ptr;
57 int txq_id = txq->q.id; 55 int txq_id = txq->q.id;
58 u8 sec_ctl = 0; 56 u8 sec_ctl = 0;
@@ -178,8 +176,8 @@ static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
178 return tfd->num_tbs & 0x1f; 176 return tfd->num_tbs & 0x1f;
179} 177}
180 178
181static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta, 179static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
182 struct iwl_tfd *tfd, enum dma_data_direction dma_dir) 180 struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
183{ 181{
184 int i; 182 int i;
185 int num_tbs; 183 int num_tbs;
@@ -209,7 +207,7 @@ static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
209} 207}
210 208
211/** 209/**
212 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] 210 * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
213 * @trans - transport private data 211 * @trans - transport private data
214 * @txq - tx queue 212 * @txq - tx queue
215 * @dma_dir - the direction of the DMA mapping 213 * @dma_dir - the direction of the DMA mapping
@@ -217,8 +215,8 @@ static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
217 * Does NOT advance any TFD circular buffer read/write indexes 215 * Does NOT advance any TFD circular buffer read/write indexes
218 * Does NOT free the TFD itself (which is within circular buffer) 216 * Does NOT free the TFD itself (which is within circular buffer)
219 */ 217 */
220void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq, 218void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
221 enum dma_data_direction dma_dir) 219 enum dma_data_direction dma_dir)
222{ 220{
223 struct iwl_tfd *tfd_tmp = txq->tfds; 221 struct iwl_tfd *tfd_tmp = txq->tfds;
224 222
@@ -229,8 +227,8 @@ void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
229 lockdep_assert_held(&txq->lock); 227 lockdep_assert_held(&txq->lock);
230 228
231 /* We have only q->n_window txq->entries, but we use q->n_bd tfds */ 229 /* We have only q->n_window txq->entries, but we use q->n_bd tfds */
232 iwlagn_unmap_tfd(trans, &txq->entries[idx].meta, 230 iwl_unmap_tfd(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr],
233 &tfd_tmp[rd_ptr], dma_dir); 231 dma_dir);
234 232
235 /* free SKB */ 233 /* free SKB */
236 if (txq->entries) { 234 if (txq->entries) {
@@ -270,7 +268,7 @@ int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
270 /* Each TFD can point to a maximum 20 Tx buffers */ 268 /* Each TFD can point to a maximum 20 Tx buffers */
271 if (num_tbs >= IWL_NUM_OF_TBS) { 269 if (num_tbs >= IWL_NUM_OF_TBS) {
272 IWL_ERR(trans, "Error can not send more than %d chunks\n", 270 IWL_ERR(trans, "Error can not send more than %d chunks\n",
273 IWL_NUM_OF_TBS); 271 IWL_NUM_OF_TBS);
274 return -EINVAL; 272 return -EINVAL;
275 } 273 }
276 274
@@ -279,7 +277,7 @@ int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
279 277
280 if (unlikely(addr & ~IWL_TX_DMA_MASK)) 278 if (unlikely(addr & ~IWL_TX_DMA_MASK))
281 IWL_ERR(trans, "Unaligned address = %llx\n", 279 IWL_ERR(trans, "Unaligned address = %llx\n",
282 (unsigned long long)addr); 280 (unsigned long long)addr);
283 281
284 iwl_tfd_set_tb(tfd, num_tbs, addr, len); 282 iwl_tfd_set_tb(tfd, num_tbs, addr, len);
285 283
@@ -382,16 +380,14 @@ static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
382 tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; 380 tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
383} 381}
384 382
385static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid, 383static int iwl_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
386 u16 txq_id) 384 u16 txq_id)
387{ 385{
386 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
388 u32 tbl_dw_addr; 387 u32 tbl_dw_addr;
389 u32 tbl_dw; 388 u32 tbl_dw;
390 u16 scd_q2ratid; 389 u16 scd_q2ratid;
391 390
392 struct iwl_trans_pcie *trans_pcie =
393 IWL_TRANS_GET_PCIE_TRANS(trans);
394
395 scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK; 391 scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
396 392
397 tbl_dw_addr = trans_pcie->scd_base_addr + 393 tbl_dw_addr = trans_pcie->scd_base_addr +
@@ -409,7 +405,7 @@ static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
409 return 0; 405 return 0;
410} 406}
411 407
412static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id) 408static inline void iwl_txq_set_inactive(struct iwl_trans *trans, u16 txq_id)
413{ 409{
414 /* Simply stop the queue, but don't change any configuration; 410 /* Simply stop the queue, but don't change any configuration;
415 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */ 411 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
@@ -419,102 +415,87 @@ static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
419 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); 415 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
420} 416}
421 417
422void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, 418void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
423 int txq_id, u32 index) 419 int sta_id, int tid, int frame_limit, u16 ssn)
424{
425 IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d\n", txq_id, index & 0xff);
426 iwl_write_direct32(trans, HBUS_TARG_WRPTR,
427 (index & 0xff) | (txq_id << 8));
428 iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), index);
429}
430
431void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
432 struct iwl_tx_queue *txq,
433 int tx_fifo_id, bool active)
434{
435 int txq_id = txq->q.id;
436
437 iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
438 (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
439 (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
440 (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
441 SCD_QUEUE_STTS_REG_MSK);
442
443 if (active)
444 IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d\n",
445 txq_id, tx_fifo_id);
446 else
447 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
448}
449
450void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int txq_id, int fifo,
451 int sta_id, int tid, int frame_limit, u16 ssn)
452{ 420{
453 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 421 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
454 unsigned long flags;
455 u16 ra_tid = BUILD_RAxTID(sta_id, tid);
456 422
457 if (test_and_set_bit(txq_id, trans_pcie->queue_used)) 423 if (test_and_set_bit(txq_id, trans_pcie->queue_used))
458 WARN_ONCE(1, "queue %d already used - expect issues", txq_id); 424 WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
459 425
460 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
461
462 /* Stop this Tx queue before configuring it */ 426 /* Stop this Tx queue before configuring it */
463 iwlagn_tx_queue_stop_scheduler(trans, txq_id); 427 iwl_txq_set_inactive(trans, txq_id);
464 428
465 /* Map receiver-address / traffic-ID to this queue */ 429 /* Set this queue as a chain-building queue unless it is CMD queue */
466 iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id); 430 if (txq_id != trans_pcie->cmd_queue)
431 iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));
467 432
468 /* Set this queue as a chain-building queue */ 433 /* If this queue is mapped to a certain station: it is an AGG queue */
469 iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id)); 434 if (sta_id != IWL_INVALID_STATION) {
435 u16 ra_tid = BUILD_RAxTID(sta_id, tid);
470 436
471 /* enable aggregations for the queue */ 437 /* Map receiver-address / traffic-ID to this queue */
472 iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); 438 iwl_txq_set_ratid_map(trans, ra_tid, txq_id);
439
440 /* enable aggregations for the queue */
441 iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
442 } else {
443 /*
444 * disable aggregations for the queue, this will also make the
445 * ra_tid mapping configuration irrelevant since it is now a
446 * non-AGG queue.
447 */
448 iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
449 }
473 450
474 /* Place first TFD at index corresponding to start sequence number. 451 /* Place first TFD at index corresponding to start sequence number.
475 * Assumes that ssn_idx is valid (!= 0xFFF) */ 452 * Assumes that ssn_idx is valid (!= 0xFFF) */
476 trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff); 453 trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
477 trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff); 454 trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);
478 iwl_trans_set_wr_ptrs(trans, txq_id, ssn); 455
456 iwl_write_direct32(trans, HBUS_TARG_WRPTR,
457 (ssn & 0xff) | (txq_id << 8));
458 iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);
479 459
480 /* Set up Tx window size and frame limit for this queue */ 460 /* Set up Tx window size and frame limit for this queue */
481 iwl_write_targ_mem(trans, trans_pcie->scd_base_addr + 461 iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
462 SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
463 iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
482 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), 464 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
483 ((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & 465 ((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
484 SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | 466 SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
485 ((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & 467 ((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
486 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); 468 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
487 469
488 iwl_set_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));
489
490 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ 470 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
491 iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 471 iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
492 fifo, true); 472 (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
493 473 (fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
494 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); 474 (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
475 SCD_QUEUE_STTS_REG_MSK);
476 IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n",
477 txq_id, fifo, ssn & 0xff);
495} 478}
496 479
497void iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int txq_id) 480void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
498{ 481{
499 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 482 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
483 u16 rd_ptr, wr_ptr;
484 int n_bd = trans_pcie->txq[txq_id].q.n_bd;
500 485
501 if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) { 486 if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
502 WARN_ONCE(1, "queue %d not used", txq_id); 487 WARN_ONCE(1, "queue %d not used", txq_id);
503 return; 488 return;
504 } 489 }
505 490
506 iwlagn_tx_queue_stop_scheduler(trans, txq_id); 491 rd_ptr = iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & (n_bd - 1);
507 492 wr_ptr = iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id));
508 iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
509 493
510 trans_pcie->txq[txq_id].q.read_ptr = 0; 494 WARN_ONCE(rd_ptr != wr_ptr, "queue %d isn't empty: [%d,%d]",
511 trans_pcie->txq[txq_id].q.write_ptr = 0; 495 txq_id, rd_ptr, wr_ptr);
512 iwl_trans_set_wr_ptrs(trans, txq_id, 0);
513 496
514 iwl_clear_bits_prph(trans, SCD_INTERRUPT_MASK, BIT(txq_id)); 497 iwl_txq_set_inactive(trans, txq_id);
515 498 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
516 iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
517 0, false);
518} 499}
519 500
520/*************** HOST COMMAND QUEUE FUNCTIONS *****/ 501/*************** HOST COMMAND QUEUE FUNCTIONS *****/
@@ -615,13 +596,13 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
615 } 596 }
616 597
617 IWL_DEBUG_HC(trans, 598 IWL_DEBUG_HC(trans,
618 "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", 599 "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
619 trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd), 600 trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
620 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), cmd_size, 601 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
621 q->write_ptr, idx, trans_pcie->cmd_queue); 602 cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
622 603
623 phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size, 604 phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
624 DMA_BIDIRECTIONAL); 605 DMA_BIDIRECTIONAL);
625 if (unlikely(dma_mapping_error(trans->dev, phys_addr))) { 606 if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
626 idx = -ENOMEM; 607 idx = -ENOMEM;
627 goto out; 608 goto out;
@@ -630,8 +611,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
630 dma_unmap_addr_set(out_meta, mapping, phys_addr); 611 dma_unmap_addr_set(out_meta, mapping, phys_addr);
631 dma_unmap_len_set(out_meta, len, copy_size); 612 dma_unmap_len_set(out_meta, len, copy_size);
632 613
633 iwlagn_txq_attach_buf_to_tfd(trans, txq, 614 iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1);
634 phys_addr, copy_size, 1);
635#ifdef CONFIG_IWLWIFI_DEVICE_TRACING 615#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
636 trace_bufs[0] = &out_cmd->hdr; 616 trace_bufs[0] = &out_cmd->hdr;
637 trace_lens[0] = copy_size; 617 trace_lens[0] = copy_size;
@@ -643,13 +623,12 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
643 continue; 623 continue;
644 if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)) 624 if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
645 continue; 625 continue;
646 phys_addr = dma_map_single(trans->dev, 626 phys_addr = dma_map_single(trans->dev, (void *)cmd->data[i],
647 (void *)cmd->data[i],
648 cmd->len[i], DMA_BIDIRECTIONAL); 627 cmd->len[i], DMA_BIDIRECTIONAL);
649 if (dma_mapping_error(trans->dev, phys_addr)) { 628 if (dma_mapping_error(trans->dev, phys_addr)) {
650 iwlagn_unmap_tfd(trans, out_meta, 629 iwl_unmap_tfd(trans, out_meta,
651 &txq->tfds[q->write_ptr], 630 &txq->tfds[q->write_ptr],
652 DMA_BIDIRECTIONAL); 631 DMA_BIDIRECTIONAL);
653 idx = -ENOMEM; 632 idx = -ENOMEM;
654 goto out; 633 goto out;
655 } 634 }
@@ -723,9 +702,10 @@ static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
723 lockdep_assert_held(&txq->lock); 702 lockdep_assert_held(&txq->lock);
724 703
725 if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) { 704 if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
726 IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), " 705 IWL_ERR(trans,
727 "index %d is out of range [0-%d] %d %d.\n", __func__, 706 "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
728 txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr); 707 __func__, txq_id, idx, q->n_bd,
708 q->write_ptr, q->read_ptr);
729 return; 709 return;
730 } 710 }
731 711
@@ -733,8 +713,8 @@ static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
733 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 713 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
734 714
735 if (nfreed++ > 0) { 715 if (nfreed++ > 0) {
736 IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", idx, 716 IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
737 q->write_ptr, q->read_ptr); 717 idx, q->write_ptr, q->read_ptr);
738 iwl_op_mode_nic_error(trans->op_mode); 718 iwl_op_mode_nic_error(trans->op_mode);
739 } 719 }
740 720
@@ -771,9 +751,9 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
771 * in the queue management code. */ 751 * in the queue management code. */
772 if (WARN(txq_id != trans_pcie->cmd_queue, 752 if (WARN(txq_id != trans_pcie->cmd_queue,
773 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", 753 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
774 txq_id, trans_pcie->cmd_queue, sequence, 754 txq_id, trans_pcie->cmd_queue, sequence,
775 trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr, 755 trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
776 trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) { 756 trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
777 iwl_print_hex_error(trans, pkt, 32); 757 iwl_print_hex_error(trans, pkt, 32);
778 return; 758 return;
779 } 759 }
@@ -784,8 +764,7 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
784 cmd = txq->entries[cmd_index].cmd; 764 cmd = txq->entries[cmd_index].cmd;
785 meta = &txq->entries[cmd_index].meta; 765 meta = &txq->entries[cmd_index].meta;
786 766
787 iwlagn_unmap_tfd(trans, meta, &txq->tfds[index], 767 iwl_unmap_tfd(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
788 DMA_BIDIRECTIONAL);
789 768
790 /* Input error checking is done when commands are added to queue. */ 769 /* Input error checking is done when commands are added to queue. */
791 if (meta->flags & CMD_WANT_SKB) { 770 if (meta->flags & CMD_WANT_SKB) {
@@ -870,8 +849,9 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
870 } 849 }
871 850
872 ret = wait_event_timeout(trans->wait_command_queue, 851 ret = wait_event_timeout(trans->wait_command_queue,
873 !test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status), 852 !test_bit(STATUS_HCMD_ACTIVE,
874 HOST_COMPLETE_TIMEOUT); 853 &trans_pcie->status),
854 HOST_COMPLETE_TIMEOUT);
875 if (!ret) { 855 if (!ret) {
876 if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) { 856 if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
877 struct iwl_tx_queue *txq = 857 struct iwl_tx_queue *txq =
@@ -956,10 +936,10 @@ int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
956 936
957 if ((index >= q->n_bd) || 937 if ((index >= q->n_bd) ||
958 (iwl_queue_used(q, last_to_free) == 0)) { 938 (iwl_queue_used(q, last_to_free) == 0)) {
959 IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), " 939 IWL_ERR(trans,
960 "last_to_free %d is out of range [0-%d] %d %d.\n", 940 "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
961 __func__, txq_id, last_to_free, q->n_bd, 941 __func__, txq_id, last_to_free, q->n_bd,
962 q->write_ptr, q->read_ptr); 942 q->write_ptr, q->read_ptr);
963 return 0; 943 return 0;
964 } 944 }
965 945
@@ -979,7 +959,7 @@ int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
979 959
980 iwlagn_txq_inval_byte_cnt_tbl(trans, txq); 960 iwlagn_txq_inval_byte_cnt_tbl(trans, txq);
981 961
982 iwlagn_txq_free_tfd(trans, txq, DMA_TO_DEVICE); 962 iwl_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
983 freed++; 963 freed++;
984 } 964 }
985 965
diff --git a/drivers/net/wireless/iwmc3200wifi/Kconfig b/drivers/net/wireless/iwmc3200wifi/Kconfig
deleted file mode 100644
index 7107ce53d4d4..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/Kconfig
+++ /dev/null
@@ -1,39 +0,0 @@
1config IWM
2 tristate "Intel Wireless Multicomm 3200 WiFi driver (EXPERIMENTAL)"
3 depends on MMC && EXPERIMENTAL
4 depends on CFG80211
5 select FW_LOADER
6 select IWMC3200TOP
7 help
8 The Intel Wireless Multicomm 3200 hardware is a combo
9 card with GPS, Bluetooth, WiMax and 802.11 radios. It
10 runs over SDIO and is typically found on Moorestown
11 based platform. This driver takes care of the 802.11
12 part, which is a fullmac one.
13
14 If you choose to build it as a module, it'll be called
15 iwmc3200wifi.ko.
16
17config IWM_DEBUG
18 bool "Enable full debugging output in iwmc3200wifi"
19 depends on IWM && DEBUG_FS
20 help
21 This option will enable debug tracing and setting for iwm
22
23 You can set the debug level and module through debugfs. By
24 default all modules are set to the IWL_DL_ERR level.
25 To see the list of debug modules and levels, see iwm/debug.h
26
27 For example, if you want the full MLME debug output:
28 echo 0xff > /sys/kernel/debug/iwm/phyN/debug/mlme
29
30 Or, if you want the full debug, for all modules:
31 echo 0xff > /sys/kernel/debug/iwm/phyN/debug/level
32 echo 0xff > /sys/kernel/debug/iwm/phyN/debug/modules
33
34config IWM_TRACING
35 bool "Enable event tracing for iwmc3200wifi"
36 depends on IWM && EVENT_TRACING
37 help
38 Say Y here to trace all the commands and responses between
39 the driver and firmware (including TX/RX frames) with ftrace.
diff --git a/drivers/net/wireless/iwmc3200wifi/Makefile b/drivers/net/wireless/iwmc3200wifi/Makefile
deleted file mode 100644
index cdc7e07ba113..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
1obj-$(CONFIG_IWM) := iwmc3200wifi.o
2iwmc3200wifi-objs += main.o netdev.o rx.o tx.o sdio.o hal.o fw.o
3iwmc3200wifi-objs += commands.o cfg80211.o eeprom.o
4
5iwmc3200wifi-$(CONFIG_IWM_DEBUG) += debugfs.o
6iwmc3200wifi-$(CONFIG_IWM_TRACING) += trace.o
7
8CFLAGS_trace.o := -I$(src)
9
10ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwmc3200wifi/bus.h b/drivers/net/wireless/iwmc3200wifi/bus.h
deleted file mode 100644
index 62edd5888a7b..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/bus.h
+++ /dev/null
@@ -1,57 +0,0 @@
1/*
2 * Intel Wireless Multicomm 3200 WiFi driver
3 *
4 * Copyright (C) 2009 Intel Corporation <ilw@linux.intel.com>
5 * Samuel Ortiz <samuel.ortiz@intel.com>
6 * Zhu Yi <yi.zhu@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 */
23
24#ifndef __IWM_BUS_H__
25#define __IWM_BUS_H__
26
27#include "iwm.h"
28
29struct iwm_if_ops {
30 int (*enable)(struct iwm_priv *iwm);
31 int (*disable)(struct iwm_priv *iwm);
32 int (*send_chunk)(struct iwm_priv *iwm, u8* buf, int count);
33
34 void (*debugfs_init)(struct iwm_priv *iwm, struct dentry *parent_dir);
35 void (*debugfs_exit)(struct iwm_priv *iwm);
36
37 const char *umac_name;
38 const char *calib_lmac_name;
39 const char *lmac_name;
40};
41
42static inline int iwm_bus_send_chunk(struct iwm_priv *iwm, u8 *buf, int count)
43{
44 return iwm->bus_ops->send_chunk(iwm, buf, count);
45}
46
47static inline int iwm_bus_enable(struct iwm_priv *iwm)
48{
49 return iwm->bus_ops->enable(iwm);
50}
51
52static inline int iwm_bus_disable(struct iwm_priv *iwm)
53{
54 return iwm->bus_ops->disable(iwm);
55}
56
57#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
deleted file mode 100644
index 48e8218fd23b..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c
+++ /dev/null
@@ -1,882 +0,0 @@
1/*
2 * Intel Wireless Multicomm 3200 WiFi driver
3 *
4 * Copyright (C) 2009 Intel Corporation <ilw@linux.intel.com>
5 * Samuel Ortiz <samuel.ortiz@intel.com>
6 * Zhu Yi <yi.zhu@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 */
23
24#include <linux/kernel.h>
25#include <linux/netdevice.h>
26#include <linux/sched.h>
27#include <linux/etherdevice.h>
28#include <linux/wireless.h>
29#include <linux/ieee80211.h>
30#include <linux/slab.h>
31#include <net/cfg80211.h>
32
33#include "iwm.h"
34#include "commands.h"
35#include "cfg80211.h"
36#include "debug.h"
37
38#define RATETAB_ENT(_rate, _rateid, _flags) \
39 { \
40 .bitrate = (_rate), \
41 .hw_value = (_rateid), \
42 .flags = (_flags), \
43 }
44
45#define CHAN2G(_channel, _freq, _flags) { \
46 .band = IEEE80211_BAND_2GHZ, \
47 .center_freq = (_freq), \
48 .hw_value = (_channel), \
49 .flags = (_flags), \
50 .max_antenna_gain = 0, \
51 .max_power = 30, \
52}
53
54#define CHAN5G(_channel, _flags) { \
55 .band = IEEE80211_BAND_5GHZ, \
56 .center_freq = 5000 + (5 * (_channel)), \
57 .hw_value = (_channel), \
58 .flags = (_flags), \
59 .max_antenna_gain = 0, \
60 .max_power = 30, \
61}
62
63static struct ieee80211_rate iwm_rates[] = {
64 RATETAB_ENT(10, 0x1, 0),
65 RATETAB_ENT(20, 0x2, 0),
66 RATETAB_ENT(55, 0x4, 0),
67 RATETAB_ENT(110, 0x8, 0),
68 RATETAB_ENT(60, 0x10, 0),
69 RATETAB_ENT(90, 0x20, 0),
70 RATETAB_ENT(120, 0x40, 0),
71 RATETAB_ENT(180, 0x80, 0),
72 RATETAB_ENT(240, 0x100, 0),
73 RATETAB_ENT(360, 0x200, 0),
74 RATETAB_ENT(480, 0x400, 0),
75 RATETAB_ENT(540, 0x800, 0),
76};
77
78#define iwm_a_rates (iwm_rates + 4)
79#define iwm_a_rates_size 8
80#define iwm_g_rates (iwm_rates + 0)
81#define iwm_g_rates_size 12
82
83static struct ieee80211_channel iwm_2ghz_channels[] = {
84 CHAN2G(1, 2412, 0),
85 CHAN2G(2, 2417, 0),
86 CHAN2G(3, 2422, 0),
87 CHAN2G(4, 2427, 0),
88 CHAN2G(5, 2432, 0),
89 CHAN2G(6, 2437, 0),
90 CHAN2G(7, 2442, 0),
91 CHAN2G(8, 2447, 0),
92 CHAN2G(9, 2452, 0),
93 CHAN2G(10, 2457, 0),
94 CHAN2G(11, 2462, 0),
95 CHAN2G(12, 2467, 0),
96 CHAN2G(13, 2472, 0),
97 CHAN2G(14, 2484, 0),
98};
99
100static struct ieee80211_channel iwm_5ghz_a_channels[] = {
101 CHAN5G(34, 0), CHAN5G(36, 0),
102 CHAN5G(38, 0), CHAN5G(40, 0),
103 CHAN5G(42, 0), CHAN5G(44, 0),
104 CHAN5G(46, 0), CHAN5G(48, 0),
105 CHAN5G(52, 0), CHAN5G(56, 0),
106 CHAN5G(60, 0), CHAN5G(64, 0),
107 CHAN5G(100, 0), CHAN5G(104, 0),
108 CHAN5G(108, 0), CHAN5G(112, 0),
109 CHAN5G(116, 0), CHAN5G(120, 0),
110 CHAN5G(124, 0), CHAN5G(128, 0),
111 CHAN5G(132, 0), CHAN5G(136, 0),
112 CHAN5G(140, 0), CHAN5G(149, 0),
113 CHAN5G(153, 0), CHAN5G(157, 0),
114 CHAN5G(161, 0), CHAN5G(165, 0),
115 CHAN5G(184, 0), CHAN5G(188, 0),
116 CHAN5G(192, 0), CHAN5G(196, 0),
117 CHAN5G(200, 0), CHAN5G(204, 0),
118 CHAN5G(208, 0), CHAN5G(212, 0),
119 CHAN5G(216, 0),
120};
121
122static struct ieee80211_supported_band iwm_band_2ghz = {
123 .channels = iwm_2ghz_channels,
124 .n_channels = ARRAY_SIZE(iwm_2ghz_channels),
125 .bitrates = iwm_g_rates,
126 .n_bitrates = iwm_g_rates_size,
127};
128
129static struct ieee80211_supported_band iwm_band_5ghz = {
130 .channels = iwm_5ghz_a_channels,
131 .n_channels = ARRAY_SIZE(iwm_5ghz_a_channels),
132 .bitrates = iwm_a_rates,
133 .n_bitrates = iwm_a_rates_size,
134};
135
136static int iwm_key_init(struct iwm_key *key, u8 key_index,
137 const u8 *mac_addr, struct key_params *params)
138{
139 key->hdr.key_idx = key_index;
140 if (!mac_addr || is_broadcast_ether_addr(mac_addr)) {
141 key->hdr.multicast = 1;
142 memset(key->hdr.mac, 0xff, ETH_ALEN);
143 } else {
144 key->hdr.multicast = 0;
145 memcpy(key->hdr.mac, mac_addr, ETH_ALEN);
146 }
147
148 if (params) {
149 if (params->key_len > WLAN_MAX_KEY_LEN ||
150 params->seq_len > IW_ENCODE_SEQ_MAX_SIZE)
151 return -EINVAL;
152
153 key->cipher = params->cipher;
154 key->key_len = params->key_len;
155 key->seq_len = params->seq_len;
156 memcpy(key->key, params->key, key->key_len);
157 memcpy(key->seq, params->seq, key->seq_len);
158 }
159
160 return 0;
161}
162
163static int iwm_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
164 u8 key_index, bool pairwise, const u8 *mac_addr,
165 struct key_params *params)
166{
167 struct iwm_priv *iwm = ndev_to_iwm(ndev);
168 struct iwm_key *key;
169 int ret;
170
171 IWM_DBG_WEXT(iwm, DBG, "Adding key for %pM\n", mac_addr);
172
173 if (key_index >= IWM_NUM_KEYS)
174 return -ENOENT;
175
176 key = &iwm->keys[key_index];
177 memset(key, 0, sizeof(struct iwm_key));
178 ret = iwm_key_init(key, key_index, mac_addr, params);
179 if (ret < 0) {
180 IWM_ERR(iwm, "Invalid key_params\n");
181 return ret;
182 }
183
184 return iwm_set_key(iwm, 0, key);
185}
186
187static int iwm_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
188 u8 key_index, bool pairwise, const u8 *mac_addr,
189 void *cookie,
190 void (*callback)(void *cookie,
191 struct key_params*))
192{
193 struct iwm_priv *iwm = ndev_to_iwm(ndev);
194 struct iwm_key *key;
195 struct key_params params;
196
197 IWM_DBG_WEXT(iwm, DBG, "Getting key %d\n", key_index);
198
199 if (key_index >= IWM_NUM_KEYS)
200 return -ENOENT;
201
202 memset(&params, 0, sizeof(params));
203
204 key = &iwm->keys[key_index];
205 params.cipher = key->cipher;
206 params.key_len = key->key_len;
207 params.seq_len = key->seq_len;
208 params.seq = key->seq;
209 params.key = key->key;
210
211 callback(cookie, &params);
212
213 return key->key_len ? 0 : -ENOENT;
214}
215
216
217static int iwm_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
218 u8 key_index, bool pairwise, const u8 *mac_addr)
219{
220 struct iwm_priv *iwm = ndev_to_iwm(ndev);
221 struct iwm_key *key;
222
223 if (key_index >= IWM_NUM_KEYS)
224 return -ENOENT;
225
226 key = &iwm->keys[key_index];
227 if (!iwm->keys[key_index].key_len) {
228 IWM_DBG_WEXT(iwm, DBG, "Key %d not used\n", key_index);
229 return 0;
230 }
231
232 if (key_index == iwm->default_key)
233 iwm->default_key = -1;
234
235 return iwm_set_key(iwm, 1, key);
236}
237
238static int iwm_cfg80211_set_default_key(struct wiphy *wiphy,
239 struct net_device *ndev,
240 u8 key_index, bool unicast,
241 bool multicast)
242{
243 struct iwm_priv *iwm = ndev_to_iwm(ndev);
244
245 IWM_DBG_WEXT(iwm, DBG, "Default key index is: %d\n", key_index);
246
247 if (key_index >= IWM_NUM_KEYS)
248 return -ENOENT;
249
250 if (!iwm->keys[key_index].key_len) {
251 IWM_ERR(iwm, "Key %d not used\n", key_index);
252 return -EINVAL;
253 }
254
255 iwm->default_key = key_index;
256
257 return iwm_set_tx_key(iwm, key_index);
258}
259
260static int iwm_cfg80211_get_station(struct wiphy *wiphy,
261 struct net_device *ndev,
262 u8 *mac, struct station_info *sinfo)
263{
264 struct iwm_priv *iwm = ndev_to_iwm(ndev);
265
266 if (memcmp(mac, iwm->bssid, ETH_ALEN))
267 return -ENOENT;
268
269 sinfo->filled |= STATION_INFO_TX_BITRATE;
270 sinfo->txrate.legacy = iwm->rate * 10;
271
272 if (test_bit(IWM_STATUS_ASSOCIATED, &iwm->status)) {
273 sinfo->filled |= STATION_INFO_SIGNAL;
274 sinfo->signal = iwm->wstats.qual.level;
275 }
276
277 return 0;
278}
279
280
281int iwm_cfg80211_inform_bss(struct iwm_priv *iwm)
282{
283 struct wiphy *wiphy = iwm_to_wiphy(iwm);
284 struct iwm_bss_info *bss;
285 struct iwm_umac_notif_bss_info *umac_bss;
286 struct ieee80211_mgmt *mgmt;
287 struct ieee80211_channel *channel;
288 struct ieee80211_supported_band *band;
289 s32 signal;
290 int freq;
291
292 list_for_each_entry(bss, &iwm->bss_list, node) {
293 umac_bss = bss->bss;
294 mgmt = (struct ieee80211_mgmt *)(umac_bss->frame_buf);
295
296 if (umac_bss->band == UMAC_BAND_2GHZ)
297 band = wiphy->bands[IEEE80211_BAND_2GHZ];
298 else if (umac_bss->band == UMAC_BAND_5GHZ)
299 band = wiphy->bands[IEEE80211_BAND_5GHZ];
300 else {
301 IWM_ERR(iwm, "Invalid band: %d\n", umac_bss->band);
302 return -EINVAL;
303 }
304
305 freq = ieee80211_channel_to_frequency(umac_bss->channel,
306 band->band);
307 channel = ieee80211_get_channel(wiphy, freq);
308 signal = umac_bss->rssi * 100;
309
310 if (!cfg80211_inform_bss_frame(wiphy, channel, mgmt,
311 le16_to_cpu(umac_bss->frame_len),
312 signal, GFP_KERNEL))
313 return -EINVAL;
314 }
315
316 return 0;
317}
318
319static int iwm_cfg80211_change_iface(struct wiphy *wiphy,
320 struct net_device *ndev,
321 enum nl80211_iftype type, u32 *flags,
322 struct vif_params *params)
323{
324 struct wireless_dev *wdev;
325 struct iwm_priv *iwm;
326 u32 old_mode;
327
328 wdev = ndev->ieee80211_ptr;
329 iwm = ndev_to_iwm(ndev);
330 old_mode = iwm->conf.mode;
331
332 switch (type) {
333 case NL80211_IFTYPE_STATION:
334 iwm->conf.mode = UMAC_MODE_BSS;
335 break;
336 case NL80211_IFTYPE_ADHOC:
337 iwm->conf.mode = UMAC_MODE_IBSS;
338 break;
339 default:
340 return -EOPNOTSUPP;
341 }
342
343 wdev->iftype = type;
344
345 if ((old_mode == iwm->conf.mode) || !iwm->umac_profile)
346 return 0;
347
348 iwm->umac_profile->mode = cpu_to_le32(iwm->conf.mode);
349
350 if (iwm->umac_profile_active)
351 iwm_invalidate_mlme_profile(iwm);
352
353 return 0;
354}
355
356static int iwm_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
357 struct cfg80211_scan_request *request)
358{
359 struct iwm_priv *iwm = ndev_to_iwm(ndev);
360 int ret;
361
362 if (!test_bit(IWM_STATUS_READY, &iwm->status)) {
363 IWM_ERR(iwm, "Scan while device is not ready\n");
364 return -EIO;
365 }
366
367 if (test_bit(IWM_STATUS_SCANNING, &iwm->status)) {
368 IWM_ERR(iwm, "Scanning already\n");
369 return -EAGAIN;
370 }
371
372 if (test_bit(IWM_STATUS_SCAN_ABORTING, &iwm->status)) {
373 IWM_ERR(iwm, "Scanning being aborted\n");
374 return -EAGAIN;
375 }
376
377 set_bit(IWM_STATUS_SCANNING, &iwm->status);
378
379 ret = iwm_scan_ssids(iwm, request->ssids, request->n_ssids);
380 if (ret) {
381 clear_bit(IWM_STATUS_SCANNING, &iwm->status);
382 return ret;
383 }
384
385 iwm->scan_request = request;
386 return 0;
387}
388
389static int iwm_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
390{
391 struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
392
393 if (changed & WIPHY_PARAM_RTS_THRESHOLD &&
394 (iwm->conf.rts_threshold != wiphy->rts_threshold)) {
395 int ret;
396
397 iwm->conf.rts_threshold = wiphy->rts_threshold;
398
399 ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
400 CFG_RTS_THRESHOLD,
401 iwm->conf.rts_threshold);
402 if (ret < 0)
403 return ret;
404 }
405
406 if (changed & WIPHY_PARAM_FRAG_THRESHOLD &&
407 (iwm->conf.frag_threshold != wiphy->frag_threshold)) {
408 int ret;
409
410 iwm->conf.frag_threshold = wiphy->frag_threshold;
411
412 ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_FA_CFG_FIX,
413 CFG_FRAG_THRESHOLD,
414 iwm->conf.frag_threshold);
415 if (ret < 0)
416 return ret;
417 }
418
419 return 0;
420}
421
422static int iwm_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
423 struct cfg80211_ibss_params *params)
424{
425 struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
426 struct ieee80211_channel *chan = params->channel;
427
428 if (!test_bit(IWM_STATUS_READY, &iwm->status))
429 return -EIO;
430
431 /* UMAC doesn't support creating or joining an IBSS network
432 * with specified bssid. */
433 if (params->bssid)
434 return -EOPNOTSUPP;
435
436 iwm->channel = ieee80211_frequency_to_channel(chan->center_freq);
437 iwm->umac_profile->ibss.band = chan->band;
438 iwm->umac_profile->ibss.channel = iwm->channel;
439 iwm->umac_profile->ssid.ssid_len = params->ssid_len;
440 memcpy(iwm->umac_profile->ssid.ssid, params->ssid, params->ssid_len);
441
442 return iwm_send_mlme_profile(iwm);
443}
444
445static int iwm_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
446{
447 struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
448
449 if (iwm->umac_profile_active)
450 return iwm_invalidate_mlme_profile(iwm);
451
452 return 0;
453}
454
455static int iwm_set_auth_type(struct iwm_priv *iwm,
456 enum nl80211_auth_type sme_auth_type)
457{
458 u8 *auth_type = &iwm->umac_profile->sec.auth_type;
459
460 switch (sme_auth_type) {
461 case NL80211_AUTHTYPE_AUTOMATIC:
462 case NL80211_AUTHTYPE_OPEN_SYSTEM:
463 IWM_DBG_WEXT(iwm, DBG, "OPEN auth\n");
464 *auth_type = UMAC_AUTH_TYPE_OPEN;
465 break;
466 case NL80211_AUTHTYPE_SHARED_KEY:
467 if (iwm->umac_profile->sec.flags &
468 (UMAC_SEC_FLG_WPA_ON_MSK | UMAC_SEC_FLG_RSNA_ON_MSK)) {
469 IWM_DBG_WEXT(iwm, DBG, "WPA auth alg\n");
470 *auth_type = UMAC_AUTH_TYPE_RSNA_PSK;
471 } else {
472 IWM_DBG_WEXT(iwm, DBG, "WEP shared key auth alg\n");
473 *auth_type = UMAC_AUTH_TYPE_LEGACY_PSK;
474 }
475
476 break;
477 default:
478 IWM_ERR(iwm, "Unsupported auth alg: 0x%x\n", sme_auth_type);
479 return -ENOTSUPP;
480 }
481
482 return 0;
483}
484
485static int iwm_set_wpa_version(struct iwm_priv *iwm, u32 wpa_version)
486{
487 IWM_DBG_WEXT(iwm, DBG, "wpa_version: %d\n", wpa_version);
488
489 if (!wpa_version) {
490 iwm->umac_profile->sec.flags = UMAC_SEC_FLG_LEGACY_PROFILE;
491 return 0;
492 }
493
494 if (wpa_version & NL80211_WPA_VERSION_1)
495 iwm->umac_profile->sec.flags = UMAC_SEC_FLG_WPA_ON_MSK;
496
497 if (wpa_version & NL80211_WPA_VERSION_2)
498 iwm->umac_profile->sec.flags = UMAC_SEC_FLG_RSNA_ON_MSK;
499
500 return 0;
501}
502
503static int iwm_set_cipher(struct iwm_priv *iwm, u32 cipher, bool ucast)
504{
505 u8 *profile_cipher = ucast ? &iwm->umac_profile->sec.ucast_cipher :
506 &iwm->umac_profile->sec.mcast_cipher;
507
508 if (!cipher) {
509 *profile_cipher = UMAC_CIPHER_TYPE_NONE;
510 return 0;
511 }
512
513 IWM_DBG_WEXT(iwm, DBG, "%ccast cipher is 0x%x\n", ucast ? 'u' : 'm',
514 cipher);
515
516 switch (cipher) {
517 case IW_AUTH_CIPHER_NONE:
518 *profile_cipher = UMAC_CIPHER_TYPE_NONE;
519 break;
520 case WLAN_CIPHER_SUITE_WEP40:
521 *profile_cipher = UMAC_CIPHER_TYPE_WEP_40;
522 break;
523 case WLAN_CIPHER_SUITE_WEP104:
524 *profile_cipher = UMAC_CIPHER_TYPE_WEP_104;
525 break;
526 case WLAN_CIPHER_SUITE_TKIP:
527 *profile_cipher = UMAC_CIPHER_TYPE_TKIP;
528 break;
529 case WLAN_CIPHER_SUITE_CCMP:
530 *profile_cipher = UMAC_CIPHER_TYPE_CCMP;
531 break;
532 default:
533 IWM_ERR(iwm, "Unsupported cipher: 0x%x\n", cipher);
534 return -ENOTSUPP;
535 }
536
537 return 0;
538}
539
540static int iwm_set_key_mgt(struct iwm_priv *iwm, u32 key_mgt)
541{
542 u8 *auth_type = &iwm->umac_profile->sec.auth_type;
543
544 IWM_DBG_WEXT(iwm, DBG, "key_mgt: 0x%x\n", key_mgt);
545
546 if (key_mgt == WLAN_AKM_SUITE_8021X)
547 *auth_type = UMAC_AUTH_TYPE_8021X;
548 else if (key_mgt == WLAN_AKM_SUITE_PSK) {
549 if (iwm->umac_profile->sec.flags &
550 (UMAC_SEC_FLG_WPA_ON_MSK | UMAC_SEC_FLG_RSNA_ON_MSK))
551 *auth_type = UMAC_AUTH_TYPE_RSNA_PSK;
552 else
553 *auth_type = UMAC_AUTH_TYPE_LEGACY_PSK;
554 } else {
555 IWM_ERR(iwm, "Invalid key mgt: 0x%x\n", key_mgt);
556 return -EINVAL;
557 }
558
559 return 0;
560}
561
562
563static int iwm_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
564 struct cfg80211_connect_params *sme)
565{
566 struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
567 struct ieee80211_channel *chan = sme->channel;
568 struct key_params key_param;
569 int ret;
570
571 if (!test_bit(IWM_STATUS_READY, &iwm->status))
572 return -EIO;
573
574 if (!sme->ssid)
575 return -EINVAL;
576
577 if (iwm->umac_profile_active) {
578 ret = iwm_invalidate_mlme_profile(iwm);
579 if (ret) {
580 IWM_ERR(iwm, "Couldn't invalidate profile\n");
581 return ret;
582 }
583 }
584
585 if (chan)
586 iwm->channel =
587 ieee80211_frequency_to_channel(chan->center_freq);
588
589 iwm->umac_profile->ssid.ssid_len = sme->ssid_len;
590 memcpy(iwm->umac_profile->ssid.ssid, sme->ssid, sme->ssid_len);
591
592 if (sme->bssid) {
593 IWM_DBG_WEXT(iwm, DBG, "BSSID: %pM\n", sme->bssid);
594 memcpy(&iwm->umac_profile->bssid[0], sme->bssid, ETH_ALEN);
595 iwm->umac_profile->bss_num = 1;
596 } else {
597 memset(&iwm->umac_profile->bssid[0], 0, ETH_ALEN);
598 iwm->umac_profile->bss_num = 0;
599 }
600
601 ret = iwm_set_wpa_version(iwm, sme->crypto.wpa_versions);
602 if (ret < 0)
603 return ret;
604
605 ret = iwm_set_auth_type(iwm, sme->auth_type);
606 if (ret < 0)
607 return ret;
608
609 if (sme->crypto.n_ciphers_pairwise) {
610 ret = iwm_set_cipher(iwm, sme->crypto.ciphers_pairwise[0],
611 true);
612 if (ret < 0)
613 return ret;
614 }
615
616 ret = iwm_set_cipher(iwm, sme->crypto.cipher_group, false);
617 if (ret < 0)
618 return ret;
619
620 if (sme->crypto.n_akm_suites) {
621 ret = iwm_set_key_mgt(iwm, sme->crypto.akm_suites[0]);
622 if (ret < 0)
623 return ret;
624 }
625
626 /*
627 * We save the WEP key in case we want to do shared authentication.
628 * We have to do it so because UMAC will assert whenever it gets a
629 * key before a profile.
630 */
631 if (sme->key) {
632 key_param.key = kmemdup(sme->key, sme->key_len, GFP_KERNEL);
633 if (key_param.key == NULL)
634 return -ENOMEM;
635 key_param.key_len = sme->key_len;
636 key_param.seq_len = 0;
637 key_param.cipher = sme->crypto.ciphers_pairwise[0];
638
639 ret = iwm_key_init(&iwm->keys[sme->key_idx], sme->key_idx,
640 NULL, &key_param);
641 kfree(key_param.key);
642 if (ret < 0) {
643 IWM_ERR(iwm, "Invalid key_params\n");
644 return ret;
645 }
646
647 iwm->default_key = sme->key_idx;
648 }
649
650 /* WPA and open AUTH type from wpa_s means WPS (a.k.a. WSC) */
651 if ((iwm->umac_profile->sec.flags &
652 (UMAC_SEC_FLG_WPA_ON_MSK | UMAC_SEC_FLG_RSNA_ON_MSK)) &&
653 iwm->umac_profile->sec.auth_type == UMAC_AUTH_TYPE_OPEN) {
654 iwm->umac_profile->sec.flags = UMAC_SEC_FLG_WSC_ON_MSK;
655 }
656
657 ret = iwm_send_mlme_profile(iwm);
658
659 if (iwm->umac_profile->sec.auth_type != UMAC_AUTH_TYPE_LEGACY_PSK ||
660 sme->key == NULL)
661 return ret;
662
663 /*
664 * We want to do shared auth.
665 * We need to actually set the key we previously cached,
666 * and then tell the UMAC it's the default one.
667 * That will trigger the auth+assoc UMAC machinery, and again,
668 * this must be done after setting the profile.
669 */
670 ret = iwm_set_key(iwm, 0, &iwm->keys[sme->key_idx]);
671 if (ret < 0)
672 return ret;
673
674 return iwm_set_tx_key(iwm, iwm->default_key);
675}
676
677static int iwm_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
678 u16 reason_code)
679{
680 struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
681
682 IWM_DBG_WEXT(iwm, DBG, "Active: %d\n", iwm->umac_profile_active);
683
684 if (iwm->umac_profile_active)
685 iwm_invalidate_mlme_profile(iwm);
686
687 return 0;
688}
689
690static int iwm_cfg80211_set_txpower(struct wiphy *wiphy,
691 enum nl80211_tx_power_setting type, int mbm)
692{
693 struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
694 int ret;
695
696 switch (type) {
697 case NL80211_TX_POWER_AUTOMATIC:
698 return 0;
699 case NL80211_TX_POWER_FIXED:
700 if (mbm < 0 || (mbm % 100))
701 return -EOPNOTSUPP;
702
703 if (!test_bit(IWM_STATUS_READY, &iwm->status))
704 return 0;
705
706 ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
707 CFG_TX_PWR_LIMIT_USR,
708 MBM_TO_DBM(mbm) * 2);
709 if (ret < 0)
710 return ret;
711
712 return iwm_tx_power_trigger(iwm);
713 default:
714 IWM_ERR(iwm, "Unsupported power type: %d\n", type);
715 return -EOPNOTSUPP;
716 }
717
718 return 0;
719}
720
721static int iwm_cfg80211_get_txpower(struct wiphy *wiphy, int *dbm)
722{
723 struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
724
725 *dbm = iwm->txpower >> 1;
726
727 return 0;
728}
729
730static int iwm_cfg80211_set_power_mgmt(struct wiphy *wiphy,
731 struct net_device *dev,
732 bool enabled, int timeout)
733{
734 struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
735 u32 power_index;
736
737 if (enabled)
738 power_index = IWM_POWER_INDEX_DEFAULT;
739 else
740 power_index = IWM_POWER_INDEX_MIN;
741
742 if (power_index == iwm->conf.power_index)
743 return 0;
744
745 iwm->conf.power_index = power_index;
746
747 return iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
748 CFG_POWER_INDEX, iwm->conf.power_index);
749}
750
751static int iwm_cfg80211_set_pmksa(struct wiphy *wiphy,
752 struct net_device *netdev,
753 struct cfg80211_pmksa *pmksa)
754{
755 struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
756
757 return iwm_send_pmkid_update(iwm, pmksa, IWM_CMD_PMKID_ADD);
758}
759
760static int iwm_cfg80211_del_pmksa(struct wiphy *wiphy,
761 struct net_device *netdev,
762 struct cfg80211_pmksa *pmksa)
763{
764 struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
765
766 return iwm_send_pmkid_update(iwm, pmksa, IWM_CMD_PMKID_DEL);
767}
768
769static int iwm_cfg80211_flush_pmksa(struct wiphy *wiphy,
770 struct net_device *netdev)
771{
772 struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
773 struct cfg80211_pmksa pmksa;
774
775 memset(&pmksa, 0, sizeof(struct cfg80211_pmksa));
776
777 return iwm_send_pmkid_update(iwm, &pmksa, IWM_CMD_PMKID_FLUSH);
778}
779
780
781static struct cfg80211_ops iwm_cfg80211_ops = {
782 .change_virtual_intf = iwm_cfg80211_change_iface,
783 .add_key = iwm_cfg80211_add_key,
784 .get_key = iwm_cfg80211_get_key,
785 .del_key = iwm_cfg80211_del_key,
786 .set_default_key = iwm_cfg80211_set_default_key,
787 .get_station = iwm_cfg80211_get_station,
788 .scan = iwm_cfg80211_scan,
789 .set_wiphy_params = iwm_cfg80211_set_wiphy_params,
790 .connect = iwm_cfg80211_connect,
791 .disconnect = iwm_cfg80211_disconnect,
792 .join_ibss = iwm_cfg80211_join_ibss,
793 .leave_ibss = iwm_cfg80211_leave_ibss,
794 .set_tx_power = iwm_cfg80211_set_txpower,
795 .get_tx_power = iwm_cfg80211_get_txpower,
796 .set_power_mgmt = iwm_cfg80211_set_power_mgmt,
797 .set_pmksa = iwm_cfg80211_set_pmksa,
798 .del_pmksa = iwm_cfg80211_del_pmksa,
799 .flush_pmksa = iwm_cfg80211_flush_pmksa,
800};
801
802static const u32 cipher_suites[] = {
803 WLAN_CIPHER_SUITE_WEP40,
804 WLAN_CIPHER_SUITE_WEP104,
805 WLAN_CIPHER_SUITE_TKIP,
806 WLAN_CIPHER_SUITE_CCMP,
807};
808
809struct wireless_dev *iwm_wdev_alloc(int sizeof_bus, struct device *dev)
810{
811 int ret = 0;
812 struct wireless_dev *wdev;
813
814 /*
815 * We're trying to have the following memory
816 * layout:
817 *
818 * +-------------------------+
819 * | struct wiphy |
820 * +-------------------------+
821 * | struct iwm_priv |
822 * +-------------------------+
823 * | bus private data |
824 * | (e.g. iwm_priv_sdio) |
825 * +-------------------------+
826 *
827 */
828
829 wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
830 if (!wdev) {
831 dev_err(dev, "Couldn't allocate wireless device\n");
832 return ERR_PTR(-ENOMEM);
833 }
834
835 wdev->wiphy = wiphy_new(&iwm_cfg80211_ops,
836 sizeof(struct iwm_priv) + sizeof_bus);
837 if (!wdev->wiphy) {
838 dev_err(dev, "Couldn't allocate wiphy device\n");
839 ret = -ENOMEM;
840 goto out_err_new;
841 }
842
843 set_wiphy_dev(wdev->wiphy, dev);
844 wdev->wiphy->max_scan_ssids = UMAC_WIFI_IF_PROBE_OPTION_MAX;
845 wdev->wiphy->max_num_pmkids = UMAC_MAX_NUM_PMKIDS;
846 wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
847 BIT(NL80211_IFTYPE_ADHOC);
848 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &iwm_band_2ghz;
849 wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &iwm_band_5ghz;
850 wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
851
852 wdev->wiphy->cipher_suites = cipher_suites;
853 wdev->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
854
855 ret = wiphy_register(wdev->wiphy);
856 if (ret < 0) {
857 dev_err(dev, "Couldn't register wiphy device\n");
858 goto out_err_register;
859 }
860
861 return wdev;
862
863 out_err_register:
864 wiphy_free(wdev->wiphy);
865
866 out_err_new:
867 kfree(wdev);
868
869 return ERR_PTR(ret);
870}
871
872void iwm_wdev_free(struct iwm_priv *iwm)
873{
874 struct wireless_dev *wdev = iwm_to_wdev(iwm);
875
876 if (!wdev)
877 return;
878
879 wiphy_unregister(wdev->wiphy);
880 wiphy_free(wdev->wiphy);
881 kfree(wdev);
882}
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.h b/drivers/net/wireless/iwmc3200wifi/cfg80211.h
deleted file mode 100644
index 56a34145acbf..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.h
+++ /dev/null
@@ -1,31 +0,0 @@
1/*
2 * Intel Wireless Multicomm 3200 WiFi driver
3 *
4 * Copyright (C) 2009 Intel Corporation <ilw@linux.intel.com>
5 * Samuel Ortiz <samuel.ortiz@intel.com>
6 * Zhu Yi <yi.zhu@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 */
23
24#ifndef __IWM_CFG80211_H__
25#define __IWM_CFG80211_H__
26
27int iwm_cfg80211_inform_bss(struct iwm_priv *iwm);
28struct wireless_dev *iwm_wdev_alloc(int sizeof_bus, struct device *dev);
29void iwm_wdev_free(struct iwm_priv *iwm);
30
31#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c
deleted file mode 100644
index bd75078c454b..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/commands.c
+++ /dev/null
@@ -1,1002 +0,0 @@
1/*
2 * Intel Wireless Multicomm 3200 WiFi driver
3 *
4 * Copyright (C) 2009 Intel Corporation. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Intel Corporation nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 *
33 * Intel Corporation <ilw@linux.intel.com>
34 * Samuel Ortiz <samuel.ortiz@intel.com>
35 * Zhu Yi <yi.zhu@intel.com>
36 *
37 */
38
39#include <linux/kernel.h>
40#include <linux/wireless.h>
41#include <linux/etherdevice.h>
42#include <linux/ieee80211.h>
43#include <linux/sched.h>
44#include <linux/slab.h>
45#include <linux/moduleparam.h>
46
47#include "iwm.h"
48#include "bus.h"
49#include "hal.h"
50#include "umac.h"
51#include "commands.h"
52#include "debug.h"
53
/*
 * Wrap an LMAC command into a UMAC WIFI_PASS_THROUGH command and send it.
 *
 * @lmac_cmd_id:       LMAC opcode to execute.
 * @lmac_payload:      payload appended after the LMAC command header.
 * @lmac_payload_size: payload length in bytes.
 * @resp:              non-zero if a response notification is expected.
 *
 * NOTE(review): only lmac_cmd.id and umac_cmd.id/resp are initialized
 * here; the remaining fields appear to be filled in by the HAL layer —
 * confirm against iwm_hal_send_host_cmd().
 */
static int iwm_send_lmac_ptrough_cmd(struct iwm_priv *iwm,
				     u8 lmac_cmd_id,
				     const void *lmac_payload,
				     u16 lmac_payload_size,
				     u8 resp)
{
	struct iwm_udma_wifi_cmd udma_cmd = UDMA_LMAC_INIT;
	struct iwm_umac_cmd umac_cmd;
	struct iwm_lmac_cmd lmac_cmd;

	lmac_cmd.id = lmac_cmd_id;

	umac_cmd.id = UMAC_CMD_OPCODE_WIFI_PASS_THROUGH;
	umac_cmd.resp = resp;

	return iwm_hal_send_host_cmd(iwm, &udma_cmd, &umac_cmd, &lmac_cmd,
				     lmac_payload, lmac_payload_size);
}
72
73int iwm_send_wifi_if_cmd(struct iwm_priv *iwm, void *payload, u16 payload_size,
74 bool resp)
75{
76 struct iwm_umac_wifi_if *hdr = (struct iwm_umac_wifi_if *)payload;
77 struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
78 struct iwm_umac_cmd umac_cmd;
79 int ret;
80 u8 oid = hdr->oid;
81
82 if (!test_bit(IWM_STATUS_READY, &iwm->status)) {
83 IWM_ERR(iwm, "Interface is not ready yet");
84 return -EAGAIN;
85 }
86
87 umac_cmd.id = UMAC_CMD_OPCODE_WIFI_IF_WRAPPER;
88 umac_cmd.resp = resp;
89
90 ret = iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd,
91 payload, payload_size);
92
93 if (resp) {
94 ret = wait_event_interruptible_timeout(iwm->wifi_ntfy_queue,
95 test_and_clear_bit(oid, &iwm->wifi_ntfy[0]),
96 3 * HZ);
97
98 return ret ? 0 : -EBUSY;
99 }
100
101 return ret;
102}
103
/* Wifi/WiMAX coexistence policy, selectable at module load time
 * (default: CM). */
static int modparam_wiwi = COEX_MODE_CM;
module_param_named(wiwi, modparam_wiwi, int, 0644);
MODULE_PARM_DESC(wiwi, "Wifi-WiMAX coexistence: 1=SA, 2=XOR, 3=CM (default)");
107
/*
 * STA coexistence priority table for XOR (time-sliced) mode, one entry
 * per COEX_EVENTS_NUM event.  NOTE(review): the meaning of the three
 * numeric columns is given by struct coex_event in commands.h — the
 * values are the vendor's reference tuning and should not be edited
 * without hardware documentation.
 */
static struct coex_event iwm_sta_xor_prio_tbl[COEX_EVENTS_NUM] =
{
	{4, 3, 0, COEX_UNASSOC_IDLE_FLAGS},
	{4, 3, 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
	{4, 3, 0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
	{4, 3, 0, COEX_CALIBRATION_FLAGS},
	{4, 3, 0, COEX_PERIODIC_CALIBRATION_FLAGS},
	{4, 3, 0, COEX_CONNECTION_ESTAB_FLAGS},
	{4, 3, 0, COEX_ASSOCIATED_IDLE_FLAGS},
	{4, 3, 0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
	{4, 3, 0, COEX_ASSOC_AUTO_SCAN_FLAGS},
	{4, 3, 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
	{6, 3, 0, COEX_XOR_RF_ON_FLAGS},
	{4, 3, 0, COEX_RF_OFF_FLAGS},
	{6, 6, 0, COEX_STAND_ALONE_DEBUG_FLAGS},
	{4, 3, 0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
	{4, 3, 0, COEX_RSRVD1_FLAGS},
	{4, 3, 0, COEX_RSRVD2_FLAGS}
};
127
/*
 * STA coexistence priority table for CM mode; same layout as
 * iwm_sta_xor_prio_tbl above, different per-event priorities.
 */
static struct coex_event iwm_sta_cm_prio_tbl[COEX_EVENTS_NUM] =
{
	{1, 1, 0, COEX_UNASSOC_IDLE_FLAGS},
	{4, 4, 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
	{3, 3, 0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
	{6, 6, 0, COEX_CALIBRATION_FLAGS},
	{3, 3, 0, COEX_PERIODIC_CALIBRATION_FLAGS},
	{6, 5, 0, COEX_CONNECTION_ESTAB_FLAGS},
	{4, 4, 0, COEX_ASSOCIATED_IDLE_FLAGS},
	{4, 4, 0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
	{4, 4, 0, COEX_ASSOC_AUTO_SCAN_FLAGS},
	{4, 4, 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
	{1, 1, 0, COEX_RF_ON_FLAGS},
	{1, 1, 0, COEX_RF_OFF_FLAGS},
	{7, 7, 0, COEX_STAND_ALONE_DEBUG_FLAGS},
	{5, 4, 0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
	{1, 1, 0, COEX_RSRVD1_FLAGS},
	{1, 1, 0, COEX_RSRVD2_FLAGS}
};
147
148int iwm_send_prio_table(struct iwm_priv *iwm)
149{
150 struct iwm_coex_prio_table_cmd coex_table_cmd;
151 u32 coex_enabled, mode_enabled;
152
153 memset(&coex_table_cmd, 0, sizeof(struct iwm_coex_prio_table_cmd));
154
155 coex_table_cmd.flags = COEX_FLAGS_STA_TABLE_VALID_MSK;
156
157 switch (modparam_wiwi) {
158 case COEX_MODE_XOR:
159 case COEX_MODE_CM:
160 coex_enabled = 1;
161 break;
162 default:
163 coex_enabled = 0;
164 break;
165 }
166
167 switch (iwm->conf.mode) {
168 case UMAC_MODE_BSS:
169 case UMAC_MODE_IBSS:
170 mode_enabled = 1;
171 break;
172 default:
173 mode_enabled = 0;
174 break;
175 }
176
177 if (coex_enabled && mode_enabled) {
178 coex_table_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK |
179 COEX_FLAGS_ASSOC_WAKEUP_UMASK_MSK |
180 COEX_FLAGS_UNASSOC_WAKEUP_UMASK_MSK;
181
182 switch (modparam_wiwi) {
183 case COEX_MODE_XOR:
184 memcpy(coex_table_cmd.sta_prio, iwm_sta_xor_prio_tbl,
185 sizeof(iwm_sta_xor_prio_tbl));
186 break;
187 case COEX_MODE_CM:
188 memcpy(coex_table_cmd.sta_prio, iwm_sta_cm_prio_tbl,
189 sizeof(iwm_sta_cm_prio_tbl));
190 break;
191 default:
192 IWM_ERR(iwm, "Invalid coex_mode 0x%x\n",
193 modparam_wiwi);
194 break;
195 }
196 } else
197 IWM_WARN(iwm, "coexistense disabled\n");
198
199 return iwm_send_lmac_ptrough_cmd(iwm, COEX_PRIORITY_TABLE_CMD,
200 &coex_table_cmd,
201 sizeof(struct iwm_coex_prio_table_cmd), 0);
202}
203
204int iwm_send_init_calib_cfg(struct iwm_priv *iwm, u8 calib_requested)
205{
206 struct iwm_lmac_cal_cfg_cmd cal_cfg_cmd;
207
208 memset(&cal_cfg_cmd, 0, sizeof(struct iwm_lmac_cal_cfg_cmd));
209
210 cal_cfg_cmd.ucode_cfg.init.enable = cpu_to_le32(calib_requested);
211 cal_cfg_cmd.ucode_cfg.init.start = cpu_to_le32(calib_requested);
212 cal_cfg_cmd.ucode_cfg.init.send_res = cpu_to_le32(calib_requested);
213 cal_cfg_cmd.ucode_cfg.flags =
214 cpu_to_le32(CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_AFTER_MSK);
215
216 return iwm_send_lmac_ptrough_cmd(iwm, CALIBRATION_CFG_CMD, &cal_cfg_cmd,
217 sizeof(struct iwm_lmac_cal_cfg_cmd), 1);
218}
219
220int iwm_send_periodic_calib_cfg(struct iwm_priv *iwm, u8 calib_requested)
221{
222 struct iwm_lmac_cal_cfg_cmd cal_cfg_cmd;
223
224 memset(&cal_cfg_cmd, 0, sizeof(struct iwm_lmac_cal_cfg_cmd));
225
226 cal_cfg_cmd.ucode_cfg.periodic.enable = cpu_to_le32(calib_requested);
227 cal_cfg_cmd.ucode_cfg.periodic.start = cpu_to_le32(calib_requested);
228
229 return iwm_send_lmac_ptrough_cmd(iwm, CALIBRATION_CFG_CMD, &cal_cfg_cmd,
230 sizeof(struct iwm_lmac_cal_cfg_cmd), 0);
231}
232
233int iwm_store_rxiq_calib_result(struct iwm_priv *iwm)
234{
235 struct iwm_calib_rxiq *rxiq;
236 u8 *eeprom_rxiq = iwm_eeprom_access(iwm, IWM_EEPROM_CALIB_RXIQ);
237 int grplen = sizeof(struct iwm_calib_rxiq_group);
238
239 rxiq = kzalloc(sizeof(struct iwm_calib_rxiq), GFP_KERNEL);
240 if (!rxiq) {
241 IWM_ERR(iwm, "Couldn't alloc memory for RX IQ\n");
242 return -ENOMEM;
243 }
244
245 eeprom_rxiq = iwm_eeprom_access(iwm, IWM_EEPROM_CALIB_RXIQ);
246 if (IS_ERR(eeprom_rxiq)) {
247 IWM_ERR(iwm, "Couldn't access EEPROM RX IQ entry\n");
248 kfree(rxiq);
249 return PTR_ERR(eeprom_rxiq);
250 }
251
252 iwm->calib_res[SHILOH_PHY_CALIBRATE_RX_IQ_CMD].buf = (u8 *)rxiq;
253 iwm->calib_res[SHILOH_PHY_CALIBRATE_RX_IQ_CMD].size = sizeof(*rxiq);
254
255 rxiq->hdr.opcode = SHILOH_PHY_CALIBRATE_RX_IQ_CMD;
256 rxiq->hdr.first_grp = 0;
257 rxiq->hdr.grp_num = 1;
258 rxiq->hdr.all_data_valid = 1;
259
260 memcpy(&rxiq->group[0], eeprom_rxiq, 4 * grplen);
261 memcpy(&rxiq->group[4], eeprom_rxiq + 6 * grplen, grplen);
262
263 return 0;
264}
265
266int iwm_send_calib_results(struct iwm_priv *iwm)
267{
268 int i, ret = 0;
269
270 for (i = PHY_CALIBRATE_OPCODES_NUM; i < CALIBRATION_CMD_NUM; i++) {
271 if (test_bit(i - PHY_CALIBRATE_OPCODES_NUM,
272 &iwm->calib_done_map)) {
273 IWM_DBG_CMD(iwm, DBG,
274 "Send calibration %d result\n", i);
275 ret |= iwm_send_lmac_ptrough_cmd(iwm,
276 REPLY_PHY_CALIBRATION_CMD,
277 iwm->calib_res[i].buf,
278 iwm->calib_res[i].size, 0);
279
280 kfree(iwm->calib_res[i].buf);
281 iwm->calib_res[i].buf = NULL;
282 iwm->calib_res[i].size = 0;
283 }
284 }
285
286 return ret;
287}
288
289int iwm_send_ct_kill_cfg(struct iwm_priv *iwm, u8 entry, u8 exit)
290{
291 struct iwm_ct_kill_cfg_cmd cmd;
292
293 cmd.entry_threshold = entry;
294 cmd.exit_threshold = exit;
295
296 return iwm_send_lmac_ptrough_cmd(iwm, REPLY_CT_KILL_CONFIG_CMD, &cmd,
297 sizeof(struct iwm_ct_kill_cfg_cmd), 0);
298}
299
/*
 * Send UMAC_CMD_OPCODE_RESET to the UMAC.
 *
 * @reset_flags: little-endian flag word selecting what to reset.
 * @resp:        whether a response is requested for this command.
 */
int iwm_send_umac_reset(struct iwm_priv *iwm, __le32 reset_flags, bool resp)
{
	struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
	struct iwm_umac_cmd umac_cmd;
	struct iwm_umac_cmd_reset reset;

	reset.flags = reset_flags;

	umac_cmd.id = UMAC_CMD_OPCODE_RESET;
	umac_cmd.resp = resp;

	return iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd, &reset,
				     sizeof(struct iwm_umac_cmd_reset));
}
314
315int iwm_umac_set_config_fix(struct iwm_priv *iwm, u16 tbl, u16 key, u32 value)
316{
317 struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
318 struct iwm_umac_cmd umac_cmd;
319 struct iwm_umac_cmd_set_param_fix param;
320
321 if ((tbl != UMAC_PARAM_TBL_CFG_FIX) &&
322 (tbl != UMAC_PARAM_TBL_FA_CFG_FIX))
323 return -EINVAL;
324
325 umac_cmd.id = UMAC_CMD_OPCODE_SET_PARAM_FIX;
326 umac_cmd.resp = 0;
327
328 param.tbl = cpu_to_le16(tbl);
329 param.key = cpu_to_le16(key);
330 param.value = cpu_to_le32(value);
331
332 return iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd, &param,
333 sizeof(struct iwm_umac_cmd_set_param_fix));
334}
335
336int iwm_umac_set_config_var(struct iwm_priv *iwm, u16 key,
337 void *payload, u16 payload_size)
338{
339 struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
340 struct iwm_umac_cmd umac_cmd;
341 struct iwm_umac_cmd_set_param_var *param_hdr;
342 u8 *param;
343 int ret;
344
345 param = kzalloc(payload_size +
346 sizeof(struct iwm_umac_cmd_set_param_var), GFP_KERNEL);
347 if (!param) {
348 IWM_ERR(iwm, "Couldn't allocate param\n");
349 return -ENOMEM;
350 }
351
352 param_hdr = (struct iwm_umac_cmd_set_param_var *)param;
353
354 umac_cmd.id = UMAC_CMD_OPCODE_SET_PARAM_VAR;
355 umac_cmd.resp = 0;
356
357 param_hdr->tbl = cpu_to_le16(UMAC_PARAM_TBL_CFG_VAR);
358 param_hdr->key = cpu_to_le16(key);
359 param_hdr->len = cpu_to_le16(payload_size);
360 memcpy(param + sizeof(struct iwm_umac_cmd_set_param_var),
361 payload, payload_size);
362
363 ret = iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd, param,
364 sizeof(struct iwm_umac_cmd_set_param_var) +
365 payload_size);
366 kfree(param);
367
368 return ret;
369}
370
371int iwm_send_umac_config(struct iwm_priv *iwm, __le32 reset_flags)
372{
373 int ret;
374
375 /* Use UMAC default values */
376 ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
377 CFG_POWER_INDEX, iwm->conf.power_index);
378 if (ret < 0)
379 return ret;
380
381 ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_FA_CFG_FIX,
382 CFG_FRAG_THRESHOLD,
383 iwm->conf.frag_threshold);
384 if (ret < 0)
385 return ret;
386
387 ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
388 CFG_RTS_THRESHOLD,
389 iwm->conf.rts_threshold);
390 if (ret < 0)
391 return ret;
392
393 ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
394 CFG_CTS_TO_SELF, iwm->conf.cts_to_self);
395 if (ret < 0)
396 return ret;
397
398 ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
399 CFG_WIRELESS_MODE,
400 iwm->conf.wireless_mode);
401 if (ret < 0)
402 return ret;
403
404 ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
405 CFG_COEX_MODE, modparam_wiwi);
406 if (ret < 0)
407 return ret;
408
409 /*
410 ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
411 CFG_ASSOCIATION_TIMEOUT,
412 iwm->conf.assoc_timeout);
413 if (ret < 0)
414 return ret;
415
416 ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
417 CFG_ROAM_TIMEOUT,
418 iwm->conf.roam_timeout);
419 if (ret < 0)
420 return ret;
421
422 ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
423 CFG_WIRELESS_MODE,
424 WIRELESS_MODE_11A | WIRELESS_MODE_11G);
425 if (ret < 0)
426 return ret;
427 */
428
429 ret = iwm_umac_set_config_var(iwm, CFG_NET_ADDR,
430 iwm_to_ndev(iwm)->dev_addr, ETH_ALEN);
431 if (ret < 0)
432 return ret;
433
434 /* UMAC PM static configurations */
435 ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
436 CFG_PM_LEGACY_RX_TIMEOUT, 0x12C);
437 if (ret < 0)
438 return ret;
439
440 ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
441 CFG_PM_LEGACY_TX_TIMEOUT, 0x15E);
442 if (ret < 0)
443 return ret;
444
445 ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
446 CFG_PM_CTRL_FLAGS, 0x1);
447 if (ret < 0)
448 return ret;
449
450 ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
451 CFG_PM_KEEP_ALIVE_IN_BEACONS, 0x80);
452 if (ret < 0)
453 return ret;
454
455 /* reset UMAC */
456 ret = iwm_send_umac_reset(iwm, reset_flags, 1);
457 if (ret < 0)
458 return ret;
459
460 ret = iwm_notif_handle(iwm, UMAC_CMD_OPCODE_RESET, IWM_SRC_UMAC,
461 WAIT_NOTIF_TIMEOUT);
462 if (ret) {
463 IWM_ERR(iwm, "Wait for UMAC RESET timeout\n");
464 return ret;
465 }
466
467 return ret;
468}
469
/*
 * Queue one frame on the UMAC Tx path (REPLY_TX).
 *
 * @skb:     frame to send; skb->data/skb->len are handed to the HAL
 *           as-is.
 * @pool_id: Tx credit group this frame is accounted against.
 *
 * Returns the iwm_hal_send_umac_cmd() result.
 */
int iwm_send_packet(struct iwm_priv *iwm, struct sk_buff *skb, int pool_id)
{
	struct iwm_udma_wifi_cmd udma_cmd;
	struct iwm_umac_cmd umac_cmd;
	struct iwm_tx_info *tx_info = skb_to_tx_info(skb);

	udma_cmd.eop = 1; /* always set eop for non-concatenated Tx */
	udma_cmd.credit_group = pool_id;
	/* RA/TID pair: station index in the high nibble, TID in the low */
	udma_cmd.ra_tid = tx_info->sta << 4 | tx_info->tid;
	udma_cmd.lmac_offset = 0;

	umac_cmd.id = REPLY_TX;
	umac_cmd.color = tx_info->color;
	umac_cmd.resp = 0;

	return iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd,
				     skb->data, skb->len);
}
488
489static int iwm_target_read(struct iwm_priv *iwm, __le32 address,
490 u8 *response, u32 resp_size)
491{
492 struct iwm_udma_nonwifi_cmd target_cmd;
493 struct iwm_nonwifi_cmd *cmd;
494 u16 seq_num;
495 int ret = 0;
496
497 target_cmd.opcode = UMAC_HDI_OUT_OPCODE_READ;
498 target_cmd.addr = address;
499 target_cmd.op1_sz = cpu_to_le32(resp_size);
500 target_cmd.op2 = 0;
501 target_cmd.handle_by_hw = 0;
502 target_cmd.resp = 1;
503 target_cmd.eop = 1;
504
505 ret = iwm_hal_send_target_cmd(iwm, &target_cmd, NULL);
506 if (ret < 0) {
507 IWM_ERR(iwm, "Couldn't send READ command\n");
508 return ret;
509 }
510
511 /* When succeeding, the send_target routine returns the seq number */
512 seq_num = ret;
513
514 ret = wait_event_interruptible_timeout(iwm->nonwifi_queue,
515 (cmd = iwm_get_pending_nonwifi_cmd(iwm, seq_num,
516 UMAC_HDI_OUT_OPCODE_READ)) != NULL,
517 2 * HZ);
518
519 if (!ret) {
520 IWM_ERR(iwm, "Didn't receive a target READ answer\n");
521 return ret;
522 }
523
524 memcpy(response, cmd->buf.hdr + sizeof(struct iwm_udma_in_hdr),
525 resp_size);
526
527 kfree(cmd);
528
529 return 0;
530}
531
532int iwm_read_mac(struct iwm_priv *iwm, u8 *mac)
533{
534 int ret;
535 u8 mac_align[ALIGN(ETH_ALEN, 8)];
536
537 ret = iwm_target_read(iwm, cpu_to_le32(WICO_MAC_ADDRESS_ADDR),
538 mac_align, sizeof(mac_align));
539 if (ret)
540 return ret;
541
542 if (is_valid_ether_addr(mac_align))
543 memcpy(mac, mac_align, ETH_ALEN);
544 else {
545 IWM_ERR(iwm, "Invalid EEPROM MAC\n");
546 memcpy(mac, iwm->conf.mac_addr, ETH_ALEN);
547 get_random_bytes(&mac[3], 3);
548 }
549
550 return 0;
551}
552
553static int iwm_check_profile(struct iwm_priv *iwm)
554{
555 if (!iwm->umac_profile_active)
556 return -EAGAIN;
557
558 if (iwm->umac_profile->sec.ucast_cipher != UMAC_CIPHER_TYPE_WEP_40 &&
559 iwm->umac_profile->sec.ucast_cipher != UMAC_CIPHER_TYPE_WEP_104 &&
560 iwm->umac_profile->sec.ucast_cipher != UMAC_CIPHER_TYPE_TKIP &&
561 iwm->umac_profile->sec.ucast_cipher != UMAC_CIPHER_TYPE_CCMP) {
562 IWM_ERR(iwm, "Wrong unicast cipher: 0x%x\n",
563 iwm->umac_profile->sec.ucast_cipher);
564 return -EAGAIN;
565 }
566
567 if (iwm->umac_profile->sec.mcast_cipher != UMAC_CIPHER_TYPE_WEP_40 &&
568 iwm->umac_profile->sec.mcast_cipher != UMAC_CIPHER_TYPE_WEP_104 &&
569 iwm->umac_profile->sec.mcast_cipher != UMAC_CIPHER_TYPE_TKIP &&
570 iwm->umac_profile->sec.mcast_cipher != UMAC_CIPHER_TYPE_CCMP) {
571 IWM_ERR(iwm, "Wrong multicast cipher: 0x%x\n",
572 iwm->umac_profile->sec.mcast_cipher);
573 return -EAGAIN;
574 }
575
576 if ((iwm->umac_profile->sec.ucast_cipher == UMAC_CIPHER_TYPE_WEP_40 ||
577 iwm->umac_profile->sec.ucast_cipher == UMAC_CIPHER_TYPE_WEP_104) &&
578 (iwm->umac_profile->sec.ucast_cipher !=
579 iwm->umac_profile->sec.mcast_cipher)) {
580 IWM_ERR(iwm, "Unicast and multicast ciphers differ for WEP\n");
581 }
582
583 return 0;
584}
585
/*
 * Set the default (global) Tx key index on the UMAC.
 *
 * Only meaningful for static WEP: with non-WEP ciphers, or when the
 * auth type is 802.1X / RSNA-PSK, the UMAC manages the default key
 * itself and this function silently returns 0.
 */
int iwm_set_tx_key(struct iwm_priv *iwm, u8 key_idx)
{
	struct iwm_umac_tx_key_id tx_key_id;
	int ret;

	ret = iwm_check_profile(iwm);
	if (ret < 0)
		return ret;

	/* UMAC only allows to set default key for WEP and auth type is
	 * NOT 802.1X or RSNA. */
	if ((iwm->umac_profile->sec.ucast_cipher != UMAC_CIPHER_TYPE_WEP_40 &&
	     iwm->umac_profile->sec.ucast_cipher != UMAC_CIPHER_TYPE_WEP_104) ||
	    iwm->umac_profile->sec.auth_type == UMAC_AUTH_TYPE_8021X ||
	    iwm->umac_profile->sec.auth_type == UMAC_AUTH_TYPE_RSNA_PSK)
		return 0;

	tx_key_id.hdr.oid = UMAC_WIFI_IF_CMD_GLOBAL_TX_KEY_ID;
	/* buf_size counts only the payload after the wifi_if header */
	tx_key_id.hdr.buf_size = cpu_to_le16(sizeof(struct iwm_umac_tx_key_id) -
					     sizeof(struct iwm_umac_wifi_if));

	tx_key_id.key_idx = key_idx;

	return iwm_send_wifi_if_cmd(iwm, &tx_key_id, sizeof(tx_key_id), 1);
}
611
612int iwm_set_key(struct iwm_priv *iwm, bool remove, struct iwm_key *key)
613{
614 int ret = 0;
615 u8 cmd[64], *sta_addr, *key_data, key_len;
616 s8 key_idx;
617 u16 cmd_size = 0;
618 struct iwm_umac_key_hdr *key_hdr = &key->hdr;
619 struct iwm_umac_key_wep40 *wep40 = (struct iwm_umac_key_wep40 *)cmd;
620 struct iwm_umac_key_wep104 *wep104 = (struct iwm_umac_key_wep104 *)cmd;
621 struct iwm_umac_key_tkip *tkip = (struct iwm_umac_key_tkip *)cmd;
622 struct iwm_umac_key_ccmp *ccmp = (struct iwm_umac_key_ccmp *)cmd;
623
624 if (!remove) {
625 ret = iwm_check_profile(iwm);
626 if (ret < 0)
627 return ret;
628 }
629
630 sta_addr = key->hdr.mac;
631 key_data = key->key;
632 key_len = key->key_len;
633 key_idx = key->hdr.key_idx;
634
635 if (!remove) {
636 u8 auth_type = iwm->umac_profile->sec.auth_type;
637
638 IWM_DBG_WEXT(iwm, DBG, "key_idx:%d\n", key_idx);
639 IWM_DBG_WEXT(iwm, DBG, "key_len:%d\n", key_len);
640 IWM_DBG_WEXT(iwm, DBG, "MAC:%pM, idx:%d, multicast:%d\n",
641 key_hdr->mac, key_hdr->key_idx, key_hdr->multicast);
642
643 IWM_DBG_WEXT(iwm, DBG, "profile: mcast:0x%x, ucast:0x%x\n",
644 iwm->umac_profile->sec.mcast_cipher,
645 iwm->umac_profile->sec.ucast_cipher);
646 IWM_DBG_WEXT(iwm, DBG, "profile: auth_type:0x%x, flags:0x%x\n",
647 iwm->umac_profile->sec.auth_type,
648 iwm->umac_profile->sec.flags);
649
650 switch (key->cipher) {
651 case WLAN_CIPHER_SUITE_WEP40:
652 wep40->hdr.oid = UMAC_WIFI_IF_CMD_ADD_WEP40_KEY;
653 wep40->hdr.buf_size =
654 cpu_to_le16(sizeof(struct iwm_umac_key_wep40) -
655 sizeof(struct iwm_umac_wifi_if));
656
657 memcpy(&wep40->key_hdr, key_hdr,
658 sizeof(struct iwm_umac_key_hdr));
659 memcpy(wep40->key, key_data, key_len);
660 wep40->static_key =
661 !!((auth_type != UMAC_AUTH_TYPE_8021X) &&
662 (auth_type != UMAC_AUTH_TYPE_RSNA_PSK));
663
664 cmd_size = sizeof(struct iwm_umac_key_wep40);
665 break;
666
667 case WLAN_CIPHER_SUITE_WEP104:
668 wep104->hdr.oid = UMAC_WIFI_IF_CMD_ADD_WEP104_KEY;
669 wep104->hdr.buf_size =
670 cpu_to_le16(sizeof(struct iwm_umac_key_wep104) -
671 sizeof(struct iwm_umac_wifi_if));
672
673 memcpy(&wep104->key_hdr, key_hdr,
674 sizeof(struct iwm_umac_key_hdr));
675 memcpy(wep104->key, key_data, key_len);
676 wep104->static_key =
677 !!((auth_type != UMAC_AUTH_TYPE_8021X) &&
678 (auth_type != UMAC_AUTH_TYPE_RSNA_PSK));
679
680 cmd_size = sizeof(struct iwm_umac_key_wep104);
681 break;
682
683 case WLAN_CIPHER_SUITE_CCMP:
684 key_hdr->key_idx++;
685 ccmp->hdr.oid = UMAC_WIFI_IF_CMD_ADD_CCMP_KEY;
686 ccmp->hdr.buf_size =
687 cpu_to_le16(sizeof(struct iwm_umac_key_ccmp) -
688 sizeof(struct iwm_umac_wifi_if));
689
690 memcpy(&ccmp->key_hdr, key_hdr,
691 sizeof(struct iwm_umac_key_hdr));
692
693 memcpy(ccmp->key, key_data, key_len);
694
695 if (key->seq_len)
696 memcpy(ccmp->iv_count, key->seq, key->seq_len);
697
698 cmd_size = sizeof(struct iwm_umac_key_ccmp);
699 break;
700
701 case WLAN_CIPHER_SUITE_TKIP:
702 key_hdr->key_idx++;
703 tkip->hdr.oid = UMAC_WIFI_IF_CMD_ADD_TKIP_KEY;
704 tkip->hdr.buf_size =
705 cpu_to_le16(sizeof(struct iwm_umac_key_tkip) -
706 sizeof(struct iwm_umac_wifi_if));
707
708 memcpy(&tkip->key_hdr, key_hdr,
709 sizeof(struct iwm_umac_key_hdr));
710
711 memcpy(tkip->tkip_key, key_data, IWM_TKIP_KEY_SIZE);
712 memcpy(tkip->mic_tx_key, key_data + IWM_TKIP_KEY_SIZE,
713 IWM_TKIP_MIC_SIZE);
714 memcpy(tkip->mic_rx_key,
715 key_data + IWM_TKIP_KEY_SIZE + IWM_TKIP_MIC_SIZE,
716 IWM_TKIP_MIC_SIZE);
717
718 if (key->seq_len)
719 memcpy(ccmp->iv_count, key->seq, key->seq_len);
720
721 cmd_size = sizeof(struct iwm_umac_key_tkip);
722 break;
723
724 default:
725 return -ENOTSUPP;
726 }
727
728 if ((key->cipher == WLAN_CIPHER_SUITE_TKIP) ||
729 (key->cipher == WLAN_CIPHER_SUITE_CCMP))
730 /*
731 * UGLY_UGLY_UGLY
732 * Copied HACK from the MWG driver.
733 * Without it, the key is set before the second
734 * EAPOL frame is sent, and the latter is thus
735 * encrypted.
736 */
737 schedule_timeout_interruptible(usecs_to_jiffies(300));
738
739 ret = iwm_send_wifi_if_cmd(iwm, cmd, cmd_size, 1);
740 } else {
741 struct iwm_umac_key_remove key_remove;
742
743 IWM_DBG_WEXT(iwm, ERR, "Removing key_idx:%d\n", key_idx);
744
745 key_remove.hdr.oid = UMAC_WIFI_IF_CMD_REMOVE_KEY;
746 key_remove.hdr.buf_size =
747 cpu_to_le16(sizeof(struct iwm_umac_key_remove) -
748 sizeof(struct iwm_umac_wifi_if));
749 memcpy(&key_remove.key_hdr, key_hdr,
750 sizeof(struct iwm_umac_key_hdr));
751
752 ret = iwm_send_wifi_if_cmd(iwm, &key_remove,
753 sizeof(struct iwm_umac_key_remove),
754 1);
755 if (ret)
756 return ret;
757
758 iwm->keys[key_idx].key_len = 0;
759 }
760
761 return ret;
762}
763
764
765int iwm_send_mlme_profile(struct iwm_priv *iwm)
766{
767 int ret;
768 struct iwm_umac_profile profile;
769
770 memcpy(&profile, iwm->umac_profile, sizeof(profile));
771
772 profile.hdr.oid = UMAC_WIFI_IF_CMD_SET_PROFILE;
773 profile.hdr.buf_size = cpu_to_le16(sizeof(struct iwm_umac_profile) -
774 sizeof(struct iwm_umac_wifi_if));
775
776 ret = iwm_send_wifi_if_cmd(iwm, &profile, sizeof(profile), 1);
777 if (ret) {
778 IWM_ERR(iwm, "Send profile command failed\n");
779 return ret;
780 }
781
782 set_bit(IWM_STATUS_SME_CONNECTING, &iwm->status);
783 return 0;
784}
785
/*
 * Ask the UMAC to invalidate the active profile (reason: unspecified).
 * Does not wait for the profile to actually go inactive — see
 * iwm_invalidate_mlme_profile() for the blocking variant.
 */
int __iwm_invalidate_mlme_profile(struct iwm_priv *iwm)
{
	struct iwm_umac_invalidate_profile invalid;

	invalid.hdr.oid = UMAC_WIFI_IF_CMD_INVALIDATE_PROFILE;
	invalid.hdr.buf_size =
		cpu_to_le16(sizeof(struct iwm_umac_invalidate_profile) -
			    sizeof(struct iwm_umac_wifi_if));

	invalid.reason = WLAN_REASON_UNSPECIFIED;

	return iwm_send_wifi_if_cmd(iwm, &invalid, sizeof(invalid), 1);
}
799
800int iwm_invalidate_mlme_profile(struct iwm_priv *iwm)
801{
802 int ret;
803
804 ret = __iwm_invalidate_mlme_profile(iwm);
805 if (ret)
806 return ret;
807
808 ret = wait_event_interruptible_timeout(iwm->mlme_queue,
809 (iwm->umac_profile_active == 0), 5 * HZ);
810
811 return ret ? 0 : -EBUSY;
812}
813
/* Ask the UMAC to (re)compute its Tx power settings. */
int iwm_tx_power_trigger(struct iwm_priv *iwm)
{
	struct iwm_umac_pwr_trigger pwr_trigger;

	pwr_trigger.hdr.oid = UMAC_WIFI_IF_CMD_TX_PWR_TRIGGER;
	/* buf_size counts only the payload after the wifi_if header */
	pwr_trigger.hdr.buf_size =
		cpu_to_le16(sizeof(struct iwm_umac_pwr_trigger) -
			    sizeof(struct iwm_umac_wifi_if));


	return iwm_send_wifi_if_cmd(iwm, &pwr_trigger, sizeof(pwr_trigger), 1);
}
826
/*
 * Request statistics from the UMAC; the reply arrives asynchronously
 * as a notification (no response is requested on the command itself).
 *
 * @flags: statistics selection bitmap, sent little-endian.
 */
int iwm_send_umac_stats_req(struct iwm_priv *iwm, u32 flags)
{
	struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
	struct iwm_umac_cmd umac_cmd;
	struct iwm_umac_cmd_stats_req stats_req;

	stats_req.flags = cpu_to_le32(flags);

	umac_cmd.id = UMAC_CMD_OPCODE_STATISTIC_REQUEST;
	umac_cmd.resp = 0;

	return iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd, &stats_req,
				     sizeof(struct iwm_umac_cmd_stats_req));
}
841
842int iwm_send_umac_channel_list(struct iwm_priv *iwm)
843{
844 struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
845 struct iwm_umac_cmd umac_cmd;
846 struct iwm_umac_cmd_get_channel_list *ch_list;
847 int size = sizeof(struct iwm_umac_cmd_get_channel_list) +
848 sizeof(struct iwm_umac_channel_info) * 4;
849 int ret;
850
851 ch_list = kzalloc(size, GFP_KERNEL);
852 if (!ch_list) {
853 IWM_ERR(iwm, "Couldn't allocate channel list cmd\n");
854 return -ENOMEM;
855 }
856
857 ch_list->ch[0].band = UMAC_BAND_2GHZ;
858 ch_list->ch[0].type = UMAC_CHANNEL_WIDTH_20MHZ;
859 ch_list->ch[0].flags = UMAC_CHANNEL_FLAG_VALID;
860
861 ch_list->ch[1].band = UMAC_BAND_5GHZ;
862 ch_list->ch[1].type = UMAC_CHANNEL_WIDTH_20MHZ;
863 ch_list->ch[1].flags = UMAC_CHANNEL_FLAG_VALID;
864
865 ch_list->ch[2].band = UMAC_BAND_2GHZ;
866 ch_list->ch[2].type = UMAC_CHANNEL_WIDTH_20MHZ;
867 ch_list->ch[2].flags = UMAC_CHANNEL_FLAG_VALID | UMAC_CHANNEL_FLAG_IBSS;
868
869 ch_list->ch[3].band = UMAC_BAND_5GHZ;
870 ch_list->ch[3].type = UMAC_CHANNEL_WIDTH_20MHZ;
871 ch_list->ch[3].flags = UMAC_CHANNEL_FLAG_VALID | UMAC_CHANNEL_FLAG_IBSS;
872
873 ch_list->count = cpu_to_le16(4);
874
875 umac_cmd.id = UMAC_CMD_OPCODE_GET_CHAN_INFO_LIST;
876 umac_cmd.resp = 1;
877
878 ret = iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd, ch_list, size);
879
880 kfree(ch_list);
881
882 return ret;
883}
884
885int iwm_scan_ssids(struct iwm_priv *iwm, struct cfg80211_ssid *ssids,
886 int ssid_num)
887{
888 struct iwm_umac_cmd_scan_request req;
889 int i, ret;
890
891 memset(&req, 0, sizeof(struct iwm_umac_cmd_scan_request));
892
893 req.hdr.oid = UMAC_WIFI_IF_CMD_SCAN_REQUEST;
894 req.hdr.buf_size = cpu_to_le16(sizeof(struct iwm_umac_cmd_scan_request)
895 - sizeof(struct iwm_umac_wifi_if));
896 req.type = UMAC_WIFI_IF_SCAN_TYPE_USER;
897 req.timeout = 2;
898 req.seq_num = iwm->scan_id;
899 req.ssid_num = min(ssid_num, UMAC_WIFI_IF_PROBE_OPTION_MAX);
900
901 for (i = 0; i < req.ssid_num; i++) {
902 memcpy(req.ssids[i].ssid, ssids[i].ssid, ssids[i].ssid_len);
903 req.ssids[i].ssid_len = ssids[i].ssid_len;
904 }
905
906 ret = iwm_send_wifi_if_cmd(iwm, &req, sizeof(req), 0);
907 if (ret) {
908 IWM_ERR(iwm, "Couldn't send scan request\n");
909 return ret;
910 }
911
912 iwm->scan_id = (iwm->scan_id + 1) % IWM_SCAN_ID_MAX;
913
914 return 0;
915}
916
917int iwm_scan_one_ssid(struct iwm_priv *iwm, u8 *ssid, int ssid_len)
918{
919 struct cfg80211_ssid one_ssid;
920
921 if (test_and_set_bit(IWM_STATUS_SCANNING, &iwm->status))
922 return 0;
923
924 one_ssid.ssid_len = min(ssid_len, IEEE80211_MAX_SSID_LEN);
925 memcpy(&one_ssid.ssid, ssid, one_ssid.ssid_len);
926
927 return iwm_scan_ssids(iwm, &one_ssid, 1);
928}
929
/* Send a REBOOT command to the device, resetting the target CPU.
 * Fire-and-forget: no response is requested. */
int iwm_target_reset(struct iwm_priv *iwm)
{
	struct iwm_udma_nonwifi_cmd target_cmd;

	target_cmd.opcode = UMAC_HDI_OUT_OPCODE_REBOOT;
	target_cmd.addr = 0;
	target_cmd.op1_sz = 0;
	target_cmd.op2 = 0;
	target_cmd.handle_by_hw = 0;
	target_cmd.resp = 0;
	target_cmd.eop = 1;

	return iwm_hal_send_target_cmd(iwm, &target_cmd, NULL);
}
944
/*
 * Acknowledge a stop/resume-Tx notification for a station by echoing
 * it back as a command, together with the last Tx sequence number of
 * every TID so the UMAC can resume from the right point.
 *
 * Returns -EINVAL if the station id in the notification is not in the
 * local station table.
 */
int iwm_send_umac_stop_resume_tx(struct iwm_priv *iwm,
				 struct iwm_umac_notif_stop_resume_tx *ntf)
{
	struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
	struct iwm_umac_cmd umac_cmd;
	struct iwm_umac_cmd_stop_resume_tx stp_res_cmd;
	struct iwm_sta_info *sta_info;
	u8 sta_id = STA_ID_N_COLOR_ID(ntf->sta_id);
	int i;

	sta_info = &iwm->sta_table[sta_id];
	if (!sta_info->valid) {
		IWM_ERR(iwm, "Invalid STA: %d\n", sta_id);
		return -EINVAL;
	}

	umac_cmd.id = UMAC_CMD_OPCODE_STOP_RESUME_STA_TX;
	umac_cmd.resp = 0;

	stp_res_cmd.flags = ntf->flags;
	stp_res_cmd.sta_id = ntf->sta_id;
	stp_res_cmd.stop_resume_tid_msk = ntf->stop_resume_tid_msk;
	for (i = 0; i < IWM_UMAC_TID_NR; i++)
		stp_res_cmd.last_seq_num[i] =
			sta_info->tid_info[i].last_seq_num;

	return iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd, &stp_res_cmd,
				     sizeof(struct iwm_umac_cmd_stop_resume_tx));

}
975
976int iwm_send_pmkid_update(struct iwm_priv *iwm,
977 struct cfg80211_pmksa *pmksa, u32 command)
978{
979 struct iwm_umac_pmkid_update update;
980 int ret;
981
982 memset(&update, 0, sizeof(struct iwm_umac_pmkid_update));
983
984 update.hdr.oid = UMAC_WIFI_IF_CMD_PMKID_UPDATE;
985 update.hdr.buf_size = cpu_to_le16(sizeof(struct iwm_umac_pmkid_update) -
986 sizeof(struct iwm_umac_wifi_if));
987
988 update.command = cpu_to_le32(command);
989 if (pmksa->bssid)
990 memcpy(&update.bssid, pmksa->bssid, ETH_ALEN);
991 if (pmksa->pmkid)
992 memcpy(&update.pmkid, pmksa->pmkid, WLAN_PMKID_LEN);
993
994 ret = iwm_send_wifi_if_cmd(iwm, &update,
995 sizeof(struct iwm_umac_pmkid_update), 0);
996 if (ret) {
997 IWM_ERR(iwm, "PMKID update command failed\n");
998 return ret;
999 }
1000
1001 return 0;
1002}
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.h b/drivers/net/wireless/iwmc3200wifi/commands.h
deleted file mode 100644
index 6421689f5e8e..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/commands.h
+++ /dev/null
@@ -1,509 +0,0 @@
1/*
2 * Intel Wireless Multicomm 3200 WiFi driver
3 *
4 * Copyright (C) 2009 Intel Corporation. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Intel Corporation nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 *
33 * Intel Corporation <ilw@linux.intel.com>
34 * Samuel Ortiz <samuel.ortiz@intel.com>
35 * Zhu Yi <yi.zhu@intel.com>
36 *
37 */
38
39#ifndef __IWM_COMMANDS_H__
40#define __IWM_COMMANDS_H__
41
42#include <linux/ieee80211.h>
43
44#define IWM_BARKER_REBOOT_NOTIFICATION 0xF
45#define IWM_ACK_BARKER_NOTIFICATION 0x10
46
47/* UMAC commands */
48#define UMAC_RST_CTRL_FLG_LARC_CLK_EN 0x0001
49#define UMAC_RST_CTRL_FLG_LARC_RESET 0x0002
50#define UMAC_RST_CTRL_FLG_FUNC_RESET 0x0004
51#define UMAC_RST_CTRL_FLG_DEV_RESET 0x0008
52#define UMAC_RST_CTRL_FLG_WIFI_CORE_EN 0x0010
53#define UMAC_RST_CTRL_FLG_WIFI_LINK_EN 0x0040
54#define UMAC_RST_CTRL_FLG_WIFI_MLME_EN 0x0080
55#define UMAC_RST_CTRL_FLG_NVM_RELOAD 0x0100
56
57struct iwm_umac_cmd_reset {
58 __le32 flags;
59} __packed;
60
61#define UMAC_PARAM_TBL_ORD_FIX 0x0
62#define UMAC_PARAM_TBL_ORD_VAR 0x1
63#define UMAC_PARAM_TBL_CFG_FIX 0x2
64#define UMAC_PARAM_TBL_CFG_VAR 0x3
65#define UMAC_PARAM_TBL_BSS_TRK 0x4
66#define UMAC_PARAM_TBL_FA_CFG_FIX 0x5
67#define UMAC_PARAM_TBL_STA 0x6
68#define UMAC_PARAM_TBL_CHN 0x7
69#define UMAC_PARAM_TBL_STATISTICS 0x8
70
71/* fast access table */
72enum {
73 CFG_FRAG_THRESHOLD = 0,
74 CFG_FRAME_RETRY_LIMIT,
75 CFG_OS_QUEUE_UTIL_TH,
76 CFG_RX_FILTER,
77 /* <-- LAST --> */
78 FAST_ACCESS_CFG_TBL_FIX_LAST
79};
80
81/* fixed size table */
82enum {
83 CFG_POWER_INDEX = 0,
84 CFG_PM_LEGACY_RX_TIMEOUT,
85 CFG_PM_LEGACY_TX_TIMEOUT,
86 CFG_PM_CTRL_FLAGS,
87 CFG_PM_KEEP_ALIVE_IN_BEACONS,
88 CFG_BT_ON_THRESHOLD,
89 CFG_RTS_THRESHOLD,
90 CFG_CTS_TO_SELF,
91 CFG_COEX_MODE,
92 CFG_WIRELESS_MODE,
93 CFG_ASSOCIATION_TIMEOUT,
94 CFG_ROAM_TIMEOUT,
95 CFG_CAPABILITY_SUPPORTED_RATES,
96 CFG_SCAN_ALLOWED_UNASSOC_FLAGS,
97 CFG_SCAN_ALLOWED_MAIN_ASSOC_FLAGS,
98 CFG_SCAN_ALLOWED_PAN_ASSOC_FLAGS,
99 CFG_SCAN_INTERNAL_PERIODIC_ENABLED,
100 CFG_SCAN_IMM_INTERNAL_PERIODIC_SCAN_ON_INIT,
101 CFG_SCAN_DEFAULT_PERIODIC_FREQ_SEC,
102 CFG_SCAN_NUM_PASSIVE_CHAN_PER_PARTIAL_SCAN,
103 CFG_TLC_SUPPORTED_TX_HT_RATES,
104 CFG_TLC_SUPPORTED_TX_RATES,
105 CFG_TLC_SPATIAL_STREAM_SUPPORTED,
106 CFG_TLC_RETRY_PER_RATE,
107 CFG_TLC_RETRY_PER_HT_RATE,
108 CFG_TLC_FIXED_MCS,
109 CFG_TLC_CONTROL_FLAGS,
110 CFG_TLC_SR_MIN_FAIL,
111 CFG_TLC_SR_MIN_PASS,
112 CFG_TLC_HT_STAY_IN_COL_PASS_THRESH,
113 CFG_TLC_HT_STAY_IN_COL_FAIL_THRESH,
114 CFG_TLC_LEGACY_STAY_IN_COL_PASS_THRESH,
115 CFG_TLC_LEGACY_STAY_IN_COL_FAIL_THRESH,
116 CFG_TLC_HT_FLUSH_STATS_PACKETS,
117 CFG_TLC_LEGACY_FLUSH_STATS_PACKETS,
118 CFG_TLC_LEGACY_FLUSH_STATS_MS,
119 CFG_TLC_HT_FLUSH_STATS_MS,
120 CFG_TLC_STAY_IN_COL_TIME_OUT,
121 CFG_TLC_AGG_SHORT_LIM,
122 CFG_TLC_AGG_LONG_LIM,
123 CFG_TLC_HT_SR_NO_DECREASE,
124 CFG_TLC_LEGACY_SR_NO_DECREASE,
125 CFG_TLC_SR_FORCE_DECREASE,
126 CFG_TLC_SR_ALLOW_INCREASE,
127 CFG_TLC_AGG_SET_LONG,
128 CFG_TLC_AUTO_AGGREGATION,
129 CFG_TLC_AGG_THRESHOLD,
130 CFG_TLC_TID_LOAD_THRESHOLD,
131 CFG_TLC_BLOCK_ACK_TIMEOUT,
132 CFG_TLC_NO_BA_COUNTED_AS_ONE,
133 CFG_TLC_NUM_BA_STREAMS_ALLOWED,
134 CFG_TLC_NUM_BA_STREAMS_PRESENT,
135 CFG_TLC_RENEW_ADDBA_DELAY,
136 CFG_TLC_NUM_OF_MULTISEC_TO_COUN_LOAD,
137 CFG_TLC_IS_STABLE_IN_HT,
138 CFG_TLC_SR_SIC_1ST_FAIL,
139 CFG_TLC_SR_SIC_1ST_PASS,
140 CFG_TLC_SR_SIC_TOTAL_FAIL,
141 CFG_TLC_SR_SIC_TOTAL_PASS,
142 CFG_RLC_CHAIN_CTRL,
143 CFG_TRK_TABLE_OP_MODE,
144 CFG_TRK_TABLE_RSSI_THRESHOLD,
145 CFG_TX_PWR_TARGET, /* Used By xVT */
146 CFG_TX_PWR_LIMIT_USR,
147 CFG_TX_PWR_LIMIT_BSS, /* 11d limit */
148 CFG_TX_PWR_LIMIT_BSS_CONSTRAINT, /* 11h constraint */
149 CFG_TX_PWR_MODE,
150 CFG_MLME_DBG_NOTIF_BLOCK,
151 CFG_BT_OFF_BECONS_INTERVALS,
152 CFG_BT_FRAG_DURATION,
153 CFG_ACTIVE_CHAINS,
154 CFG_CALIB_CTRL,
155 CFG_CAPABILITY_SUPPORTED_HT_RATES,
156 CFG_HT_MAC_PARAM_INFO,
157 CFG_MIMO_PS_MODE,
158 CFG_HT_DEFAULT_CAPABILIES_INFO,
159 CFG_LED_SC_RESOLUTION_FACTOR,
160 CFG_PTAM_ENERGY_CCK_DET_DEFAULT,
161 CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_DEFAULT,
162 CFG_PTAM_CORR40_4_TH_ADD_MIN_DEFAULT,
163 CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_DEFAULT,
164 CFG_PTAM_CORR32_4_TH_ADD_MIN_DEFAULT,
165 CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_DEFAULT,
166 CFG_PTAM_CORR32_1_TH_ADD_MIN_DEFAULT,
167 CFG_PTAM_ENERGY_CCK_DET_MIN_VAL,
168 CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_MIN_VAL,
169 CFG_PTAM_CORR40_4_TH_ADD_MIN_MIN_VAL,
170 CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_MIN_VAL,
171 CFG_PTAM_CORR32_4_TH_ADD_MIN_MIN_VAL,
172 CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_MIN_VAL,
173 CFG_PTAM_CORR32_1_TH_ADD_MIN_MIN_VAL,
174 CFG_PTAM_ENERGY_CCK_DET_MAX_VAL,
175 CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_MAX_VAL,
176 CFG_PTAM_CORR40_4_TH_ADD_MIN_MAX_VAL,
177 CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_MAX_VAL,
178 CFG_PTAM_CORR32_4_TH_ADD_MIN_MAX_VAL,
179 CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_MAX_VAL,
180 CFG_PTAM_CORR32_1_TH_ADD_MIN_MAX_VAL,
181 CFG_PTAM_ENERGY_CCK_DET_STEP_VAL,
182 CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_STEP_VAL,
183 CFG_PTAM_CORR40_4_TH_ADD_MIN_STEP_VAL,
184 CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_STEP_VAL,
185 CFG_PTAM_CORR32_4_TH_ADD_MIN_STEP_VAL,
186 CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_STEP_VAL,
187 CFG_PTAM_CORR32_1_TH_ADD_MIN_STEP_VAL,
188 CFG_PTAM_LINK_SENS_FA_OFDM_MAX,
189 CFG_PTAM_LINK_SENS_FA_OFDM_MIN,
190 CFG_PTAM_LINK_SENS_FA_CCK_MAX,
191 CFG_PTAM_LINK_SENS_FA_CCK_MIN,
192 CFG_PTAM_LINK_SENS_NRG_DIFF,
193 CFG_PTAM_LINK_SENS_NRG_MARGIN,
194 CFG_PTAM_LINK_SENS_MAX_NUMBER_OF_TIMES_IN_CCK_NO_FA,
195 CFG_PTAM_LINK_SENS_AUTO_CORR_MAX_TH_CCK,
196 CFG_AGG_MGG_TID_LOAD_ADDBA_THRESHOLD,
197 CFG_AGG_MGG_TID_LOAD_DELBA_THRESHOLD,
198 CFG_AGG_MGG_ADDBA_BUF_SIZE,
199 CFG_AGG_MGG_ADDBA_INACTIVE_TIMEOUT,
200 CFG_AGG_MGG_ADDBA_DEBUG_FLAGS,
201 CFG_SCAN_PERIODIC_RSSI_HIGH_THRESHOLD,
202 CFG_SCAN_PERIODIC_COEF_RSSI_HIGH,
203 CFG_11D_ENABLED,
204 CFG_11H_FEATURE_FLAGS,
205
206 /* <-- LAST --> */
207 CFG_TBL_FIX_LAST
208};
209
210/* variable size table */
211enum {
212 CFG_NET_ADDR = 0,
213 CFG_LED_PATTERN_TABLE,
214
215 /* <-- LAST --> */
216 CFG_TBL_VAR_LAST
217};
218
219struct iwm_umac_cmd_set_param_fix {
220 __le16 tbl;
221 __le16 key;
222 __le32 value;
223} __packed;
224
225struct iwm_umac_cmd_set_param_var {
226 __le16 tbl;
227 __le16 key;
228 __le16 len;
229 __le16 reserved;
230} __packed;
231
232struct iwm_umac_cmd_get_param {
233 __le16 tbl;
234 __le16 key;
235} __packed;
236
237struct iwm_umac_cmd_get_param_resp {
238 __le16 tbl;
239 __le16 key;
240 __le16 len;
241 __le16 reserved;
242} __packed;
243
244struct iwm_umac_cmd_eeprom_proxy_hdr {
245 __le32 type;
246 __le32 offset;
247 __le32 len;
248} __packed;
249
250struct iwm_umac_cmd_eeprom_proxy {
251 struct iwm_umac_cmd_eeprom_proxy_hdr hdr;
252 u8 buf[0];
253} __packed;
254
255#define IWM_UMAC_CMD_EEPROM_TYPE_READ 0x1
256#define IWM_UMAC_CMD_EEPROM_TYPE_WRITE 0x2
257
258#define UMAC_CHANNEL_FLAG_VALID BIT(0)
259#define UMAC_CHANNEL_FLAG_IBSS BIT(1)
260#define UMAC_CHANNEL_FLAG_ACTIVE BIT(3)
261#define UMAC_CHANNEL_FLAG_RADAR BIT(4)
262#define UMAC_CHANNEL_FLAG_DFS BIT(7)
263
264struct iwm_umac_channel_info {
265 u8 band;
266 u8 type;
267 u8 reserved;
268 u8 flags;
269 __le32 channels_mask;
270} __packed;
271
272struct iwm_umac_cmd_get_channel_list {
273 __le16 count;
274 __le16 reserved;
275 struct iwm_umac_channel_info ch[0];
276} __packed;
277
278
279/* UMAC WiFi interface commands */
280
281/* Coexistence mode */
282#define COEX_MODE_SA 0x1
283#define COEX_MODE_XOR 0x2
284#define COEX_MODE_CM 0x3
285#define COEX_MODE_MAX 0x4
286
287/* Wireless mode */
288#define WIRELESS_MODE_11A 0x1
289#define WIRELESS_MODE_11G 0x2
290#define WIRELESS_MODE_11N 0x4
291
292#define UMAC_PROFILE_EX_IE_REQUIRED 0x1
293#define UMAC_PROFILE_QOS_ALLOWED 0x2
294
295/* Scanning */
296#define UMAC_WIFI_IF_PROBE_OPTION_MAX 10
297
298#define UMAC_WIFI_IF_SCAN_TYPE_USER 0x0
299#define UMAC_WIFI_IF_SCAN_TYPE_UMAC_RESERVED 0x1
300#define UMAC_WIFI_IF_SCAN_TYPE_HOST_PERIODIC 0x2
301#define UMAC_WIFI_IF_SCAN_TYPE_MAX 0x3
302
303struct iwm_umac_ssid {
304 u8 ssid_len;
305 u8 ssid[IEEE80211_MAX_SSID_LEN];
306 u8 reserved[3];
307} __packed;
308
309struct iwm_umac_cmd_scan_request {
310 struct iwm_umac_wifi_if hdr;
311 __le32 type; /* UMAC_WIFI_IF_SCAN_TYPE_* */
312 u8 ssid_num;
313 u8 seq_num;
314 u8 timeout; /* In seconds */
315 u8 reserved;
316 struct iwm_umac_ssid ssids[UMAC_WIFI_IF_PROBE_OPTION_MAX];
317} __packed;
318
319#define UMAC_CIPHER_TYPE_NONE 0xFF
320#define UMAC_CIPHER_TYPE_USE_GROUPCAST 0x00
321#define UMAC_CIPHER_TYPE_WEP_40 0x01
322#define UMAC_CIPHER_TYPE_WEP_104 0x02
323#define UMAC_CIPHER_TYPE_TKIP 0x04
324#define UMAC_CIPHER_TYPE_CCMP 0x08
325
326/* Supported authentication types - bitmap */
327#define UMAC_AUTH_TYPE_OPEN 0x00
328#define UMAC_AUTH_TYPE_LEGACY_PSK 0x01
329#define UMAC_AUTH_TYPE_8021X 0x02
330#define UMAC_AUTH_TYPE_RSNA_PSK 0x04
331
332/* iwm_umac_security.flag is WPA supported -- bits[0:0] */
333#define UMAC_SEC_FLG_WPA_ON_POS 0
334#define UMAC_SEC_FLG_WPA_ON_SEED 1
335#define UMAC_SEC_FLG_WPA_ON_MSK (UMAC_SEC_FLG_WPA_ON_SEED << \
336 UMAC_SEC_FLG_WPA_ON_POS)
337
338/* iwm_umac_security.flag is WPA2 supported -- bits [1:1] */
339#define UMAC_SEC_FLG_RSNA_ON_POS 1
340#define UMAC_SEC_FLG_RSNA_ON_SEED 1
341#define UMAC_SEC_FLG_RSNA_ON_MSK (UMAC_SEC_FLG_RSNA_ON_SEED << \
342 UMAC_SEC_FLG_RSNA_ON_POS)
343
344/* iwm_umac_security.flag is WSC mode on -- bits [2:2] */
345#define UMAC_SEC_FLG_WSC_ON_POS 2
346#define UMAC_SEC_FLG_WSC_ON_SEED 1
347#define UMAC_SEC_FLG_WSC_ON_MSK (UMAC_SEC_FLG_WSC_ON_SEED << \
348 UMAC_SEC_FLG_WSC_ON_POS)
349
350
351/* Legacy profile can use only WEP40 and WEP104 for encryption and
352 * OPEN or PSK for authentication */
353#define UMAC_SEC_FLG_LEGACY_PROFILE 0
354
355struct iwm_umac_security {
356 u8 auth_type;
357 u8 ucast_cipher;
358 u8 mcast_cipher;
359 u8 flags;
360} __packed;
361
362struct iwm_umac_ibss {
363 u8 beacon_interval; /* in millisecond */
364 u8 atim; /* in millisecond */
365 s8 join_only;
366 u8 band;
367 u8 channel;
368 u8 reserved[3];
369} __packed;
370
371#define UMAC_MODE_BSS 0
372#define UMAC_MODE_IBSS 1
373
374#define UMAC_BSSID_MAX 4
375
376struct iwm_umac_profile {
377 struct iwm_umac_wifi_if hdr;
378 __le32 mode;
379 struct iwm_umac_ssid ssid;
380 u8 bssid[UMAC_BSSID_MAX][ETH_ALEN];
381 struct iwm_umac_security sec;
382 struct iwm_umac_ibss ibss;
383 __le32 channel_2ghz;
384 __le32 channel_5ghz;
385 __le16 flags;
386 u8 wireless_mode;
387 u8 bss_num;
388} __packed;
389
390struct iwm_umac_invalidate_profile {
391 struct iwm_umac_wifi_if hdr;
392 u8 reason;
393 u8 reserved[3];
394} __packed;
395
396/* Encryption key commands */
397struct iwm_umac_key_wep40 {
398 struct iwm_umac_wifi_if hdr;
399 struct iwm_umac_key_hdr key_hdr;
400 u8 key[WLAN_KEY_LEN_WEP40];
401 u8 static_key;
402 u8 reserved[2];
403} __packed;
404
405struct iwm_umac_key_wep104 {
406 struct iwm_umac_wifi_if hdr;
407 struct iwm_umac_key_hdr key_hdr;
408 u8 key[WLAN_KEY_LEN_WEP104];
409 u8 static_key;
410 u8 reserved[2];
411} __packed;
412
413#define IWM_TKIP_KEY_SIZE 16
414#define IWM_TKIP_MIC_SIZE 8
415struct iwm_umac_key_tkip {
416 struct iwm_umac_wifi_if hdr;
417 struct iwm_umac_key_hdr key_hdr;
418 u8 iv_count[6];
419 u8 reserved[2];
420 u8 tkip_key[IWM_TKIP_KEY_SIZE];
421 u8 mic_rx_key[IWM_TKIP_MIC_SIZE];
422 u8 mic_tx_key[IWM_TKIP_MIC_SIZE];
423} __packed;
424
425struct iwm_umac_key_ccmp {
426 struct iwm_umac_wifi_if hdr;
427 struct iwm_umac_key_hdr key_hdr;
428 u8 iv_count[6];
429 u8 reserved[2];
430 u8 key[WLAN_KEY_LEN_CCMP];
431} __packed;
432
433struct iwm_umac_key_remove {
434 struct iwm_umac_wifi_if hdr;
435 struct iwm_umac_key_hdr key_hdr;
436} __packed;
437
438struct iwm_umac_tx_key_id {
439 struct iwm_umac_wifi_if hdr;
440 u8 key_idx;
441 u8 reserved[3];
442} __packed;
443
444struct iwm_umac_pwr_trigger {
445 struct iwm_umac_wifi_if hdr;
446 __le32 reseved;
447} __packed;
448
449struct iwm_umac_cmd_stats_req {
450 __le32 flags;
451} __packed;
452
/* Payload for UMAC_CMD_OPCODE_STOP_RESUME_STA_TX (see
 * iwm_send_umac_stop_resume_tx): per-station, per-TID TX stop/resume
 * state echoed back to the device. */
struct iwm_umac_cmd_stop_resume_tx {
	u8 flags;			/* copied from the notification */
	u8 sta_id;			/* station id + color, as notified */
	__le16 stop_resume_tid_msk;	/* bitmask of affected TIDs */
	__le16 last_seq_num[IWM_UMAC_TID_NR]; /* last seq number per TID */
	u16 reserved;			/* must be zeroed by the sender */
} __packed;

/* Sub-commands for iwm_umac_pmkid_update.command */
#define IWM_CMD_PMKID_ADD 1
#define IWM_CMD_PMKID_DEL 2
#define IWM_CMD_PMKID_FLUSH 3

/* Payload for UMAC_WIFI_IF_CMD_PMKID_UPDATE (see iwm_send_pmkid_update). */
struct iwm_umac_pmkid_update {
	struct iwm_umac_wifi_if hdr;
	__le32 command;			/* IWM_CMD_PMKID_* */
	u8 bssid[ETH_ALEN];		/* optional; zero when not supplied */
	__le16 reserved;
	u8 pmkid[WLAN_PMKID_LEN];	/* optional; zero when not supplied */
} __packed;
472
473/* LMAC commands */
474int iwm_read_mac(struct iwm_priv *iwm, u8 *mac);
475int iwm_send_prio_table(struct iwm_priv *iwm);
476int iwm_send_init_calib_cfg(struct iwm_priv *iwm, u8 calib_requested);
477int iwm_send_periodic_calib_cfg(struct iwm_priv *iwm, u8 calib_requested);
478int iwm_send_calib_results(struct iwm_priv *iwm);
479int iwm_store_rxiq_calib_result(struct iwm_priv *iwm);
480int iwm_send_ct_kill_cfg(struct iwm_priv *iwm, u8 entry, u8 exit);
481
482/* UMAC commands */
483int iwm_send_wifi_if_cmd(struct iwm_priv *iwm, void *payload, u16 payload_size,
484 bool resp);
485int iwm_send_umac_reset(struct iwm_priv *iwm, __le32 reset_flags, bool resp);
486int iwm_umac_set_config_fix(struct iwm_priv *iwm, u16 tbl, u16 key, u32 value);
487int iwm_umac_set_config_var(struct iwm_priv *iwm, u16 key,
488 void *payload, u16 payload_size);
489int iwm_send_umac_config(struct iwm_priv *iwm, __le32 reset_flags);
490int iwm_send_mlme_profile(struct iwm_priv *iwm);
491int __iwm_invalidate_mlme_profile(struct iwm_priv *iwm);
492int iwm_invalidate_mlme_profile(struct iwm_priv *iwm);
493int iwm_send_packet(struct iwm_priv *iwm, struct sk_buff *skb, int pool_id);
494int iwm_set_tx_key(struct iwm_priv *iwm, u8 key_idx);
495int iwm_set_key(struct iwm_priv *iwm, bool remove, struct iwm_key *key);
496int iwm_tx_power_trigger(struct iwm_priv *iwm);
497int iwm_send_umac_stats_req(struct iwm_priv *iwm, u32 flags);
498int iwm_send_umac_channel_list(struct iwm_priv *iwm);
499int iwm_scan_ssids(struct iwm_priv *iwm, struct cfg80211_ssid *ssids,
500 int ssid_num);
501int iwm_scan_one_ssid(struct iwm_priv *iwm, u8 *ssid, int ssid_len);
502int iwm_send_umac_stop_resume_tx(struct iwm_priv *iwm,
503 struct iwm_umac_notif_stop_resume_tx *ntf);
504int iwm_send_pmkid_update(struct iwm_priv *iwm,
505 struct cfg80211_pmksa *pmksa, u32 command);
506
507/* UDMA commands */
508int iwm_target_reset(struct iwm_priv *iwm);
509#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/debug.h b/drivers/net/wireless/iwmc3200wifi/debug.h
deleted file mode 100644
index a0c13a49ab3c..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/debug.h
+++ /dev/null
@@ -1,123 +0,0 @@
1/*
2 * Intel Wireless Multicomm 3200 WiFi driver
3 *
4 * Copyright (C) 2009 Intel Corporation <ilw@linux.intel.com>
5 * Samuel Ortiz <samuel.ortiz@intel.com>
6 * Zhu Yi <yi.zhu@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 */
23
24#ifndef __IWM_DEBUG_H__
25#define __IWM_DEBUG_H__
26
27#define IWM_ERR(p, f, a...) dev_err(iwm_to_dev(p), f, ## a)
28#define IWM_WARN(p, f, a...) dev_warn(iwm_to_dev(p), f, ## a)
29#define IWM_INFO(p, f, a...) dev_info(iwm_to_dev(p), f, ## a)
30#define IWM_CRIT(p, f, a...) dev_crit(iwm_to_dev(p), f, ## a)
31
32#ifdef CONFIG_IWM_DEBUG
33
34#define IWM_DEBUG_MODULE(i, level, module, f, a...) \
35do { \
36 if (unlikely(i->dbg.dbg_module[IWM_DM_##module] >= (IWM_DL_##level)))\
37 dev_printk(KERN_INFO, (iwm_to_dev(i)), \
38 "%s " f, __func__ , ## a); \
39} while (0)
40
41#define IWM_HEXDUMP(i, level, module, pref, buf, len) \
42do { \
43 if (unlikely(i->dbg.dbg_module[IWM_DM_##module] >= (IWM_DL_##level)))\
44 print_hex_dump(KERN_INFO, pref, DUMP_PREFIX_OFFSET, \
45 16, 1, buf, len, 1); \
46} while (0)
47
48#else
49
50#define IWM_DEBUG_MODULE(i, level, module, f, a...)
51#define IWM_HEXDUMP(i, level, module, pref, buf, len)
52
53#endif /* CONFIG_IWM_DEBUG */
54
55/* Debug modules */
56enum iwm_debug_module_id {
57 IWM_DM_BOOT = 0,
58 IWM_DM_FW,
59 IWM_DM_SDIO,
60 IWM_DM_NTF,
61 IWM_DM_RX,
62 IWM_DM_TX,
63 IWM_DM_MLME,
64 IWM_DM_CMD,
65 IWM_DM_WEXT,
66 __IWM_DM_NR,
67};
68#define IWM_DM_DEFAULT 0
69
70#define IWM_DBG_BOOT(i, l, f, a...) IWM_DEBUG_MODULE(i, l, BOOT, f, ## a)
71#define IWM_DBG_FW(i, l, f, a...) IWM_DEBUG_MODULE(i, l, FW, f, ## a)
72#define IWM_DBG_SDIO(i, l, f, a...) IWM_DEBUG_MODULE(i, l, SDIO, f, ## a)
73#define IWM_DBG_NTF(i, l, f, a...) IWM_DEBUG_MODULE(i, l, NTF, f, ## a)
74#define IWM_DBG_RX(i, l, f, a...) IWM_DEBUG_MODULE(i, l, RX, f, ## a)
75#define IWM_DBG_TX(i, l, f, a...) IWM_DEBUG_MODULE(i, l, TX, f, ## a)
76#define IWM_DBG_MLME(i, l, f, a...) IWM_DEBUG_MODULE(i, l, MLME, f, ## a)
77#define IWM_DBG_CMD(i, l, f, a...) IWM_DEBUG_MODULE(i, l, CMD, f, ## a)
78#define IWM_DBG_WEXT(i, l, f, a...) IWM_DEBUG_MODULE(i, l, WEXT, f, ## a)
79
80/* Debug levels */
81enum iwm_debug_level {
82 IWM_DL_NONE = 0,
83 IWM_DL_ERR,
84 IWM_DL_WARN,
85 IWM_DL_INFO,
86 IWM_DL_DBG,
87};
88#define IWM_DL_DEFAULT IWM_DL_ERR
89
/* Per-device debugfs state: dentries for every file/dir created in
 * iwm_debugfs_init(), plus the live debug level/module settings that
 * the IWM_DEBUG_MODULE macro consults. */
struct iwm_debugfs {
	struct iwm_priv *iwm;
	struct dentry *rootdir;		/* <debugfs>/iwm */
	struct dentry *devdir;		/* per-wiphy subdirectory */
	struct dentry *dbgdir;
	struct dentry *txdir;
	struct dentry *rxdir;
	struct dentry *busdir;		/* populated by the bus backend */

	u32 dbg_level;			/* global level, mirrored to modules */
	struct dentry *dbg_level_dentry;

	unsigned long dbg_modules;	/* bitmask of enabled modules */
	struct dentry *dbg_modules_dentry;

	u8 dbg_module[__IWM_DM_NR];	/* per-module level (IWM_DL_*) */
	struct dentry *dbg_module_dentries[__IWM_DM_NR];

	struct dentry *txq_dentry;
	struct dentry *tx_credit_dentry;
	struct dentry *rx_ticket_dentry;

	struct dentry *fw_err_dentry;
};
114
115#ifdef CONFIG_IWM_DEBUG
116void iwm_debugfs_init(struct iwm_priv *iwm);
117void iwm_debugfs_exit(struct iwm_priv *iwm);
118#else
119static inline void iwm_debugfs_init(struct iwm_priv *iwm) {}
120static inline void iwm_debugfs_exit(struct iwm_priv *iwm) {}
121#endif
122
123#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
deleted file mode 100644
index b6199d124bb9..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
+++ /dev/null
@@ -1,488 +0,0 @@
1/*
2 * Intel Wireless Multicomm 3200 WiFi driver
3 *
4 * Copyright (C) 2009 Intel Corporation <ilw@linux.intel.com>
5 * Samuel Ortiz <samuel.ortiz@intel.com>
6 * Zhu Yi <yi.zhu@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 */
23
24#include <linux/slab.h>
25#include <linux/kernel.h>
26#include <linux/bitops.h>
27#include <linux/debugfs.h>
28#include <linux/export.h>
29
30#include "iwm.h"
31#include "bus.h"
32#include "rx.h"
33#include "debug.h"
34
/* Debugfs entry name for each debug module id. */
static struct {
	u8 id;
	char *name;
} iwm_debug_module[__IWM_DM_NR] = {
	{IWM_DM_BOOT, "boot"},
	{IWM_DM_FW, "fw"},
	{IWM_DM_SDIO, "sdio"},
	{IWM_DM_NTF, "ntf"},
	{IWM_DM_RX, "rx"},
	{IWM_DM_TX, "tx"},
	{IWM_DM_MLME, "mlme"},
	{IWM_DM_CMD, "cmd"},
	{IWM_DM_WEXT, "wext"},
};

/* Set a module's initial debug level and create its writable x8 knob
 * under the "debug" directory. */
#define add_dbg_module(dbg, name, id, initlevel) \
do { \
	dbg.dbg_module[id] = (initlevel); \
	dbg.dbg_module_dentries[id] = \
		debugfs_create_x8(name, 0600, \
				  dbg.dbgdir, \
				  &(dbg.dbg_module[id])); \
} while (0)
58
59static int iwm_debugfs_u32_read(void *data, u64 *val)
60{
61 struct iwm_priv *iwm = data;
62
63 *val = iwm->dbg.dbg_level;
64 return 0;
65}
66
67static int iwm_debugfs_dbg_level_write(void *data, u64 val)
68{
69 struct iwm_priv *iwm = data;
70 int i;
71
72 iwm->dbg.dbg_level = val;
73
74 for (i = 0; i < __IWM_DM_NR; i++)
75 iwm->dbg.dbg_module[i] = val;
76
77 return 0;
78}
79DEFINE_SIMPLE_ATTRIBUTE(fops_iwm_dbg_level,
80 iwm_debugfs_u32_read, iwm_debugfs_dbg_level_write,
81 "%llu\n");
82
83static int iwm_debugfs_dbg_modules_write(void *data, u64 val)
84{
85 struct iwm_priv *iwm = data;
86 int i, bit;
87
88 iwm->dbg.dbg_modules = val;
89
90 for (i = 0; i < __IWM_DM_NR; i++)
91 iwm->dbg.dbg_module[i] = 0;
92
93 for_each_set_bit(bit, &iwm->dbg.dbg_modules, __IWM_DM_NR)
94 iwm->dbg.dbg_module[bit] = iwm->dbg.dbg_level;
95
96 return 0;
97}
98DEFINE_SIMPLE_ATTRIBUTE(fops_iwm_dbg_modules,
99 iwm_debugfs_u32_read, iwm_debugfs_dbg_modules_write,
100 "%llu\n");
101
102
103static ssize_t iwm_debugfs_txq_read(struct file *filp, char __user *buffer,
104 size_t count, loff_t *ppos)
105{
106 struct iwm_priv *iwm = filp->private_data;
107 char *buf;
108 int i, buf_len = 4096;
109 size_t len = 0;
110 ssize_t ret;
111
112 if (*ppos != 0)
113 return 0;
114 if (count < sizeof(buf))
115 return -ENOSPC;
116
117 buf = kzalloc(buf_len, GFP_KERNEL);
118 if (!buf)
119 return -ENOMEM;
120
121 for (i = 0; i < IWM_TX_QUEUES; i++) {
122 struct iwm_tx_queue *txq = &iwm->txq[i];
123 struct sk_buff *skb;
124 int j;
125 unsigned long flags;
126
127 spin_lock_irqsave(&txq->queue.lock, flags);
128
129 skb = (struct sk_buff *)&txq->queue;
130
131 len += snprintf(buf + len, buf_len - len, "TXQ #%d\n", i);
132 len += snprintf(buf + len, buf_len - len, "\tStopped: %d\n",
133 __netif_subqueue_stopped(iwm_to_ndev(iwm),
134 txq->id));
135 len += snprintf(buf + len, buf_len - len, "\tConcat count:%d\n",
136 txq->concat_count);
137 len += snprintf(buf + len, buf_len - len, "\tQueue len: %d\n",
138 skb_queue_len(&txq->queue));
139 for (j = 0; j < skb_queue_len(&txq->queue); j++) {
140 struct iwm_tx_info *tx_info;
141
142 skb = skb->next;
143 tx_info = skb_to_tx_info(skb);
144
145 len += snprintf(buf + len, buf_len - len,
146 "\tSKB #%d\n", j);
147 len += snprintf(buf + len, buf_len - len,
148 "\t\tsta: %d\n", tx_info->sta);
149 len += snprintf(buf + len, buf_len - len,
150 "\t\tcolor: %d\n", tx_info->color);
151 len += snprintf(buf + len, buf_len - len,
152 "\t\ttid: %d\n", tx_info->tid);
153 }
154
155 spin_unlock_irqrestore(&txq->queue.lock, flags);
156
157 spin_lock_irqsave(&txq->stopped_queue.lock, flags);
158
159 len += snprintf(buf + len, buf_len - len,
160 "\tStopped Queue len: %d\n",
161 skb_queue_len(&txq->stopped_queue));
162 for (j = 0; j < skb_queue_len(&txq->stopped_queue); j++) {
163 struct iwm_tx_info *tx_info;
164
165 skb = skb->next;
166 tx_info = skb_to_tx_info(skb);
167
168 len += snprintf(buf + len, buf_len - len,
169 "\tSKB #%d\n", j);
170 len += snprintf(buf + len, buf_len - len,
171 "\t\tsta: %d\n", tx_info->sta);
172 len += snprintf(buf + len, buf_len - len,
173 "\t\tcolor: %d\n", tx_info->color);
174 len += snprintf(buf + len, buf_len - len,
175 "\t\ttid: %d\n", tx_info->tid);
176 }
177
178 spin_unlock_irqrestore(&txq->stopped_queue.lock, flags);
179 }
180
181 ret = simple_read_from_buffer(buffer, len, ppos, buf, buf_len);
182 kfree(buf);
183
184 return ret;
185}
186
187static ssize_t iwm_debugfs_tx_credit_read(struct file *filp,
188 char __user *buffer,
189 size_t count, loff_t *ppos)
190{
191 struct iwm_priv *iwm = filp->private_data;
192 struct iwm_tx_credit *credit = &iwm->tx_credit;
193 char *buf;
194 int i, buf_len = 4096;
195 size_t len = 0;
196 ssize_t ret;
197
198 if (*ppos != 0)
199 return 0;
200 if (count < sizeof(buf))
201 return -ENOSPC;
202
203 buf = kzalloc(buf_len, GFP_KERNEL);
204 if (!buf)
205 return -ENOMEM;
206
207 len += snprintf(buf + len, buf_len - len,
208 "NR pools: %d\n", credit->pool_nr);
209 len += snprintf(buf + len, buf_len - len,
210 "pools map: 0x%lx\n", credit->full_pools_map);
211
212 len += snprintf(buf + len, buf_len - len, "\n### POOLS ###\n");
213 for (i = 0; i < IWM_MACS_OUT_GROUPS; i++) {
214 len += snprintf(buf + len, buf_len - len,
215 "pools entry #%d\n", i);
216 len += snprintf(buf + len, buf_len - len,
217 "\tid: %d\n",
218 credit->pools[i].id);
219 len += snprintf(buf + len, buf_len - len,
220 "\tsid: %d\n",
221 credit->pools[i].sid);
222 len += snprintf(buf + len, buf_len - len,
223 "\tmin_pages: %d\n",
224 credit->pools[i].min_pages);
225 len += snprintf(buf + len, buf_len - len,
226 "\tmax_pages: %d\n",
227 credit->pools[i].max_pages);
228 len += snprintf(buf + len, buf_len - len,
229 "\talloc_pages: %d\n",
230 credit->pools[i].alloc_pages);
231 len += snprintf(buf + len, buf_len - len,
232 "\tfreed_pages: %d\n",
233 credit->pools[i].total_freed_pages);
234 }
235
236 len += snprintf(buf + len, buf_len - len, "\n### SPOOLS ###\n");
237 for (i = 0; i < IWM_MACS_OUT_SGROUPS; i++) {
238 len += snprintf(buf + len, buf_len - len,
239 "spools entry #%d\n", i);
240 len += snprintf(buf + len, buf_len - len,
241 "\tid: %d\n",
242 credit->spools[i].id);
243 len += snprintf(buf + len, buf_len - len,
244 "\tmax_pages: %d\n",
245 credit->spools[i].max_pages);
246 len += snprintf(buf + len, buf_len - len,
247 "\talloc_pages: %d\n",
248 credit->spools[i].alloc_pages);
249
250 }
251
252 ret = simple_read_from_buffer(buffer, len, ppos, buf, buf_len);
253 kfree(buf);
254
255 return ret;
256}
257
258static ssize_t iwm_debugfs_rx_ticket_read(struct file *filp,
259 char __user *buffer,
260 size_t count, loff_t *ppos)
261{
262 struct iwm_priv *iwm = filp->private_data;
263 struct iwm_rx_ticket_node *ticket;
264 char *buf;
265 int buf_len = 4096, i;
266 size_t len = 0;
267 ssize_t ret;
268
269 if (*ppos != 0)
270 return 0;
271 if (count < sizeof(buf))
272 return -ENOSPC;
273
274 buf = kzalloc(buf_len, GFP_KERNEL);
275 if (!buf)
276 return -ENOMEM;
277
278 spin_lock(&iwm->ticket_lock);
279 list_for_each_entry(ticket, &iwm->rx_tickets, node) {
280 len += snprintf(buf + len, buf_len - len, "Ticket #%d\n",
281 ticket->ticket->id);
282 len += snprintf(buf + len, buf_len - len, "\taction: 0x%x\n",
283 ticket->ticket->action);
284 len += snprintf(buf + len, buf_len - len, "\tflags: 0x%x\n",
285 ticket->ticket->flags);
286 }
287 spin_unlock(&iwm->ticket_lock);
288
289 for (i = 0; i < IWM_RX_ID_HASH; i++) {
290 struct iwm_rx_packet *packet;
291 struct list_head *pkt_list = &iwm->rx_packets[i];
292
293 if (!list_empty(pkt_list)) {
294 len += snprintf(buf + len, buf_len - len,
295 "Packet hash #%d\n", i);
296 spin_lock(&iwm->packet_lock[i]);
297 list_for_each_entry(packet, pkt_list, node) {
298 len += snprintf(buf + len, buf_len - len,
299 "\tPacket id: %d\n",
300 packet->id);
301 len += snprintf(buf + len, buf_len - len,
302 "\tPacket length: %lu\n",
303 packet->pkt_size);
304 }
305 spin_unlock(&iwm->packet_lock[i]);
306 }
307 }
308
309 ret = simple_read_from_buffer(buffer, len, ppos, buf, buf_len);
310 kfree(buf);
311
312 return ret;
313}
314
/*
 * debugfs "debug/last_fw_err" read handler: dump the last firmware error
 * record, prefixed with 'L' or 'U' depending on whether the error came
 * from the LMAC or the UMAC.
 *
 * Here buf is a real 512-byte array, so the count < sizeof(buf) check is
 * against the actual buffer size (unlike the pointer-based handlers).
 * NOTE(review): returning -ENOMEM when no error has been recorded looks
 * like an odd errno choice — confirm callers expect it.
 */
static ssize_t iwm_debugfs_fw_err_read(struct file *filp,
				       char __user *buffer,
				       size_t count, loff_t *ppos)
{

	struct iwm_priv *iwm = filp->private_data;
	char buf[512];
	int buf_len = 512;
	size_t len = 0;

	if (*ppos != 0)
		return 0;
	if (count < sizeof(buf))
		return -ENOSPC;

	if (!iwm->last_fw_err)
		return -ENOMEM;

	/* line_num == 0 means nothing was recorded yet: empty read. */
	if (iwm->last_fw_err->line_num == 0)
		goto out;

	len += snprintf(buf + len, buf_len - len, "%cMAC FW ERROR:\n",
			(le32_to_cpu(iwm->last_fw_err->category) == UMAC_SYS_ERR_CAT_LMAC)
			? 'L' : 'U');
	len += snprintf(buf + len, buf_len - len,
			"\tCategory: %d\n",
			le32_to_cpu(iwm->last_fw_err->category));

	len += snprintf(buf + len, buf_len - len,
			"\tStatus: 0x%x\n",
			le32_to_cpu(iwm->last_fw_err->status));

	len += snprintf(buf + len, buf_len - len,
			"\tPC: 0x%x\n",
			le32_to_cpu(iwm->last_fw_err->pc));

	len += snprintf(buf + len, buf_len - len,
			"\tblink1: %d\n",
			le32_to_cpu(iwm->last_fw_err->blink1));

	len += snprintf(buf + len, buf_len - len,
			"\tblink2: %d\n",
			le32_to_cpu(iwm->last_fw_err->blink2));

	len += snprintf(buf + len, buf_len - len,
			"\tilink1: %d\n",
			le32_to_cpu(iwm->last_fw_err->ilink1));

	len += snprintf(buf + len, buf_len - len,
			"\tilink2: %d\n",
			le32_to_cpu(iwm->last_fw_err->ilink2));

	len += snprintf(buf + len, buf_len - len,
			"\tData1: 0x%x\n",
			le32_to_cpu(iwm->last_fw_err->data1));

	len += snprintf(buf + len, buf_len - len,
			"\tData2: 0x%x\n",
			le32_to_cpu(iwm->last_fw_err->data2));

	len += snprintf(buf + len, buf_len - len,
			"\tLine number: %d\n",
			le32_to_cpu(iwm->last_fw_err->line_num));

	len += snprintf(buf + len, buf_len - len,
			"\tUMAC status: 0x%x\n",
			le32_to_cpu(iwm->last_fw_err->umac_status));

	len += snprintf(buf + len, buf_len - len,
			"\tLMAC status: 0x%x\n",
			le32_to_cpu(iwm->last_fw_err->lmac_status));

	len += snprintf(buf + len, buf_len - len,
			"\tSDIO status: 0x%x\n",
			le32_to_cpu(iwm->last_fw_err->sdio_status));

out:

	return simple_read_from_buffer(buffer, len, ppos, buf, buf_len);
}
395
/* Read-only debugfs file operations. simple_open places the inode's
 * private data (the struct iwm_priv pointer passed at creation) into
 * file->private_data for the read handlers. */
static const struct file_operations iwm_debugfs_txq_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = iwm_debugfs_txq_read,
	.llseek = default_llseek,
};

static const struct file_operations iwm_debugfs_tx_credit_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = iwm_debugfs_tx_credit_read,
	.llseek = default_llseek,
};

static const struct file_operations iwm_debugfs_rx_ticket_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = iwm_debugfs_rx_ticket_read,
	.llseek = default_llseek,
};

static const struct file_operations iwm_debugfs_fw_err_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = iwm_debugfs_fw_err_read,
	.llseek = default_llseek,
};
423
/*
 * Create the whole debugfs hierarchy for one device:
 * <debugfs>/<modname>/<wiphy>/{debug,rx,tx,bus}, the level/modules
 * controls, one per-module knob, and the queue/credit/ticket/fw-error
 * dump files. Directories must be created before the files placed in
 * them; iwm_debugfs_exit() removes everything in reverse.
 */
void iwm_debugfs_init(struct iwm_priv *iwm)
{
	int i;

	iwm->dbg.rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
	iwm->dbg.devdir = debugfs_create_dir(wiphy_name(iwm_to_wiphy(iwm)),
					     iwm->dbg.rootdir);
	iwm->dbg.dbgdir = debugfs_create_dir("debug", iwm->dbg.devdir);
	iwm->dbg.rxdir = debugfs_create_dir("rx", iwm->dbg.devdir);
	iwm->dbg.txdir = debugfs_create_dir("tx", iwm->dbg.devdir);
	iwm->dbg.busdir = debugfs_create_dir("bus", iwm->dbg.devdir);
	/* Let the bus backend (e.g. SDIO) add its own entries. */
	if (iwm->bus_ops->debugfs_init)
		iwm->bus_ops->debugfs_init(iwm, iwm->dbg.busdir);

	iwm->dbg.dbg_level = IWM_DL_NONE;
	iwm->dbg.dbg_level_dentry =
		debugfs_create_file("level", 0200, iwm->dbg.dbgdir, iwm,
				    &fops_iwm_dbg_level);

	iwm->dbg.dbg_modules = IWM_DM_DEFAULT;
	iwm->dbg.dbg_modules_dentry =
		debugfs_create_file("modules", 0200, iwm->dbg.dbgdir, iwm,
				    &fops_iwm_dbg_modules);

	/* One x8 level knob per debug module, named after the module. */
	for (i = 0; i < __IWM_DM_NR; i++)
		add_dbg_module(iwm->dbg, iwm_debug_module[i].name,
			       iwm_debug_module[i].id, IWM_DL_DEFAULT);

	iwm->dbg.txq_dentry = debugfs_create_file("queues", 0200,
						  iwm->dbg.txdir, iwm,
						  &iwm_debugfs_txq_fops);
	iwm->dbg.tx_credit_dentry = debugfs_create_file("credits", 0200,
							iwm->dbg.txdir, iwm,
							&iwm_debugfs_tx_credit_fops);
	iwm->dbg.rx_ticket_dentry = debugfs_create_file("tickets", 0200,
							iwm->dbg.rxdir, iwm,
							&iwm_debugfs_rx_ticket_fops);
	iwm->dbg.fw_err_dentry = debugfs_create_file("last_fw_err", 0200,
						     iwm->dbg.dbgdir, iwm,
						     &iwm_debugfs_fw_err_fops);
}
465
/*
 * Tear down the debugfs hierarchy created by iwm_debugfs_init().
 * Files are removed before their parent directories, and the bus
 * backend gets a chance to remove its own entries before busdir goes
 * away — keep this ordering.
 */
void iwm_debugfs_exit(struct iwm_priv *iwm)
{
	int i;

	for (i = 0; i < __IWM_DM_NR; i++)
		debugfs_remove(iwm->dbg.dbg_module_dentries[i]);

	debugfs_remove(iwm->dbg.dbg_modules_dentry);
	debugfs_remove(iwm->dbg.dbg_level_dentry);
	debugfs_remove(iwm->dbg.txq_dentry);
	debugfs_remove(iwm->dbg.tx_credit_dentry);
	debugfs_remove(iwm->dbg.rx_ticket_dentry);
	debugfs_remove(iwm->dbg.fw_err_dentry);
	if (iwm->bus_ops->debugfs_exit)
		iwm->bus_ops->debugfs_exit(iwm);

	debugfs_remove(iwm->dbg.busdir);
	debugfs_remove(iwm->dbg.dbgdir);
	debugfs_remove(iwm->dbg.txdir);
	debugfs_remove(iwm->dbg.rxdir);
	debugfs_remove(iwm->dbg.devdir);
	debugfs_remove(iwm->dbg.rootdir);
}
diff --git a/drivers/net/wireless/iwmc3200wifi/eeprom.c b/drivers/net/wireless/iwmc3200wifi/eeprom.c
deleted file mode 100644
index e80e776b74f7..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/eeprom.c
+++ /dev/null
@@ -1,234 +0,0 @@
1/*
2 * Intel Wireless Multicomm 3200 WiFi driver
3 *
4 * Copyright (C) 2009 Intel Corporation. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Intel Corporation nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 *
33 * Intel Corporation <ilw@linux.intel.com>
34 * Samuel Ortiz <samuel.ortiz@intel.com>
35 * Zhu Yi <yi.zhu@intel.com>
36 *
37 */
38
39#include <linux/kernel.h>
40#include <linux/slab.h>
41
42#include "iwm.h"
43#include "umac.h"
44#include "commands.h"
45#include "eeprom.h"
46
47static struct iwm_eeprom_entry eeprom_map[] = {
48 [IWM_EEPROM_SIG] =
49 {"Signature", IWM_EEPROM_SIG_OFF, IWM_EEPROM_SIG_LEN},
50
51 [IWM_EEPROM_VERSION] =
52 {"Version", IWM_EEPROM_VERSION_OFF, IWM_EEPROM_VERSION_LEN},
53
54 [IWM_EEPROM_OEM_HW_VERSION] =
55 {"OEM HW version", IWM_EEPROM_OEM_HW_VERSION_OFF,
56 IWM_EEPROM_OEM_HW_VERSION_LEN},
57
58 [IWM_EEPROM_MAC_VERSION] =
59 {"MAC version", IWM_EEPROM_MAC_VERSION_OFF, IWM_EEPROM_MAC_VERSION_LEN},
60
61 [IWM_EEPROM_CARD_ID] =
62 {"Card ID", IWM_EEPROM_CARD_ID_OFF, IWM_EEPROM_CARD_ID_LEN},
63
64 [IWM_EEPROM_RADIO_CONF] =
65 {"Radio config", IWM_EEPROM_RADIO_CONF_OFF, IWM_EEPROM_RADIO_CONF_LEN},
66
67 [IWM_EEPROM_SKU_CAP] =
68 {"SKU capabilities", IWM_EEPROM_SKU_CAP_OFF, IWM_EEPROM_SKU_CAP_LEN},
69
70 [IWM_EEPROM_FAT_CHANNELS_CAP] =
71 {"HT channels capabilities", IWM_EEPROM_FAT_CHANNELS_CAP_OFF,
72 IWM_EEPROM_FAT_CHANNELS_CAP_LEN},
73
74 [IWM_EEPROM_CALIB_RXIQ_OFFSET] =
75 {"RX IQ offset", IWM_EEPROM_CALIB_RXIQ_OFF, IWM_EEPROM_INDIRECT_LEN},
76
77 [IWM_EEPROM_CALIB_RXIQ] =
78 {"Calib RX IQ", 0, IWM_EEPROM_CALIB_RXIQ_LEN},
79};
80
81
82static int iwm_eeprom_read(struct iwm_priv *iwm, u8 eeprom_id)
83{
84 int ret;
85 u32 entry_size, chunk_size, data_offset = 0, addr_offset = 0;
86 u32 addr;
87 struct iwm_udma_wifi_cmd udma_cmd;
88 struct iwm_umac_cmd umac_cmd;
89 struct iwm_umac_cmd_eeprom_proxy eeprom_cmd;
90
91 if (eeprom_id > (IWM_EEPROM_LAST - 1))
92 return -EINVAL;
93
94 entry_size = eeprom_map[eeprom_id].length;
95
96 if (eeprom_id >= IWM_EEPROM_INDIRECT_DATA) {
97 /* indirect data */
98 u32 off_id = eeprom_id - IWM_EEPROM_INDIRECT_DATA +
99 IWM_EEPROM_INDIRECT_OFFSET;
100
101 eeprom_map[eeprom_id].offset =
102 *(u16 *)(iwm->eeprom + eeprom_map[off_id].offset) << 1;
103 }
104
105 addr = eeprom_map[eeprom_id].offset;
106
107 udma_cmd.eop = 1;
108 udma_cmd.credit_group = 0x4;
109 udma_cmd.ra_tid = UMAC_HDI_ACT_TBL_IDX_HOST_CMD;
110 udma_cmd.lmac_offset = 0;
111
112 umac_cmd.id = UMAC_CMD_OPCODE_EEPROM_PROXY;
113 umac_cmd.resp = 1;
114
115 while (entry_size > 0) {
116 chunk_size = min_t(u32, entry_size, IWM_MAX_EEPROM_DATA_LEN);
117
118 eeprom_cmd.hdr.type =
119 cpu_to_le32(IWM_UMAC_CMD_EEPROM_TYPE_READ);
120 eeprom_cmd.hdr.offset = cpu_to_le32(addr + addr_offset);
121 eeprom_cmd.hdr.len = cpu_to_le32(chunk_size);
122
123 ret = iwm_hal_send_umac_cmd(iwm, &udma_cmd,
124 &umac_cmd, &eeprom_cmd,
125 sizeof(struct iwm_umac_cmd_eeprom_proxy));
126 if (ret < 0) {
127 IWM_ERR(iwm, "Couldn't read eeprom\n");
128 return ret;
129 }
130
131 ret = iwm_notif_handle(iwm, UMAC_CMD_OPCODE_EEPROM_PROXY,
132 IWM_SRC_UMAC, 2*HZ);
133 if (ret < 0) {
134 IWM_ERR(iwm, "Did not get any eeprom answer\n");
135 return ret;
136 }
137
138 data_offset += chunk_size;
139 addr_offset += chunk_size;
140 entry_size -= chunk_size;
141 }
142
143 return 0;
144}
145
146u8 *iwm_eeprom_access(struct iwm_priv *iwm, u8 eeprom_id)
147{
148 if (!iwm->eeprom)
149 return ERR_PTR(-ENODEV);
150
151 return iwm->eeprom + eeprom_map[eeprom_id].offset;
152}
153
154int iwm_eeprom_fat_channels(struct iwm_priv *iwm)
155{
156 struct wiphy *wiphy = iwm_to_wiphy(iwm);
157 struct ieee80211_supported_band *band;
158 u16 *channels, i;
159
160 channels = (u16 *)iwm_eeprom_access(iwm, IWM_EEPROM_FAT_CHANNELS_CAP);
161 if (IS_ERR(channels))
162 return PTR_ERR(channels);
163
164 band = wiphy->bands[IEEE80211_BAND_2GHZ];
165 band->ht_cap.ht_supported = true;
166
167 for (i = 0; i < IWM_EEPROM_FAT_CHANNELS_24; i++)
168 if (!(channels[i] & IWM_EEPROM_FAT_CHANNEL_ENABLED))
169 band->ht_cap.ht_supported = false;
170
171 band = wiphy->bands[IEEE80211_BAND_5GHZ];
172 band->ht_cap.ht_supported = true;
173 for (i = IWM_EEPROM_FAT_CHANNELS_24; i < IWM_EEPROM_FAT_CHANNELS; i++)
174 if (!(channels[i] & IWM_EEPROM_FAT_CHANNEL_ENABLED))
175 band->ht_cap.ht_supported = false;
176
177 return 0;
178}
179
180u32 iwm_eeprom_wireless_mode(struct iwm_priv *iwm)
181{
182 u16 sku_cap;
183 u32 wireless_mode = 0;
184
185 sku_cap = *((u16 *)iwm_eeprom_access(iwm, IWM_EEPROM_SKU_CAP));
186
187 if (sku_cap & IWM_EEPROM_SKU_CAP_BAND_24GHZ)
188 wireless_mode |= WIRELESS_MODE_11G;
189
190 if (sku_cap & IWM_EEPROM_SKU_CAP_BAND_52GHZ)
191 wireless_mode |= WIRELESS_MODE_11A;
192
193 if (sku_cap & IWM_EEPROM_SKU_CAP_11N_ENABLE)
194 wireless_mode |= WIRELESS_MODE_11N;
195
196 return wireless_mode;
197}
198
199
200int iwm_eeprom_init(struct iwm_priv *iwm)
201{
202 int i, ret = 0;
203 char name[32];
204
205 iwm->eeprom = kzalloc(IWM_EEPROM_LEN, GFP_KERNEL);
206 if (!iwm->eeprom)
207 return -ENOMEM;
208
209 for (i = IWM_EEPROM_FIRST; i < IWM_EEPROM_LAST; i++) {
210 ret = iwm_eeprom_read(iwm, i);
211 if (ret < 0) {
212 IWM_ERR(iwm, "Couldn't read eeprom entry #%d: %s\n",
213 i, eeprom_map[i].name);
214 break;
215 }
216 }
217
218 IWM_DBG_BOOT(iwm, DBG, "EEPROM dump:\n");
219 for (i = IWM_EEPROM_FIRST; i < IWM_EEPROM_LAST; i++) {
220 memset(name, 0, 32);
221 sprintf(name, "%s: ", eeprom_map[i].name);
222
223 IWM_HEXDUMP(iwm, DBG, BOOT, name,
224 iwm->eeprom + eeprom_map[i].offset,
225 eeprom_map[i].length);
226 }
227
228 return ret;
229}
230
231void iwm_eeprom_exit(struct iwm_priv *iwm)
232{
233 kfree(iwm->eeprom);
234}
diff --git a/drivers/net/wireless/iwmc3200wifi/eeprom.h b/drivers/net/wireless/iwmc3200wifi/eeprom.h
deleted file mode 100644
index 4e3a3fdab0d3..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/eeprom.h
+++ /dev/null
@@ -1,127 +0,0 @@
1/*
2 * Intel Wireless Multicomm 3200 WiFi driver
3 *
4 * Copyright (C) 2009 Intel Corporation. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Intel Corporation nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 *
33 * Intel Corporation <ilw@linux.intel.com>
34 * Samuel Ortiz <samuel.ortiz@intel.com>
35 * Zhu Yi <yi.zhu@intel.com>
36 *
37 */
38
39#ifndef __IWM_EEPROM_H__
40#define __IWM_EEPROM_H__
41
42enum {
43 IWM_EEPROM_SIG = 0,
44 IWM_EEPROM_FIRST = IWM_EEPROM_SIG,
45 IWM_EEPROM_VERSION,
46 IWM_EEPROM_OEM_HW_VERSION,
47 IWM_EEPROM_MAC_VERSION,
48 IWM_EEPROM_CARD_ID,
49 IWM_EEPROM_RADIO_CONF,
50 IWM_EEPROM_SKU_CAP,
51 IWM_EEPROM_FAT_CHANNELS_CAP,
52
53 IWM_EEPROM_INDIRECT_OFFSET,
54 IWM_EEPROM_CALIB_RXIQ_OFFSET = IWM_EEPROM_INDIRECT_OFFSET,
55
56 IWM_EEPROM_INDIRECT_DATA,
57 IWM_EEPROM_CALIB_RXIQ = IWM_EEPROM_INDIRECT_DATA,
58
59 IWM_EEPROM_LAST,
60};
61
62#define IWM_EEPROM_SIG_OFF 0x00
63#define IWM_EEPROM_VERSION_OFF (0x54 << 1)
64#define IWM_EEPROM_OEM_HW_VERSION_OFF (0x56 << 1)
65#define IWM_EEPROM_MAC_VERSION_OFF (0x30 << 1)
66#define IWM_EEPROM_CARD_ID_OFF (0x5d << 1)
67#define IWM_EEPROM_RADIO_CONF_OFF (0x58 << 1)
68#define IWM_EEPROM_SKU_CAP_OFF (0x55 << 1)
69#define IWM_EEPROM_CALIB_CONFIG_OFF (0x7c << 1)
70#define IWM_EEPROM_FAT_CHANNELS_CAP_OFF (0xde << 1)
71
72#define IWM_EEPROM_SIG_LEN 4
73#define IWM_EEPROM_VERSION_LEN 2
74#define IWM_EEPROM_OEM_HW_VERSION_LEN 2
75#define IWM_EEPROM_MAC_VERSION_LEN 1
76#define IWM_EEPROM_CARD_ID_LEN 2
77#define IWM_EEPROM_RADIO_CONF_LEN 2
78#define IWM_EEPROM_SKU_CAP_LEN 2
79#define IWM_EEPROM_FAT_CHANNELS_CAP_LEN 40
80#define IWM_EEPROM_INDIRECT_LEN 2
81
82#define IWM_MAX_EEPROM_DATA_LEN 240
83#define IWM_EEPROM_LEN 0x800
84
85#define IWM_EEPROM_MIN_ALLOWED_VERSION 0x0610
86#define IWM_EEPROM_MAX_ALLOWED_VERSION 0x0700
87#define IWM_EEPROM_CURRENT_VERSION 0x0612
88
89#define IWM_EEPROM_SKU_CAP_BAND_24GHZ (1 << 4)
90#define IWM_EEPROM_SKU_CAP_BAND_52GHZ (1 << 5)
91#define IWM_EEPROM_SKU_CAP_11N_ENABLE (1 << 6)
92
93#define IWM_EEPROM_FAT_CHANNELS 20
94/* 2.4 gHz FAT primary channels: 1, 2, 3, 4, 5, 6, 7, 8, 9 */
95#define IWM_EEPROM_FAT_CHANNELS_24 9
96/* 5.2 gHz FAT primary channels: 36,44,52,60,100,108,116,124,132,149,157 */
97#define IWM_EEPROM_FAT_CHANNELS_52 11
98
99#define IWM_EEPROM_FAT_CHANNEL_ENABLED (1 << 0)
100
101enum {
102 IWM_EEPROM_CALIB_CAL_HDR,
103 IWM_EEPROM_CALIB_TX_POWER,
104 IWM_EEPROM_CALIB_XTAL,
105 IWM_EEPROM_CALIB_TEMPERATURE,
106 IWM_EEPROM_CALIB_RX_BB_FILTER,
107 IWM_EEPROM_CALIB_RX_IQ,
108 IWM_EEPROM_CALIB_MAX,
109};
110
111#define IWM_EEPROM_CALIB_RXIQ_OFF (IWM_EEPROM_CALIB_CONFIG_OFF + \
112 (IWM_EEPROM_CALIB_RX_IQ << 1))
113#define IWM_EEPROM_CALIB_RXIQ_LEN sizeof(struct iwm_lmac_calib_rxiq)
114
115struct iwm_eeprom_entry {
116 char *name;
117 u32 offset;
118 u32 length;
119};
120
121int iwm_eeprom_init(struct iwm_priv *iwm);
122void iwm_eeprom_exit(struct iwm_priv *iwm);
123u8 *iwm_eeprom_access(struct iwm_priv *iwm, u8 eeprom_id);
124int iwm_eeprom_fat_channels(struct iwm_priv *iwm);
125u32 iwm_eeprom_wireless_mode(struct iwm_priv *iwm);
126
127#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/fw.c b/drivers/net/wireless/iwmc3200wifi/fw.c
deleted file mode 100644
index 6f1afe6bbc8c..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/fw.c
+++ /dev/null
@@ -1,416 +0,0 @@
1/*
2 * Intel Wireless Multicomm 3200 WiFi driver
3 *
4 * Copyright (C) 2009 Intel Corporation. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Intel Corporation nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 *
33 * Intel Corporation <ilw@linux.intel.com>
34 * Samuel Ortiz <samuel.ortiz@intel.com>
35 * Zhu Yi <yi.zhu@intel.com>
36 *
37 */
38
39#include <linux/kernel.h>
40#include <linux/firmware.h>
41
42#include "iwm.h"
43#include "bus.h"
44#include "hal.h"
45#include "umac.h"
46#include "debug.h"
47#include "fw.h"
48#include "commands.h"
49
50static const char fw_barker[] = "*WESTOPFORNOONE*";
51
52/*
53 * @op_code: Op code we're looking for.
54 * @index: There can be several instances of the same opcode within
55 * the firmware. Index specifies which one we're looking for.
56 */
57static int iwm_fw_op_offset(struct iwm_priv *iwm, const struct firmware *fw,
58 u16 op_code, u32 index)
59{
60 int offset = -EINVAL, fw_offset;
61 u32 op_index = 0;
62 const u8 *fw_ptr;
63 struct iwm_fw_hdr_rec *rec;
64
65 fw_offset = 0;
66 fw_ptr = fw->data;
67
68 /* We first need to look for the firmware barker */
69 if (memcmp(fw_ptr, fw_barker, IWM_HDR_BARKER_LEN)) {
70 IWM_ERR(iwm, "No barker string in this FW\n");
71 return -EINVAL;
72 }
73
74 if (fw->size < IWM_HDR_LEN) {
75 IWM_ERR(iwm, "FW is too small (%zu)\n", fw->size);
76 return -EINVAL;
77 }
78
79 fw_offset += IWM_HDR_BARKER_LEN;
80
81 while (fw_offset < fw->size) {
82 rec = (struct iwm_fw_hdr_rec *)(fw_ptr + fw_offset);
83
84 IWM_DBG_FW(iwm, DBG, "FW: op_code: 0x%x, len: %d @ 0x%x\n",
85 rec->op_code, rec->len, fw_offset);
86
87 if (rec->op_code == IWM_HDR_REC_OP_INVALID) {
88 IWM_DBG_FW(iwm, DBG, "Reached INVALID op code\n");
89 break;
90 }
91
92 if (rec->op_code == op_code) {
93 if (op_index == index) {
94 fw_offset += sizeof(struct iwm_fw_hdr_rec);
95 offset = fw_offset;
96 goto out;
97 }
98 op_index++;
99 }
100
101 fw_offset += sizeof(struct iwm_fw_hdr_rec) + rec->len;
102 }
103
104 out:
105 return offset;
106}
107
108static int iwm_load_firmware_chunk(struct iwm_priv *iwm,
109 const struct firmware *fw,
110 struct iwm_fw_img_desc *img_desc)
111{
112 struct iwm_udma_nonwifi_cmd target_cmd;
113 u32 chunk_size;
114 const u8 *chunk_ptr;
115 int ret = 0;
116
117 IWM_DBG_FW(iwm, INFO, "Loading FW chunk: %d bytes @ 0x%x\n",
118 img_desc->length, img_desc->address);
119
120 target_cmd.opcode = UMAC_HDI_OUT_OPCODE_WRITE;
121 target_cmd.handle_by_hw = 1;
122 target_cmd.op2 = 0;
123 target_cmd.resp = 0;
124 target_cmd.eop = 1;
125
126 chunk_size = img_desc->length;
127 chunk_ptr = fw->data + img_desc->offset;
128
129 while (chunk_size > 0) {
130 u32 tmp_chunk_size;
131
132 tmp_chunk_size = min_t(u32, chunk_size,
133 IWM_MAX_NONWIFI_CMD_BUFF_SIZE);
134
135 target_cmd.addr = cpu_to_le32(img_desc->address +
136 (chunk_ptr - fw->data - img_desc->offset));
137 target_cmd.op1_sz = cpu_to_le32(tmp_chunk_size);
138
139 IWM_DBG_FW(iwm, DBG, "\t%d bytes @ 0x%x\n",
140 tmp_chunk_size, target_cmd.addr);
141
142 ret = iwm_hal_send_target_cmd(iwm, &target_cmd, chunk_ptr);
143 if (ret < 0) {
144 IWM_ERR(iwm, "Couldn't load FW chunk\n");
145 break;
146 }
147
148 chunk_size -= tmp_chunk_size;
149 chunk_ptr += tmp_chunk_size;
150 }
151
152 return ret;
153}
154/*
155 * To load a fw image to the target, we basically go through the
156 * fw, looking for OP_MEM_DESC records. Once we found one, we
157 * pass it to iwm_load_firmware_chunk().
158 * The OP_MEM_DESC records contain the actuall memory chunk to be
159 * sent, but also the destination address.
160 */
161static int iwm_load_img(struct iwm_priv *iwm, const char *img_name)
162{
163 const struct firmware *fw;
164 struct iwm_fw_img_desc *img_desc;
165 struct iwm_fw_img_ver *ver;
166 int ret = 0, fw_offset;
167 u32 opcode_idx = 0, build_date;
168 char *build_tag;
169
170 ret = request_firmware(&fw, img_name, iwm_to_dev(iwm));
171 if (ret) {
172 IWM_ERR(iwm, "Request firmware failed");
173 return ret;
174 }
175
176 IWM_DBG_FW(iwm, INFO, "Start to load FW %s\n", img_name);
177
178 while (1) {
179 fw_offset = iwm_fw_op_offset(iwm, fw,
180 IWM_HDR_REC_OP_MEM_DESC,
181 opcode_idx);
182 if (fw_offset < 0)
183 break;
184
185 img_desc = (struct iwm_fw_img_desc *)(fw->data + fw_offset);
186 ret = iwm_load_firmware_chunk(iwm, fw, img_desc);
187 if (ret < 0)
188 goto err_release_fw;
189 opcode_idx++;
190 }
191
192 /* Read firmware version */
193 fw_offset = iwm_fw_op_offset(iwm, fw, IWM_HDR_REC_OP_SW_VER, 0);
194 if (fw_offset < 0)
195 goto err_release_fw;
196
197 ver = (struct iwm_fw_img_ver *)(fw->data + fw_offset);
198
199 /* Read build tag */
200 fw_offset = iwm_fw_op_offset(iwm, fw, IWM_HDR_REC_OP_BUILD_TAG, 0);
201 if (fw_offset < 0)
202 goto err_release_fw;
203
204 build_tag = (char *)(fw->data + fw_offset);
205
206 /* Read build date */
207 fw_offset = iwm_fw_op_offset(iwm, fw, IWM_HDR_REC_OP_BUILD_DATE, 0);
208 if (fw_offset < 0)
209 goto err_release_fw;
210
211 build_date = *(u32 *)(fw->data + fw_offset);
212
213 IWM_INFO(iwm, "%s:\n", img_name);
214 IWM_INFO(iwm, "\tVersion: %02X.%02X\n", ver->major, ver->minor);
215 IWM_INFO(iwm, "\tBuild tag: %s\n", build_tag);
216 IWM_INFO(iwm, "\tBuild date: %x-%x-%x\n",
217 IWM_BUILD_YEAR(build_date), IWM_BUILD_MONTH(build_date),
218 IWM_BUILD_DAY(build_date));
219
220 if (!strcmp(img_name, iwm->bus_ops->umac_name))
221 sprintf(iwm->umac_version, "%02X.%02X",
222 ver->major, ver->minor);
223
224 if (!strcmp(img_name, iwm->bus_ops->lmac_name))
225 sprintf(iwm->lmac_version, "%02X.%02X",
226 ver->major, ver->minor);
227
228 err_release_fw:
229 release_firmware(fw);
230
231 return ret;
232}
233
234static int iwm_load_umac(struct iwm_priv *iwm)
235{
236 struct iwm_udma_nonwifi_cmd target_cmd;
237 int ret;
238
239 ret = iwm_load_img(iwm, iwm->bus_ops->umac_name);
240 if (ret < 0)
241 return ret;
242
243 /* We've loaded the UMAC, we can tell the target to jump there */
244 target_cmd.opcode = UMAC_HDI_OUT_OPCODE_JUMP;
245 target_cmd.addr = cpu_to_le32(UMAC_MU_FW_INST_DATA_12_ADDR);
246 target_cmd.op1_sz = 0;
247 target_cmd.op2 = 0;
248 target_cmd.handle_by_hw = 0;
249 target_cmd.resp = 1 ;
250 target_cmd.eop = 1;
251
252 ret = iwm_hal_send_target_cmd(iwm, &target_cmd, NULL);
253 if (ret < 0)
254 IWM_ERR(iwm, "Couldn't send JMP command\n");
255
256 return ret;
257}
258
259static int iwm_load_lmac(struct iwm_priv *iwm, const char *img_name)
260{
261 int ret;
262
263 ret = iwm_load_img(iwm, img_name);
264 if (ret < 0)
265 return ret;
266
267 return iwm_send_umac_reset(iwm,
268 cpu_to_le32(UMAC_RST_CTRL_FLG_LARC_CLK_EN), 0);
269}
270
271static int iwm_init_calib(struct iwm_priv *iwm, unsigned long cfg_bitmap,
272 unsigned long expected_bitmap, u8 rx_iq_cmd)
273{
274 /* Read RX IQ calibration result from EEPROM */
275 if (test_bit(rx_iq_cmd, &cfg_bitmap)) {
276 iwm_store_rxiq_calib_result(iwm);
277 set_bit(PHY_CALIBRATE_RX_IQ_CMD, &iwm->calib_done_map);
278 }
279
280 iwm_send_prio_table(iwm);
281 iwm_send_init_calib_cfg(iwm, cfg_bitmap);
282
283 while (iwm->calib_done_map != expected_bitmap) {
284 if (iwm_notif_handle(iwm, CALIBRATION_RES_NOTIFICATION,
285 IWM_SRC_LMAC, WAIT_NOTIF_TIMEOUT)) {
286 IWM_DBG_FW(iwm, DBG, "Initial calibration timeout\n");
287 return -ETIMEDOUT;
288 }
289
290 IWM_DBG_FW(iwm, DBG, "Got calibration result. calib_done_map: "
291 "0x%lx, expected calibrations: 0x%lx\n",
292 iwm->calib_done_map, expected_bitmap);
293 }
294
295 return 0;
296}
297
298/*
299 * We currently have to load 3 FWs:
300 * 1) The UMAC (Upper MAC).
301 * 2) The calibration LMAC (Lower MAC).
302 * We then send the calibration init command, so that the device can
303 * run a first calibration round.
304 * 3) The operational LMAC, which replaces the calibration one when it's
305 * done with the first calibration round.
306 *
307 * Once those 3 FWs have been loaded, we send the periodic calibration
308 * command, and then the device is available for regular 802.11 operations.
309 */
310int iwm_load_fw(struct iwm_priv *iwm)
311{
312 unsigned long init_calib_map, periodic_calib_map;
313 unsigned long expected_calib_map;
314 int ret;
315
316 /* We first start downloading the UMAC */
317 ret = iwm_load_umac(iwm);
318 if (ret < 0) {
319 IWM_ERR(iwm, "UMAC loading failed\n");
320 return ret;
321 }
322
323 /* Handle UMAC_ALIVE notification */
324 ret = iwm_notif_handle(iwm, UMAC_NOTIFY_OPCODE_ALIVE, IWM_SRC_UMAC,
325 WAIT_NOTIF_TIMEOUT);
326 if (ret) {
327 IWM_ERR(iwm, "Handle UMAC_ALIVE failed: %d\n", ret);
328 return ret;
329 }
330
331 /* UMAC is alive, we can download the calibration LMAC */
332 ret = iwm_load_lmac(iwm, iwm->bus_ops->calib_lmac_name);
333 if (ret) {
334 IWM_ERR(iwm, "Calibration LMAC loading failed\n");
335 return ret;
336 }
337
338 /* Handle UMAC_INIT_COMPLETE notification */
339 ret = iwm_notif_handle(iwm, UMAC_NOTIFY_OPCODE_INIT_COMPLETE,
340 IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT);
341 if (ret) {
342 IWM_ERR(iwm, "Handle INIT_COMPLETE failed for calibration "
343 "LMAC: %d\n", ret);
344 return ret;
345 }
346
347 /* Read EEPROM data */
348 ret = iwm_eeprom_init(iwm);
349 if (ret < 0) {
350 IWM_ERR(iwm, "Couldn't init eeprom array\n");
351 return ret;
352 }
353
354 init_calib_map = iwm->conf.calib_map & IWM_CALIB_MAP_INIT_MSK;
355 expected_calib_map = iwm->conf.expected_calib_map &
356 IWM_CALIB_MAP_INIT_MSK;
357 periodic_calib_map = IWM_CALIB_MAP_PER_LMAC(iwm->conf.calib_map);
358
359 ret = iwm_init_calib(iwm, init_calib_map, expected_calib_map,
360 CALIB_CFG_RX_IQ_IDX);
361 if (ret < 0) {
362 /* Let's try the old way */
363 ret = iwm_init_calib(iwm, expected_calib_map,
364 expected_calib_map,
365 PHY_CALIBRATE_RX_IQ_CMD);
366 if (ret < 0) {
367 IWM_ERR(iwm, "Calibration result timeout\n");
368 goto out;
369 }
370 }
371
372 /* Handle LMAC CALIBRATION_COMPLETE notification */
373 ret = iwm_notif_handle(iwm, CALIBRATION_COMPLETE_NOTIFICATION,
374 IWM_SRC_LMAC, WAIT_NOTIF_TIMEOUT);
375 if (ret) {
376 IWM_ERR(iwm, "Wait for CALIBRATION_COMPLETE timeout\n");
377 goto out;
378 }
379
380 IWM_INFO(iwm, "LMAC calibration done: 0x%lx\n", iwm->calib_done_map);
381
382 iwm_send_umac_reset(iwm, cpu_to_le32(UMAC_RST_CTRL_FLG_LARC_RESET), 1);
383
384 ret = iwm_notif_handle(iwm, UMAC_CMD_OPCODE_RESET, IWM_SRC_UMAC,
385 WAIT_NOTIF_TIMEOUT);
386 if (ret) {
387 IWM_ERR(iwm, "Wait for UMAC RESET timeout\n");
388 goto out;
389 }
390
391 /* Download the operational LMAC */
392 ret = iwm_load_lmac(iwm, iwm->bus_ops->lmac_name);
393 if (ret) {
394 IWM_ERR(iwm, "LMAC loading failed\n");
395 goto out;
396 }
397
398 ret = iwm_notif_handle(iwm, UMAC_NOTIFY_OPCODE_INIT_COMPLETE,
399 IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT);
400 if (ret) {
401 IWM_ERR(iwm, "Handle INIT_COMPLETE failed for LMAC: %d\n", ret);
402 goto out;
403 }
404
405 iwm_send_prio_table(iwm);
406 iwm_send_calib_results(iwm);
407 iwm_send_periodic_calib_cfg(iwm, periodic_calib_map);
408 iwm_send_ct_kill_cfg(iwm, iwm->conf.ct_kill_entry,
409 iwm->conf.ct_kill_exit);
410
411 return 0;
412
413 out:
414 iwm_eeprom_exit(iwm);
415 return ret;
416}
diff --git a/drivers/net/wireless/iwmc3200wifi/fw.h b/drivers/net/wireless/iwmc3200wifi/fw.h
deleted file mode 100644
index c70a3b40dad3..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/fw.h
+++ /dev/null
@@ -1,100 +0,0 @@
1/*
2 * Intel Wireless Multicomm 3200 WiFi driver
3 *
4 * Copyright (C) 2009 Intel Corporation. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Intel Corporation nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 *
33 * Intel Corporation <ilw@linux.intel.com>
34 * Samuel Ortiz <samuel.ortiz@intel.com>
35 * Zhu Yi <yi.zhu@intel.com>
36 *
37 */
38
39#ifndef __IWM_FW_H__
40#define __IWM_FW_H__
41
42/**
43 * struct iwm_fw_hdr_rec - An iwm firmware image is a
44 * concatenation of various records. Each of them is
45 * defined by an ID (aka op code), a length, and the
46 * actual data.
47 * @op_code: The record ID, see IWM_HDR_REC_OP_*
48 *
49 * @len: The record payload length
50 *
51 * @buf: The record payload
52 */
53struct iwm_fw_hdr_rec {
54 u16 op_code;
55 u16 len;
56 u8 buf[0];
57};
58
59/* Header's definitions */
60#define IWM_HDR_LEN (512)
61#define IWM_HDR_BARKER_LEN (16)
62
63/* Header's opcodes */
64#define IWM_HDR_REC_OP_INVALID (0x00)
65#define IWM_HDR_REC_OP_BUILD_DATE (0x01)
66#define IWM_HDR_REC_OP_BUILD_TAG (0x02)
67#define IWM_HDR_REC_OP_SW_VER (0x03)
68#define IWM_HDR_REC_OP_HW_SKU (0x04)
69#define IWM_HDR_REC_OP_BUILD_OPT (0x05)
70#define IWM_HDR_REC_OP_MEM_DESC (0x06)
71#define IWM_HDR_REC_USERDEFS (0x07)
72
73/* Header's records length (in bytes) */
74#define IWM_HDR_REC_LEN_BUILD_DATE (4)
75#define IWM_HDR_REC_LEN_BUILD_TAG (64)
76#define IWM_HDR_REC_LEN_SW_VER (4)
77#define IWM_HDR_REC_LEN_HW_SKU (4)
78#define IWM_HDR_REC_LEN_BUILD_OPT (4)
79#define IWM_HDR_REC_LEN_MEM_DESC (12)
80#define IWM_HDR_REC_LEN_USERDEF (64)
81
82#define IWM_BUILD_YEAR(date) ((date >> 16) & 0xffff)
83#define IWM_BUILD_MONTH(date) ((date >> 8) & 0xff)
84#define IWM_BUILD_DAY(date) (date & 0xff)
85
86struct iwm_fw_img_desc {
87 u32 offset;
88 u32 address;
89 u32 length;
90};
91
92struct iwm_fw_img_ver {
93 u8 minor;
94 u8 major;
95 u16 reserved;
96};
97
98int iwm_load_fw(struct iwm_priv *iwm);
99
100#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/hal.c b/drivers/net/wireless/iwmc3200wifi/hal.c
deleted file mode 100644
index 1cabcb39643f..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/hal.c
+++ /dev/null
@@ -1,470 +0,0 @@
1/*
2 * Intel Wireless Multicomm 3200 WiFi driver
3 *
4 * Copyright (C) 2009 Intel Corporation. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Intel Corporation nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 *
33 * Intel Corporation <ilw@linux.intel.com>
34 * Samuel Ortiz <samuel.ortiz@intel.com>
35 * Zhu Yi <yi.zhu@intel.com>
36 *
37 */
38
39/*
40 * Hardware Abstraction Layer for iwm.
41 *
42 * This file mostly defines an abstraction API for
43 * sending various commands to the target.
44 *
45 * We have 2 types of commands: wifi and non-wifi ones.
46 *
47 * - wifi commands:
48 * They are used for sending LMAC and UMAC commands,
49 * and thus are the most commonly used ones.
50 * There are 2 different wifi command types, the regular
51 * one and the LMAC one. The former is used to send
52 * UMAC commands (see UMAC_CMD_OPCODE_* from umac.h)
53 * while the latter is used for sending commands to the
54 * LMAC. If you look at LMAC commands you'll se that they
55 * are actually regular iwlwifi target commands encapsulated
56 * into a special UMAC command called UMAC passthrough.
57 * This is due to the fact the host talks exclusively
58 * to the UMAC and so there needs to be a special UMAC
59 * command for talking to the LMAC.
60 * This is how a wifi command is laid out:
61 * ------------------------
62 * | iwm_udma_out_wifi_hdr |
63 * ------------------------
64 * | SW meta_data (32 bits) |
65 * ------------------------
66 * | iwm_dev_cmd_hdr |
67 * ------------------------
68 * | payload |
69 * | .... |
70 *
71 * - non-wifi, or general commands:
72 * Those commands are handled by the device's bootrom,
73 * and are typically sent when the UMAC and the LMAC
74 * are not yet available.
75 * * This is how a non-wifi command is laid out:
76 * ---------------------------
77 * | iwm_udma_out_nonwifi_hdr |
78 * ---------------------------
79 * | payload |
80 * | .... |
81
82 *
83 * All the commands start with a UDMA header, which is
84 * basically a 32 bits field. The 4 LSB there define
85 * an opcode that allows the target to differentiate
86 * between wifi (opcode is 0xf) and non-wifi commands
87 * (opcode is [0..0xe]).
88 *
89 * When a command (wifi or non-wifi) is supposed to receive
90 * an answer, we queue the command buffer. When we do receive
91 * a command response from the UMAC, we go through the list
92 * of pending command, and pass both the command and the answer
93 * to the rx handler. Each command is sent with a unique
94 * sequence id, and the answer is sent with the same one. This
95 * is how we're supposed to match an answer with its command.
96 * See rx.c:iwm_rx_handle_[non]wifi() and iwm_get_pending_[non]wifi()
97 * for the implementation details.
98 */
99#include <linux/kernel.h>
100#include <linux/netdevice.h>
101#include <linux/slab.h>
102
103#include "iwm.h"
104#include "bus.h"
105#include "hal.h"
106#include "umac.h"
107#include "debug.h"
108#include "trace.h"
109
110static int iwm_nonwifi_cmd_init(struct iwm_priv *iwm,
111 struct iwm_nonwifi_cmd *cmd,
112 struct iwm_udma_nonwifi_cmd *udma_cmd)
113{
114 INIT_LIST_HEAD(&cmd->pending);
115
116 spin_lock(&iwm->cmd_lock);
117
118 cmd->resp_received = 0;
119
120 cmd->seq_num = iwm->nonwifi_seq_num;
121 udma_cmd->seq_num = cpu_to_le16(cmd->seq_num);
122
123 iwm->nonwifi_seq_num++;
124 iwm->nonwifi_seq_num %= UMAC_NONWIFI_SEQ_NUM_MAX;
125
126 if (udma_cmd->resp)
127 list_add_tail(&cmd->pending, &iwm->nonwifi_pending_cmd);
128
129 spin_unlock(&iwm->cmd_lock);
130
131 cmd->buf.start = cmd->buf.payload;
132 cmd->buf.len = 0;
133
134 memcpy(&cmd->udma_cmd, udma_cmd, sizeof(*udma_cmd));
135
136 return cmd->seq_num;
137}
138
139u16 iwm_alloc_wifi_cmd_seq(struct iwm_priv *iwm)
140{
141 u16 seq_num = iwm->wifi_seq_num;
142
143 iwm->wifi_seq_num++;
144 iwm->wifi_seq_num %= UMAC_WIFI_SEQ_NUM_MAX;
145
146 return seq_num;
147}
148
149static void iwm_wifi_cmd_init(struct iwm_priv *iwm,
150 struct iwm_wifi_cmd *cmd,
151 struct iwm_udma_wifi_cmd *udma_cmd,
152 struct iwm_umac_cmd *umac_cmd,
153 struct iwm_lmac_cmd *lmac_cmd,
154 u16 payload_size)
155{
156 INIT_LIST_HEAD(&cmd->pending);
157
158 spin_lock(&iwm->cmd_lock);
159
160 cmd->seq_num = iwm_alloc_wifi_cmd_seq(iwm);
161 umac_cmd->seq_num = cpu_to_le16(cmd->seq_num);
162
163 if (umac_cmd->resp)
164 list_add_tail(&cmd->pending, &iwm->wifi_pending_cmd);
165
166 spin_unlock(&iwm->cmd_lock);
167
168 cmd->buf.start = cmd->buf.payload;
169 cmd->buf.len = 0;
170
171 if (lmac_cmd) {
172 cmd->buf.start -= sizeof(struct iwm_lmac_hdr);
173
174 lmac_cmd->seq_num = cpu_to_le16(cmd->seq_num);
175 lmac_cmd->count = cpu_to_le16(payload_size);
176
177 memcpy(&cmd->lmac_cmd, lmac_cmd, sizeof(*lmac_cmd));
178
179 umac_cmd->count = cpu_to_le16(sizeof(struct iwm_lmac_hdr));
180 } else
181 umac_cmd->count = 0;
182
183 umac_cmd->count = cpu_to_le16(payload_size +
184 le16_to_cpu(umac_cmd->count));
185 udma_cmd->count = cpu_to_le16(sizeof(struct iwm_umac_fw_cmd_hdr) +
186 le16_to_cpu(umac_cmd->count));
187
188 memcpy(&cmd->udma_cmd, udma_cmd, sizeof(*udma_cmd));
189 memcpy(&cmd->umac_cmd, umac_cmd, sizeof(*umac_cmd));
190}
191
192void iwm_cmd_flush(struct iwm_priv *iwm)
193{
194 struct iwm_wifi_cmd *wcmd, *wnext;
195 struct iwm_nonwifi_cmd *nwcmd, *nwnext;
196
197 list_for_each_entry_safe(wcmd, wnext, &iwm->wifi_pending_cmd, pending) {
198 list_del(&wcmd->pending);
199 kfree(wcmd);
200 }
201
202 list_for_each_entry_safe(nwcmd, nwnext, &iwm->nonwifi_pending_cmd,
203 pending) {
204 list_del(&nwcmd->pending);
205 kfree(nwcmd);
206 }
207}
208
209struct iwm_wifi_cmd *iwm_get_pending_wifi_cmd(struct iwm_priv *iwm, u16 seq_num)
210{
211 struct iwm_wifi_cmd *cmd;
212
213 list_for_each_entry(cmd, &iwm->wifi_pending_cmd, pending)
214 if (cmd->seq_num == seq_num) {
215 list_del(&cmd->pending);
216 return cmd;
217 }
218
219 return NULL;
220}
221
222struct iwm_nonwifi_cmd *iwm_get_pending_nonwifi_cmd(struct iwm_priv *iwm,
223 u8 seq_num, u8 cmd_opcode)
224{
225 struct iwm_nonwifi_cmd *cmd;
226
227 list_for_each_entry(cmd, &iwm->nonwifi_pending_cmd, pending)
228 if ((cmd->seq_num == seq_num) &&
229 (cmd->udma_cmd.opcode == cmd_opcode) &&
230 (cmd->resp_received)) {
231 list_del(&cmd->pending);
232 return cmd;
233 }
234
235 return NULL;
236}
237
238static void iwm_build_udma_nonwifi_hdr(struct iwm_priv *iwm,
239 struct iwm_udma_out_nonwifi_hdr *hdr,
240 struct iwm_udma_nonwifi_cmd *cmd)
241{
242 memset(hdr, 0, sizeof(*hdr));
243
244 SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_OPCODE, cmd->opcode);
245 SET_VAL32(hdr->cmd, UDMA_HDI_OUT_NW_CMD_RESP, cmd->resp);
246 SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_EOT, 1);
247 SET_VAL32(hdr->cmd, UDMA_HDI_OUT_NW_CMD_HANDLE_BY_HW,
248 cmd->handle_by_hw);
249 SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_SIGNATURE, UMAC_HDI_OUT_SIGNATURE);
250 SET_VAL32(hdr->cmd, UDMA_HDI_OUT_CMD_NON_WIFI_HW_SEQ_NUM,
251 le16_to_cpu(cmd->seq_num));
252
253 hdr->addr = cmd->addr;
254 hdr->op1_sz = cmd->op1_sz;
255 hdr->op2 = cmd->op2;
256}
257
258static int iwm_send_udma_nonwifi_cmd(struct iwm_priv *iwm,
259 struct iwm_nonwifi_cmd *cmd)
260{
261 struct iwm_udma_out_nonwifi_hdr *udma_hdr;
262 struct iwm_nonwifi_cmd_buff *buf;
263 struct iwm_udma_nonwifi_cmd *udma_cmd = &cmd->udma_cmd;
264
265 buf = &cmd->buf;
266
267 buf->start -= sizeof(struct iwm_umac_nonwifi_out_hdr);
268 buf->len += sizeof(struct iwm_umac_nonwifi_out_hdr);
269
270 udma_hdr = (struct iwm_udma_out_nonwifi_hdr *)(buf->start);
271
272 iwm_build_udma_nonwifi_hdr(iwm, udma_hdr, udma_cmd);
273
274 IWM_DBG_CMD(iwm, DBG,
275 "Send UDMA nonwifi cmd: opcode = 0x%x, resp = 0x%x, "
276 "hw = 0x%x, seqnum = %d, addr = 0x%x, op1_sz = 0x%x, "
277 "op2 = 0x%x\n", udma_cmd->opcode, udma_cmd->resp,
278 udma_cmd->handle_by_hw, cmd->seq_num, udma_cmd->addr,
279 udma_cmd->op1_sz, udma_cmd->op2);
280
281 trace_iwm_tx_nonwifi_cmd(iwm, udma_hdr);
282 return iwm_bus_send_chunk(iwm, buf->start, buf->len);
283}
284
285void iwm_udma_wifi_hdr_set_eop(struct iwm_priv *iwm, u8 *buf, u8 eop)
286{
287 struct iwm_udma_out_wifi_hdr *hdr = (struct iwm_udma_out_wifi_hdr *)buf;
288
289 SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_EOT, eop);
290}
291
292void iwm_build_udma_wifi_hdr(struct iwm_priv *iwm,
293 struct iwm_udma_out_wifi_hdr *hdr,
294 struct iwm_udma_wifi_cmd *cmd)
295{
296 memset(hdr, 0, sizeof(*hdr));
297
298 SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_OPCODE, UMAC_HDI_OUT_OPCODE_WIFI);
299 SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_EOT, cmd->eop);
300 SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_SIGNATURE, UMAC_HDI_OUT_SIGNATURE);
301
302 SET_VAL32(hdr->meta_data, UMAC_HDI_OUT_BYTE_COUNT,
303 le16_to_cpu(cmd->count));
304 SET_VAL32(hdr->meta_data, UMAC_HDI_OUT_CREDIT_GRP, cmd->credit_group);
305 SET_VAL32(hdr->meta_data, UMAC_HDI_OUT_RATID, cmd->ra_tid);
306 SET_VAL32(hdr->meta_data, UMAC_HDI_OUT_LMAC_OFFSET, cmd->lmac_offset);
307}
308
309void iwm_build_umac_hdr(struct iwm_priv *iwm,
310 struct iwm_umac_fw_cmd_hdr *hdr,
311 struct iwm_umac_cmd *cmd)
312{
313 memset(hdr, 0, sizeof(*hdr));
314
315 SET_VAL32(hdr->meta_data, UMAC_FW_CMD_BYTE_COUNT,
316 le16_to_cpu(cmd->count));
317 SET_VAL32(hdr->meta_data, UMAC_FW_CMD_TX_STA_COLOR, cmd->color);
318 SET_VAL8(hdr->cmd.flags, UMAC_DEV_CMD_FLAGS_RESP_REQ, cmd->resp);
319
320 hdr->cmd.cmd = cmd->id;
321 hdr->cmd.seq_num = cmd->seq_num;
322}
323
324static int iwm_send_udma_wifi_cmd(struct iwm_priv *iwm,
325 struct iwm_wifi_cmd *cmd)
326{
327 struct iwm_umac_wifi_out_hdr *umac_hdr;
328 struct iwm_wifi_cmd_buff *buf;
329 struct iwm_udma_wifi_cmd *udma_cmd = &cmd->udma_cmd;
330 struct iwm_umac_cmd *umac_cmd = &cmd->umac_cmd;
331 int ret;
332
333 buf = &cmd->buf;
334
335 buf->start -= sizeof(struct iwm_umac_wifi_out_hdr);
336 buf->len += sizeof(struct iwm_umac_wifi_out_hdr);
337
338 umac_hdr = (struct iwm_umac_wifi_out_hdr *)(buf->start);
339
340 iwm_build_udma_wifi_hdr(iwm, &umac_hdr->hw_hdr, udma_cmd);
341 iwm_build_umac_hdr(iwm, &umac_hdr->sw_hdr, umac_cmd);
342
343 IWM_DBG_CMD(iwm, DBG,
344 "Send UDMA wifi cmd: opcode = 0x%x, UMAC opcode = 0x%x, "
345 "eop = 0x%x, count = 0x%x, credit_group = 0x%x, "
346 "ra_tid = 0x%x, lmac_offset = 0x%x, seqnum = %d\n",
347 UMAC_HDI_OUT_OPCODE_WIFI, umac_cmd->id,
348 udma_cmd->eop, udma_cmd->count, udma_cmd->credit_group,
349 udma_cmd->ra_tid, udma_cmd->lmac_offset, cmd->seq_num);
350
351 if (umac_cmd->id == UMAC_CMD_OPCODE_WIFI_PASS_THROUGH)
352 IWM_DBG_CMD(iwm, DBG, "\tLMAC opcode: 0x%x\n",
353 cmd->lmac_cmd.id);
354
355 ret = iwm_tx_credit_alloc(iwm, udma_cmd->credit_group, buf->len);
356
357 /* We keep sending UMAC reset regardless of the command credits.
358 * The UMAC is supposed to be reset anyway and the Tx credits are
359 * reinitialized afterwards. If we are lucky, the reset could
360 * still be done even though we have run out of credits for the
361 * command pool at this moment.*/
362 if (ret && (umac_cmd->id != UMAC_CMD_OPCODE_RESET)) {
363 IWM_DBG_TX(iwm, DBG, "Failed to alloc tx credit for cmd %d\n",
364 umac_cmd->id);
365 return ret;
366 }
367
368 trace_iwm_tx_wifi_cmd(iwm, umac_hdr);
369 return iwm_bus_send_chunk(iwm, buf->start, buf->len);
370}
371
372/* target_cmd a.k.a udma_nonwifi_cmd can be sent when UMAC is not available */
373int iwm_hal_send_target_cmd(struct iwm_priv *iwm,
374 struct iwm_udma_nonwifi_cmd *udma_cmd,
375 const void *payload)
376{
377 struct iwm_nonwifi_cmd *cmd;
378 int ret, seq_num;
379
380 cmd = kzalloc(sizeof(struct iwm_nonwifi_cmd), GFP_KERNEL);
381 if (!cmd) {
382 IWM_ERR(iwm, "Couldn't alloc memory for hal cmd\n");
383 return -ENOMEM;
384 }
385
386 seq_num = iwm_nonwifi_cmd_init(iwm, cmd, udma_cmd);
387
388 if (cmd->udma_cmd.opcode == UMAC_HDI_OUT_OPCODE_WRITE ||
389 cmd->udma_cmd.opcode == UMAC_HDI_OUT_OPCODE_WRITE_PERSISTENT) {
390 cmd->buf.len = le32_to_cpu(cmd->udma_cmd.op1_sz);
391 memcpy(&cmd->buf.payload, payload, cmd->buf.len);
392 }
393
394 ret = iwm_send_udma_nonwifi_cmd(iwm, cmd);
395
396 if (!udma_cmd->resp)
397 kfree(cmd);
398
399 if (ret < 0)
400 return ret;
401
402 return seq_num;
403}
404
405static void iwm_build_lmac_hdr(struct iwm_priv *iwm, struct iwm_lmac_hdr *hdr,
406 struct iwm_lmac_cmd *cmd)
407{
408 memset(hdr, 0, sizeof(*hdr));
409
410 hdr->id = cmd->id;
411 hdr->flags = 0; /* Is this ever used? */
412 hdr->seq_num = cmd->seq_num;
413}
414
415/*
416 * iwm_hal_send_host_cmd(): sends commands to the UMAC or the LMAC.
417 * Sending command to the LMAC is equivalent to sending a
418 * regular UMAC command with the LMAC passthrough or the LMAC
419 * wrapper UMAC command IDs.
420 */
421int iwm_hal_send_host_cmd(struct iwm_priv *iwm,
422 struct iwm_udma_wifi_cmd *udma_cmd,
423 struct iwm_umac_cmd *umac_cmd,
424 struct iwm_lmac_cmd *lmac_cmd,
425 const void *payload, u16 payload_size)
426{
427 struct iwm_wifi_cmd *cmd;
428 struct iwm_lmac_hdr *hdr;
429 int lmac_hdr_len = 0;
430 int ret;
431
432 cmd = kzalloc(sizeof(struct iwm_wifi_cmd), GFP_KERNEL);
433 if (!cmd) {
434 IWM_ERR(iwm, "Couldn't alloc memory for wifi hal cmd\n");
435 return -ENOMEM;
436 }
437
438 iwm_wifi_cmd_init(iwm, cmd, udma_cmd, umac_cmd, lmac_cmd, payload_size);
439
440 if (lmac_cmd) {
441 hdr = (struct iwm_lmac_hdr *)(cmd->buf.start);
442
443 iwm_build_lmac_hdr(iwm, hdr, &cmd->lmac_cmd);
444 lmac_hdr_len = sizeof(struct iwm_lmac_hdr);
445 }
446
447 memcpy(cmd->buf.payload, payload, payload_size);
448 cmd->buf.len = le16_to_cpu(umac_cmd->count);
449
450 ret = iwm_send_udma_wifi_cmd(iwm, cmd);
451
452 /* We free the cmd if we're not expecting any response */
453 if (!umac_cmd->resp)
454 kfree(cmd);
455 return ret;
456}
457
458/*
459 * iwm_hal_send_umac_cmd(): This is a special case for
460 * iwm_hal_send_host_cmd() to send direct UMAC cmd (without
461 * LMAC involved).
462 */
463int iwm_hal_send_umac_cmd(struct iwm_priv *iwm,
464 struct iwm_udma_wifi_cmd *udma_cmd,
465 struct iwm_umac_cmd *umac_cmd,
466 const void *payload, u16 payload_size)
467{
468 return iwm_hal_send_host_cmd(iwm, udma_cmd, umac_cmd, NULL,
469 payload, payload_size);
470}
diff --git a/drivers/net/wireless/iwmc3200wifi/hal.h b/drivers/net/wireless/iwmc3200wifi/hal.h
deleted file mode 100644
index c20936d9b6b7..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/hal.h
+++ /dev/null
@@ -1,237 +0,0 @@
1/*
2 * Intel Wireless Multicomm 3200 WiFi driver
3 *
4 * Copyright (C) 2009 Intel Corporation. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Intel Corporation nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 *
33 * Intel Corporation <ilw@linux.intel.com>
34 * Samuel Ortiz <samuel.ortiz@intel.com>
35 * Zhu Yi <yi.zhu@intel.com>
36 *
37 */
38
39#ifndef _IWM_HAL_H_
40#define _IWM_HAL_H_
41
42#include "umac.h"
43
44#define GET_VAL8(s, name) ((s >> name##_POS) & name##_SEED)
45#define GET_VAL16(s, name) ((le16_to_cpu(s) >> name##_POS) & name##_SEED)
46#define GET_VAL32(s, name) ((le32_to_cpu(s) >> name##_POS) & name##_SEED)
47
48#define SET_VAL8(s, name, val) \
49do { \
50 s = (s & ~(name##_SEED << name##_POS)) | \
51 ((val & name##_SEED) << name##_POS); \
52} while (0)
53
54#define SET_VAL16(s, name, val) \
55do { \
56 s = cpu_to_le16((le16_to_cpu(s) & ~(name##_SEED << name##_POS)) | \
57 ((val & name##_SEED) << name##_POS)); \
58} while (0)
59
60#define SET_VAL32(s, name, val) \
61do { \
62 s = cpu_to_le32((le32_to_cpu(s) & ~(name##_SEED << name##_POS)) | \
63 ((val & name##_SEED) << name##_POS)); \
64} while (0)
65
66
67#define UDMA_UMAC_INIT { .eop = 1, \
68 .credit_group = 0x4, \
69 .ra_tid = UMAC_HDI_ACT_TBL_IDX_HOST_CMD, \
70 .lmac_offset = 0 }
71#define UDMA_LMAC_INIT { .eop = 1, \
72 .credit_group = 0x4, \
73 .ra_tid = UMAC_HDI_ACT_TBL_IDX_HOST_CMD, \
74 .lmac_offset = 4 }
75
76
77/* UDMA IN OP CODE -- cmd bits [3:0] */
78#define UDMA_HDI_IN_NW_CMD_OPCODE_POS 0
79#define UDMA_HDI_IN_NW_CMD_OPCODE_SEED 0xF
80
81#define UDMA_IN_OPCODE_GENERAL_RESP 0x0
82#define UDMA_IN_OPCODE_READ_RESP 0x1
83#define UDMA_IN_OPCODE_WRITE_RESP 0x2
84#define UDMA_IN_OPCODE_PERS_WRITE_RESP 0x5
85#define UDMA_IN_OPCODE_PERS_READ_RESP 0x6
86#define UDMA_IN_OPCODE_RD_MDFY_WR_RESP 0x7
87#define UDMA_IN_OPCODE_EP_MNGMT_MSG 0x8
88#define UDMA_IN_OPCODE_CRDT_CHNG_MSG 0x9
89#define UDMA_IN_OPCODE_CNTRL_DATABASE_MSG 0xA
90#define UDMA_IN_OPCODE_SW_MSG 0xB
91#define UDMA_IN_OPCODE_WIFI 0xF
92#define UDMA_IN_OPCODE_WIFI_LMAC 0x1F
93#define UDMA_IN_OPCODE_WIFI_UMAC 0x2F
94
95/* HW API: udma_hdi_nonwifi API (OUT and IN) */
96
97/* iwm_udma_nonwifi_cmd request response -- bits [9:9] */
98#define UDMA_HDI_OUT_NW_CMD_RESP_POS 9
99#define UDMA_HDI_OUT_NW_CMD_RESP_SEED 0x1
100
101/* iwm_udma_nonwifi_cmd handle by HW -- bits [11:11] */
102#define UDMA_HDI_OUT_NW_CMD_HANDLE_BY_HW_POS 11
103#define UDMA_HDI_OUT_NW_CMD_HANDLE_BY_HW_SEED 0x1
104
105/* iwm_udma_nonwifi_cmd sequence-number -- bits [12:15] */
106#define UDMA_HDI_OUT_NW_CMD_SEQ_NUM_POS 12
107#define UDMA_HDI_OUT_NW_CMD_SEQ_NUM_SEED 0xF
108
109/* UDMA IN Non-WIFI HW sequence number -- bits [12:15] */
110#define UDMA_IN_NW_HW_SEQ_NUM_POS 12
111#define UDMA_IN_NW_HW_SEQ_NUM_SEED 0xF
112
113/* UDMA IN Non-WIFI HW signature -- bits [16:31] */
114#define UDMA_IN_NW_HW_SIG_POS 16
115#define UDMA_IN_NW_HW_SIG_SEED 0xFFFF
116
117/* fixed signature */
118#define UDMA_IN_NW_HW_SIG 0xCBBC
119
120/* UDMA IN Non-WIFI HW block length -- bits [32:35] */
121#define UDMA_IN_NW_HW_LENGTH_SEED 0xF
122#define UDMA_IN_NW_HW_LENGTH_POS 32
123
124/* End of HW API: udma_hdi_nonwifi API (OUT and IN) */
125
126#define IWM_SDIO_FW_MAX_CHUNK_SIZE 2032
127#define IWM_MAX_WIFI_HEADERS_SIZE 32
128#define IWM_MAX_NONWIFI_HEADERS_SIZE 16
129#define IWM_MAX_NONWIFI_CMD_BUFF_SIZE (IWM_SDIO_FW_MAX_CHUNK_SIZE - \
130 IWM_MAX_NONWIFI_HEADERS_SIZE)
131#define IWM_MAX_WIFI_CMD_BUFF_SIZE (IWM_SDIO_FW_MAX_CHUNK_SIZE - \
132 IWM_MAX_WIFI_HEADERS_SIZE)
133
134#define IWM_HAL_CONCATENATE_BUF_SIZE (32 * 1024)
135
136struct iwm_wifi_cmd_buff {
137 u16 len;
138 u8 *start;
139 u8 hdr[IWM_MAX_WIFI_HEADERS_SIZE];
140 u8 payload[IWM_MAX_WIFI_CMD_BUFF_SIZE];
141};
142
143struct iwm_nonwifi_cmd_buff {
144 u16 len;
145 u8 *start;
146 u8 hdr[IWM_MAX_NONWIFI_HEADERS_SIZE];
147 u8 payload[IWM_MAX_NONWIFI_CMD_BUFF_SIZE];
148};
149
150struct iwm_udma_nonwifi_cmd {
151 u8 opcode;
152 u8 eop;
153 u8 resp;
154 u8 handle_by_hw;
155 __le32 addr;
156 __le32 op1_sz;
157 __le32 op2;
158 __le16 seq_num;
159};
160
161struct iwm_udma_wifi_cmd {
162 __le16 count;
163 u8 eop;
164 u8 credit_group;
165 u8 ra_tid;
166 u8 lmac_offset;
167};
168
169struct iwm_umac_cmd {
170 u8 id;
171 __le16 count;
172 u8 resp;
173 __le16 seq_num;
174 u8 color;
175};
176
177struct iwm_lmac_cmd {
178 u8 id;
179 __le16 count;
180 u8 resp;
181 __le16 seq_num;
182};
183
184struct iwm_nonwifi_cmd {
185 u16 seq_num;
186 bool resp_received;
187 struct list_head pending;
188 struct iwm_udma_nonwifi_cmd udma_cmd;
189 struct iwm_umac_cmd umac_cmd;
190 struct iwm_lmac_cmd lmac_cmd;
191 struct iwm_nonwifi_cmd_buff buf;
192 u32 flags;
193};
194
195struct iwm_wifi_cmd {
196 u16 seq_num;
197 struct list_head pending;
198 struct iwm_udma_wifi_cmd udma_cmd;
199 struct iwm_umac_cmd umac_cmd;
200 struct iwm_lmac_cmd lmac_cmd;
201 struct iwm_wifi_cmd_buff buf;
202 u32 flags;
203};
204
205void iwm_cmd_flush(struct iwm_priv *iwm);
206
207struct iwm_wifi_cmd *iwm_get_pending_wifi_cmd(struct iwm_priv *iwm,
208 u16 seq_num);
209struct iwm_nonwifi_cmd *iwm_get_pending_nonwifi_cmd(struct iwm_priv *iwm,
210 u8 seq_num, u8 cmd_opcode);
211
212
213int iwm_hal_send_target_cmd(struct iwm_priv *iwm,
214 struct iwm_udma_nonwifi_cmd *ucmd,
215 const void *payload);
216
217int iwm_hal_send_host_cmd(struct iwm_priv *iwm,
218 struct iwm_udma_wifi_cmd *udma_cmd,
219 struct iwm_umac_cmd *umac_cmd,
220 struct iwm_lmac_cmd *lmac_cmd,
221 const void *payload, u16 payload_size);
222
223int iwm_hal_send_umac_cmd(struct iwm_priv *iwm,
224 struct iwm_udma_wifi_cmd *udma_cmd,
225 struct iwm_umac_cmd *umac_cmd,
226 const void *payload, u16 payload_size);
227
228u16 iwm_alloc_wifi_cmd_seq(struct iwm_priv *iwm);
229
230void iwm_udma_wifi_hdr_set_eop(struct iwm_priv *iwm, u8 *buf, u8 eop);
231void iwm_build_udma_wifi_hdr(struct iwm_priv *iwm,
232 struct iwm_udma_out_wifi_hdr *hdr,
233 struct iwm_udma_wifi_cmd *cmd);
234void iwm_build_umac_hdr(struct iwm_priv *iwm,
235 struct iwm_umac_fw_cmd_hdr *hdr,
236 struct iwm_umac_cmd *cmd);
237#endif /* _IWM_HAL_H_ */
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
deleted file mode 100644
index 51d7efa15ae6..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
+++ /dev/null
@@ -1,367 +0,0 @@
1/*
2 * Intel Wireless Multicomm 3200 WiFi driver
3 *
4 * Copyright (C) 2009 Intel Corporation. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Intel Corporation nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 *
33 * Intel Corporation <ilw@linux.intel.com>
34 * Samuel Ortiz <samuel.ortiz@intel.com>
35 * Zhu Yi <yi.zhu@intel.com>
36 *
37 */
38
39#ifndef __IWM_H__
40#define __IWM_H__
41
42#include <linux/netdevice.h>
43#include <linux/wireless.h>
44#include <net/cfg80211.h>
45
46#include "debug.h"
47#include "hal.h"
48#include "umac.h"
49#include "lmac.h"
50#include "eeprom.h"
51#include "trace.h"
52
53#define IWM_COPYRIGHT "Copyright(c) 2009 Intel Corporation"
54#define IWM_AUTHOR "<ilw@linux.intel.com>"
55
56#define IWM_SRC_LMAC UMAC_HDI_IN_SOURCE_FHRX
57#define IWM_SRC_UDMA UMAC_HDI_IN_SOURCE_UDMA
58#define IWM_SRC_UMAC UMAC_HDI_IN_SOURCE_FW
59#define IWM_SRC_NUM 3
60
61#define IWM_POWER_INDEX_MIN 0
62#define IWM_POWER_INDEX_MAX 5
63#define IWM_POWER_INDEX_DEFAULT 3
64
65struct iwm_conf {
66 u32 sdio_ior_timeout;
67 unsigned long calib_map;
68 unsigned long expected_calib_map;
69 u8 ct_kill_entry;
70 u8 ct_kill_exit;
71 bool reset_on_fatal_err;
72 bool auto_connect;
73 bool wimax_not_present;
74 bool enable_qos;
75 u32 mode;
76
77 u32 power_index;
78 u32 frag_threshold;
79 u32 rts_threshold;
80 bool cts_to_self;
81
82 u32 assoc_timeout;
83 u32 roam_timeout;
84 u32 wireless_mode;
85
86 u8 ibss_band;
87 u8 ibss_channel;
88
89 u8 mac_addr[ETH_ALEN];
90};
91
92enum {
93 COEX_MODE_SA = 1,
94 COEX_MODE_XOR,
95 COEX_MODE_CM,
96 COEX_MODE_MAX,
97};
98
99struct iwm_if_ops;
100struct iwm_wifi_cmd;
101
102struct pool_entry {
103 int id; /* group id */
104 int sid; /* super group id */
105 int min_pages; /* min capacity in pages */
106 int max_pages; /* max capacity in pages */
107 int alloc_pages; /* allocated # of pages. incresed by driver */
108 int total_freed_pages; /* total freed # of pages. incresed by UMAC */
109};
110
111struct spool_entry {
112 int id;
113 int max_pages;
114 int alloc_pages;
115};
116
117struct iwm_tx_credit {
118 spinlock_t lock;
119 int pool_nr;
120 unsigned long full_pools_map; /* bitmap for # of filled tx pools */
121 struct pool_entry pools[IWM_MACS_OUT_GROUPS];
122 struct spool_entry spools[IWM_MACS_OUT_SGROUPS];
123};
124
125struct iwm_notif {
126 struct list_head pending;
127 u32 cmd_id;
128 void *cmd;
129 u8 src;
130 void *buf;
131 unsigned long buf_size;
132};
133
134struct iwm_tid_info {
135 __le16 last_seq_num;
136 bool stopped;
137 struct mutex mutex;
138};
139
140struct iwm_sta_info {
141 u8 addr[ETH_ALEN];
142 bool valid;
143 bool qos;
144 u8 color;
145 struct iwm_tid_info tid_info[IWM_UMAC_TID_NR];
146};
147
148struct iwm_tx_info {
149 u8 sta;
150 u8 color;
151 u8 tid;
152};
153
154struct iwm_rx_info {
155 unsigned long rx_size;
156 unsigned long rx_buf_size;
157};
158
159#define IWM_NUM_KEYS 4
160
161struct iwm_umac_key_hdr {
162 u8 mac[ETH_ALEN];
163 u8 key_idx;
164 u8 multicast; /* BCast encrypt & BCast decrypt of frames FROM mac */
165} __packed;
166
167struct iwm_key {
168 struct iwm_umac_key_hdr hdr;
169 u32 cipher;
170 u8 key[WLAN_MAX_KEY_LEN];
171 u8 seq[IW_ENCODE_SEQ_MAX_SIZE];
172 int key_len;
173 int seq_len;
174};
175
176#define IWM_RX_ID_HASH 0xff
177#define IWM_RX_ID_GET_HASH(id) ((id) % IWM_RX_ID_HASH)
178
179#define IWM_STA_TABLE_NUM 16
180#define IWM_TX_LIST_SIZE 64
181#define IWM_RX_LIST_SIZE 256
182
183#define IWM_SCAN_ID_MAX 0xff
184
185#define IWM_STATUS_READY 0
186#define IWM_STATUS_SCANNING 1
187#define IWM_STATUS_SCAN_ABORTING 2
188#define IWM_STATUS_SME_CONNECTING 3
189#define IWM_STATUS_ASSOCIATED 4
190#define IWM_STATUS_RESETTING 5
191
192struct iwm_tx_queue {
193 int id;
194 struct sk_buff_head queue;
195 struct sk_buff_head stopped_queue;
196 spinlock_t lock;
197 struct workqueue_struct *wq;
198 struct work_struct worker;
199 u8 concat_buf[IWM_HAL_CONCATENATE_BUF_SIZE];
200 int concat_count;
201 u8 *concat_ptr;
202};
203
204/* Queues 0 ~ 3 for AC data, 5 for iPAN */
205#define IWM_TX_QUEUES 5
206#define IWM_TX_DATA_QUEUES 4
207#define IWM_TX_CMD_QUEUE 4
208
209struct iwm_bss_info {
210 struct list_head node;
211 struct cfg80211_bss *cfg_bss;
212 struct iwm_umac_notif_bss_info *bss;
213};
214
215typedef int (*iwm_handler)(struct iwm_priv *priv, u8 *buf,
216 unsigned long buf_size, struct iwm_wifi_cmd *cmd);
217
218#define IWM_WATCHDOG_PERIOD (6 * HZ)
219
220struct iwm_priv {
221 struct wireless_dev *wdev;
222 struct iwm_if_ops *bus_ops;
223
224 struct iwm_conf conf;
225
226 unsigned long status;
227
228 struct list_head pending_notif;
229 wait_queue_head_t notif_queue;
230
231 wait_queue_head_t nonwifi_queue;
232
233 unsigned long calib_done_map;
234 struct {
235 u8 *buf;
236 u32 size;
237 } calib_res[CALIBRATION_CMD_NUM];
238
239 struct iwm_umac_profile *umac_profile;
240 bool umac_profile_active;
241
242 u8 bssid[ETH_ALEN];
243 u8 channel;
244 u16 rate;
245 u32 txpower;
246
247 struct iwm_sta_info sta_table[IWM_STA_TABLE_NUM];
248 struct list_head bss_list;
249
250 void (*nonwifi_rx_handlers[UMAC_HDI_IN_OPCODE_NONWIFI_MAX])
251 (struct iwm_priv *priv, u8 *buf, unsigned long buf_size);
252
253 const iwm_handler *umac_handlers;
254 const iwm_handler *lmac_handlers;
255 DECLARE_BITMAP(lmac_handler_map, LMAC_COMMAND_ID_NUM);
256 DECLARE_BITMAP(umac_handler_map, LMAC_COMMAND_ID_NUM);
257 DECLARE_BITMAP(udma_handler_map, LMAC_COMMAND_ID_NUM);
258
259 struct list_head wifi_pending_cmd;
260 struct list_head nonwifi_pending_cmd;
261 u16 wifi_seq_num;
262 u8 nonwifi_seq_num;
263 spinlock_t cmd_lock;
264
265 u32 core_enabled;
266
267 u8 scan_id;
268 struct cfg80211_scan_request *scan_request;
269
270 struct sk_buff_head rx_list;
271 struct list_head rx_tickets;
272 spinlock_t ticket_lock;
273 struct list_head rx_packets[IWM_RX_ID_HASH];
274 spinlock_t packet_lock[IWM_RX_ID_HASH];
275 struct workqueue_struct *rx_wq;
276 struct work_struct rx_worker;
277
278 struct iwm_tx_credit tx_credit;
279 struct iwm_tx_queue txq[IWM_TX_QUEUES];
280
281 struct iwm_key keys[IWM_NUM_KEYS];
282 s8 default_key;
283
284 DECLARE_BITMAP(wifi_ntfy, WIFI_IF_NTFY_MAX);
285 wait_queue_head_t wifi_ntfy_queue;
286
287 wait_queue_head_t mlme_queue;
288
289 struct iw_statistics wstats;
290 struct delayed_work stats_request;
291 struct delayed_work disconnect;
292 struct delayed_work ct_kill_delay;
293
294 struct iwm_debugfs dbg;
295
296 u8 *eeprom;
297 struct timer_list watchdog;
298 struct work_struct reset_worker;
299 struct work_struct auth_retry_worker;
300 struct mutex mutex;
301
302 u8 *req_ie;
303 int req_ie_len;
304 u8 *resp_ie;
305 int resp_ie_len;
306
307 struct iwm_fw_error_hdr *last_fw_err;
308 char umac_version[8];
309 char lmac_version[8];
310
311 char private[0] __attribute__((__aligned__(NETDEV_ALIGN)));
312};
313
314static inline void *iwm_private(struct iwm_priv *iwm)
315{
316 BUG_ON(!iwm);
317 return &iwm->private;
318}
319
320#define hw_to_iwm(h) (h->iwm)
321#define iwm_to_dev(i) (wiphy_dev(i->wdev->wiphy))
322#define iwm_to_wiphy(i) (i->wdev->wiphy)
323#define wiphy_to_iwm(w) (struct iwm_priv *)(wiphy_priv(w))
324#define iwm_to_wdev(i) (i->wdev)
325#define wdev_to_iwm(w) (struct iwm_priv *)(wdev_priv(w))
326#define iwm_to_ndev(i) (i->wdev->netdev)
327#define ndev_to_iwm(n) (wdev_to_iwm(n->ieee80211_ptr))
328#define skb_to_rx_info(s) ((struct iwm_rx_info *)(s->cb))
329#define skb_to_tx_info(s) ((struct iwm_tx_info *)s->cb)
330
331void *iwm_if_alloc(int sizeof_bus, struct device *dev,
332 struct iwm_if_ops *if_ops);
333void iwm_if_free(struct iwm_priv *iwm);
334int iwm_if_add(struct iwm_priv *iwm);
335void iwm_if_remove(struct iwm_priv *iwm);
336int iwm_mode_to_nl80211_iftype(int mode);
337int iwm_priv_init(struct iwm_priv *iwm);
338void iwm_priv_deinit(struct iwm_priv *iwm);
339void iwm_reset(struct iwm_priv *iwm);
340void iwm_resetting(struct iwm_priv *iwm);
341void iwm_tx_credit_init_pools(struct iwm_priv *iwm,
342 struct iwm_umac_notif_alive *alive);
343int iwm_tx_credit_alloc(struct iwm_priv *iwm, int id, int nb);
344int iwm_notif_send(struct iwm_priv *iwm, struct iwm_wifi_cmd *cmd,
345 u8 cmd_id, u8 source, u8 *buf, unsigned long buf_size);
346int iwm_notif_handle(struct iwm_priv *iwm, u32 cmd, u8 source, long timeout);
347void iwm_init_default_profile(struct iwm_priv *iwm,
348 struct iwm_umac_profile *profile);
349void iwm_link_on(struct iwm_priv *iwm);
350void iwm_link_off(struct iwm_priv *iwm);
351int iwm_up(struct iwm_priv *iwm);
352int iwm_down(struct iwm_priv *iwm);
353
354/* TX API */
355int iwm_tid_to_queue(u16 tid);
356void iwm_tx_credit_inc(struct iwm_priv *iwm, int id, int total_freed_pages);
357void iwm_tx_worker(struct work_struct *work);
358int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
359
360/* RX API */
361void iwm_rx_setup_handlers(struct iwm_priv *iwm);
362int iwm_rx_handle(struct iwm_priv *iwm, u8 *buf, unsigned long buf_size);
363int iwm_rx_handle_resp(struct iwm_priv *iwm, u8 *buf, unsigned long buf_size,
364 struct iwm_wifi_cmd *cmd);
365void iwm_rx_free(struct iwm_priv *iwm);
366
367#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/lmac.h b/drivers/net/wireless/iwmc3200wifi/lmac.h
deleted file mode 100644
index 5ddcdf8c70c0..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/lmac.h
+++ /dev/null
@@ -1,484 +0,0 @@
1/*
2 * Intel Wireless Multicomm 3200 WiFi driver
3 *
4 * Copyright (C) 2009 Intel Corporation. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Intel Corporation nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 *
33 * Intel Corporation <ilw@linux.intel.com>
34 * Samuel Ortiz <samuel.ortiz@intel.com>
35 * Zhu Yi <yi.zhu@intel.com>
36 *
37 */
38
39#ifndef __IWM_LMAC_H__
40#define __IWM_LMAC_H__
41
42struct iwm_lmac_hdr {
43 u8 id;
44 u8 flags;
45 __le16 seq_num;
46} __packed;
47
48/* LMAC commands */
49#define CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_AFTER_MSK 0x1
50
51struct iwm_lmac_cal_cfg_elt {
52 __le32 enable; /* 1 means LMAC needs to do something */
53 __le32 start; /* 1 to start calibration, 0 to stop */
54 __le32 send_res; /* 1 for sending back results */
55 __le32 apply_res; /* 1 for applying calibration results to HW */
56 __le32 reserved;
57} __packed;
58
59struct iwm_lmac_cal_cfg_status {
60 struct iwm_lmac_cal_cfg_elt init;
61 struct iwm_lmac_cal_cfg_elt periodic;
62 __le32 flags; /* CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_AFTER_MSK */
63} __packed;
64
65struct iwm_lmac_cal_cfg_cmd {
66 struct iwm_lmac_cal_cfg_status ucode_cfg;
67 struct iwm_lmac_cal_cfg_status driver_cfg;
68 __le32 reserved;
69} __packed;
70
71struct iwm_lmac_cal_cfg_resp {
72 __le32 status;
73} __packed;
74
75#define IWM_CARD_STATE_SW_HW_ENABLED 0x00
76#define IWM_CARD_STATE_HW_DISABLED 0x01
77#define IWM_CARD_STATE_SW_DISABLED 0x02
78#define IWM_CARD_STATE_CTKILL_DISABLED 0x04
79#define IWM_CARD_STATE_IS_RXON 0x10
80
81struct iwm_lmac_card_state {
82 __le32 flags;
83} __packed;
84
85/**
86 * COEX_PRIORITY_TABLE_CMD
87 *
88 * Priority entry for each state
89 * Will keep two tables, for STA and WIPAN
90 */
91enum {
92 /* UN-ASSOCIATION PART */
93 COEX_UNASSOC_IDLE = 0,
94 COEX_UNASSOC_MANUAL_SCAN,
95 COEX_UNASSOC_AUTO_SCAN,
96
97 /* CALIBRATION */
98 COEX_CALIBRATION,
99 COEX_PERIODIC_CALIBRATION,
100
101 /* CONNECTION */
102 COEX_CONNECTION_ESTAB,
103
104 /* ASSOCIATION PART */
105 COEX_ASSOCIATED_IDLE,
106 COEX_ASSOC_MANUAL_SCAN,
107 COEX_ASSOC_AUTO_SCAN,
108 COEX_ASSOC_ACTIVE_LEVEL,
109
110 /* RF ON/OFF */
111 COEX_RF_ON,
112 COEX_RF_OFF,
113 COEX_STAND_ALONE_DEBUG,
114
115 /* IPNN */
116 COEX_IPAN_ASSOC_LEVEL,
117
118 /* RESERVED */
119 COEX_RSRVD1,
120 COEX_RSRVD2,
121
122 COEX_EVENTS_NUM
123};
124
125#define COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK 0x1
126#define COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK 0x2
127#define COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_MSK 0x4
128
129struct coex_event {
130 u8 req_prio;
131 u8 win_med_prio;
132 u8 reserved;
133 u8 flags;
134} __packed;
135
136#define COEX_FLAGS_STA_TABLE_VALID_MSK 0x1
137#define COEX_FLAGS_UNASSOC_WAKEUP_UMASK_MSK 0x4
138#define COEX_FLAGS_ASSOC_WAKEUP_UMASK_MSK 0x8
139#define COEX_FLAGS_COEX_ENABLE_MSK 0x80
140
141struct iwm_coex_prio_table_cmd {
142 u8 flags;
143 u8 reserved[3];
144 struct coex_event sta_prio[COEX_EVENTS_NUM];
145} __packed;
146
147/* Coexistence definitions
148 *
149 * Constants to fill in the Priorities' Tables
150 * RP - Requested Priority
151 * WP - Win Medium Priority: priority assigned when the contention has been won
152 * FLAGS - Combination of COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK and
153 * COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK
154 */
155
156#define COEX_UNASSOC_IDLE_FLAGS 0
157#define COEX_UNASSOC_MANUAL_SCAN_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
158 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK)
159#define COEX_UNASSOC_AUTO_SCAN_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
160 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK)
161#define COEX_CALIBRATION_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
162 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK)
163#define COEX_PERIODIC_CALIBRATION_FLAGS 0
164/* COEX_CONNECTION_ESTAB: we need DELAY_MEDIUM_FREE_NTFY to let WiMAX
165 * disconnect from network. */
166#define COEX_CONNECTION_ESTAB_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
167 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK | \
168 COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_MSK)
169#define COEX_ASSOCIATED_IDLE_FLAGS 0
170#define COEX_ASSOC_MANUAL_SCAN_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
171 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK)
172#define COEX_ASSOC_AUTO_SCAN_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
173 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK)
174#define COEX_ASSOC_ACTIVE_LEVEL_FLAGS 0
175#define COEX_RF_ON_FLAGS 0
176#define COEX_RF_OFF_FLAGS 0
177#define COEX_STAND_ALONE_DEBUG_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
178 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK)
179#define COEX_IPAN_ASSOC_LEVEL_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
180 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK | \
181 COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_MSK)
182#define COEX_RSRVD1_FLAGS 0
183#define COEX_RSRVD2_FLAGS 0
184/* XOR_RF_ON is the event wrapping all radio ownership. We need
185 * DELAY_MEDIUM_FREE_NTFY to let WiMAX disconnect from network. */
186#define COEX_XOR_RF_ON_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
187 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK | \
188 COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_MSK)
189
190/* CT kill config command */
191struct iwm_ct_kill_cfg_cmd {
192 u32 exit_threshold;
193 u32 reserved;
194 u32 entry_threshold;
195} __packed;
196
197
198/* LMAC OP CODES */
199#define REPLY_PAD 0x0
200#define REPLY_ALIVE 0x1
201#define REPLY_ERROR 0x2
202#define REPLY_ECHO 0x3
203#define REPLY_HALT 0x6
204
205/* RXON state commands */
206#define REPLY_RX_ON 0x10
207#define REPLY_RX_ON_ASSOC 0x11
208#define REPLY_RX_OFF 0x12
209#define REPLY_QOS_PARAM 0x13
210#define REPLY_RX_ON_TIMING 0x14
211#define REPLY_INTERNAL_QOS_PARAM 0x15
212#define REPLY_RX_INT_TIMEOUT_CNFG 0x16
213#define REPLY_NULL 0x17
214
215/* Multi-Station support */
216#define REPLY_ADD_STA 0x18
217#define REPLY_REMOVE_STA 0x19
218#define REPLY_RESET_ALL_STA 0x1a
219
220/* RX, TX */
221#define REPLY_ALM_RX 0x1b
222#define REPLY_TX 0x1c
223#define REPLY_TXFIFO_FLUSH 0x1e
224
225/* MISC commands */
226#define REPLY_MGMT_MCAST_KEY 0x1f
227#define REPLY_WEPKEY 0x20
228#define REPLY_INIT_IV 0x21
229#define REPLY_WRITE_MIB 0x22
230#define REPLY_READ_MIB 0x23
231#define REPLY_RADIO_FE 0x24
232#define REPLY_TXFIFO_CFG 0x25
233#define REPLY_WRITE_READ 0x26
234#define REPLY_INSTALL_SEC_KEY 0x27
235
236
237#define REPLY_RATE_SCALE 0x47
238#define REPLY_LEDS_CMD 0x48
239#define REPLY_TX_LINK_QUALITY_CMD 0x4e
240#define REPLY_ANA_MIB_OVERRIDE_CMD 0x4f
241#define REPLY_WRITE2REG_CMD 0x50
242
243/* winfi-wifi coexistence */
244#define COEX_PRIORITY_TABLE_CMD 0x5a
245#define COEX_MEDIUM_NOTIFICATION 0x5b
246#define COEX_EVENT_CMD 0x5c
247
248/* more Protocol and Protocol-test commands */
249#define REPLY_MAX_SLEEP_TIME_CMD 0x61
250#define CALIBRATION_CFG_CMD 0x65
251#define CALIBRATION_RES_NOTIFICATION 0x66
252#define CALIBRATION_COMPLETE_NOTIFICATION 0x67
253
254/* Measurements */
255#define REPLY_QUIET_CMD 0x71
256#define REPLY_CHANNEL_SWITCH 0x72
257#define CHANNEL_SWITCH_NOTIFICATION 0x73
258
259#define REPLY_SPECTRUM_MEASUREMENT_CMD 0x74
260#define SPECTRUM_MEASURE_NOTIFICATION 0x75
261#define REPLY_MEASUREMENT_ABORT_CMD 0x76
262
263/* Power Management */
264#define POWER_TABLE_CMD 0x77
265#define SAVE_RESTORE_ADDRESS_CMD 0x78
266#define REPLY_WATERMARK_CMD 0x79
267#define PM_DEBUG_STATISTIC_NOTIFIC 0x7B
268#define PD_FLUSH_N_NOTIFICATION 0x7C
269
270/* Scan commands and notifications */
271#define REPLY_SCAN_REQUEST_CMD 0x80
272#define REPLY_SCAN_ABORT_CMD 0x81
273#define SCAN_START_NOTIFICATION 0x82
274#define SCAN_RESULTS_NOTIFICATION 0x83
275#define SCAN_COMPLETE_NOTIFICATION 0x84
276
277/* Continuous TX commands */
278#define REPLY_CONT_TX_CMD 0x85
279#define END_OF_CONT_TX_NOTIFICATION 0x86
280
281/* Timer/Eeprom commands */
282#define TIMER_CMD 0x87
283#define EEPROM_WRITE_CMD 0x88
284
285/* PAPD commands */
286#define FEEDBACK_REQUEST_NOTIFICATION 0x8b
287#define REPLY_CW_CMD 0x8c
288
289/* IBSS/AP commands Continue */
290#define BEACON_NOTIFICATION 0x90
291#define REPLY_TX_BEACON 0x91
292#define REPLY_REQUEST_ATIM 0x93
293#define WHO_IS_AWAKE_NOTIFICATION 0x94
294#define TX_PWR_DBM_LIMIT_CMD 0x95
295#define QUIET_NOTIFICATION 0x96
296#define TX_PWR_TABLE_CMD 0x97
297#define TX_ANT_CONFIGURATION_CMD 0x98
298#define MEASURE_ABORT_NOTIFICATION 0x99
299#define REPLY_CALIBRATION_TUNE 0x9a
300
301/* bt config command */
302#define REPLY_BT_CONFIG 0x9b
303#define REPLY_STATISTICS_CMD 0x9c
304#define STATISTICS_NOTIFICATION 0x9d
305
306/* RF-KILL commands and notifications */
307#define REPLY_CARD_STATE_CMD 0xa0
308#define CARD_STATE_NOTIFICATION 0xa1
309
310/* Missed beacons notification */
311#define MISSED_BEACONS_NOTIFICATION 0xa2
312#define MISSED_BEACONS_NOTIFICATION_TH_CMD 0xa3
313
314#define REPLY_CT_KILL_CONFIG_CMD 0xa4
315
316/* HD commands and notifications */
317#define REPLY_HD_PARAMS_CMD 0xa6
318#define HD_PARAMS_NOTIFICATION 0xa7
319#define SENSITIVITY_CMD 0xa8
320#define U_APSD_PARAMS_CMD 0xa9
321#define NOISY_PLATFORM_CMD 0xaa
322#define ILLEGAL_CMD 0xac
323#define REPLY_PHY_CALIBRATION_CMD 0xb0
324#define REPLAY_RX_GAIN_CALIB_CMD 0xb1
325
326/* WiPAN commands */
327#define REPLY_WIPAN_PARAMS_CMD 0xb2
328#define REPLY_WIPAN_RX_ON_CMD 0xb3
329#define REPLY_WIPAN_RX_ON_TIMING 0xb4
330#define REPLY_WIPAN_TX_PWR_TABLE_CMD 0xb5
331#define REPLY_WIPAN_RXON_ASSOC_CMD 0xb6
332#define REPLY_WIPAN_QOS_PARAM 0xb7
333#define WIPAN_REPLY_WEPKEY 0xb8
334
335/* BeamForming commands */
336#define BEAMFORMER_CFG_CMD 0xba
337#define BEAMFORMEE_NOTIFICATION 0xbb
338
339/* TGn new Commands */
340#define REPLY_RX_PHY_CMD 0xc0
341#define REPLY_RX_MPDU_CMD 0xc1
342#define REPLY_MULTICAST_HASH 0xc2
343#define REPLY_KDR_RX 0xc3
344#define REPLY_RX_DSP_EXT_INFO 0xc4
345#define REPLY_COMPRESSED_BA 0xc5
346
347/* PNC commands */
348#define PNC_CONFIG_CMD 0xc8
349#define PNC_UPDATE_TABLE_CMD 0xc9
350#define XVT_GENERAL_CTRL_CMD 0xca
351#define REPLY_LEGACY_RADIO_FE 0xdd
352
353/* WoWLAN commands */
354#define WOWLAN_PATTERNS 0xe0
355#define WOWLAN_WAKEUP_FILTER 0xe1
356#define WOWLAN_TSC_RSC_PARAM 0xe2
357#define WOWLAN_TKIP_PARAM 0xe3
358#define WOWLAN_KEK_KCK_MATERIAL 0xe4
359#define WOWLAN_GET_STATUSES 0xe5
360#define WOWLAN_TX_POWER_PER_DB 0xe6
361#define REPLY_WOWLAN_GET_STATUSES WOWLAN_GET_STATUSES
362
363#define REPLY_DEBUG_CMD 0xf0
364#define REPLY_DSP_DEBUG_CMD 0xf1
365#define REPLY_DEBUG_MONITOR_CMD 0xf2
366#define REPLY_DEBUG_XVT_CMD 0xf3
367#define REPLY_DEBUG_DC_CALIB 0xf4
368#define REPLY_DYNAMIC_BP 0xf5
369
370/* General purpose Commands */
371#define REPLY_GP1_CMD 0xfa
372#define REPLY_GP2_CMD 0xfb
373#define REPLY_GP3_CMD 0xfc
374#define REPLY_GP4_CMD 0xfd
375#define REPLY_REPLAY_WRAPPER 0xfe
376#define REPLY_FRAME_DURATION_CALC_CMD 0xff
377
378#define LMAC_COMMAND_ID_MAX 0xff
379#define LMAC_COMMAND_ID_NUM (LMAC_COMMAND_ID_MAX + 1)
380
381
382/* Calibration */
383
384enum {
385 PHY_CALIBRATE_DC_CMD = 0,
386 PHY_CALIBRATE_LO_CMD = 1,
387 PHY_CALIBRATE_RX_BB_CMD = 2,
388 PHY_CALIBRATE_TX_IQ_CMD = 3,
389 PHY_CALIBRATE_RX_IQ_CMD = 4,
390 PHY_CALIBRATION_NOISE_CMD = 5,
391 PHY_CALIBRATE_AGC_TABLE_CMD = 6,
392 PHY_CALIBRATE_CRYSTAL_FRQ_CMD = 7,
393 PHY_CALIBRATE_OPCODES_NUM,
394 SHILOH_PHY_CALIBRATE_DC_CMD = 8,
395 SHILOH_PHY_CALIBRATE_LO_CMD = 9,
396 SHILOH_PHY_CALIBRATE_RX_BB_CMD = 10,
397 SHILOH_PHY_CALIBRATE_TX_IQ_CMD = 11,
398 SHILOH_PHY_CALIBRATE_RX_IQ_CMD = 12,
399 SHILOH_PHY_CALIBRATION_NOISE_CMD = 13,
400 SHILOH_PHY_CALIBRATE_AGC_TABLE_CMD = 14,
401 SHILOH_PHY_CALIBRATE_CRYSTAL_FRQ_CMD = 15,
402 SHILOH_PHY_CALIBRATE_BASE_BAND_CMD = 16,
403 SHILOH_PHY_CALIBRATE_TXIQ_PERIODIC_CMD = 17,
404 CALIBRATION_CMD_NUM,
405};
406
407enum {
408 CALIB_CFG_RX_BB_IDX = 0,
409 CALIB_CFG_DC_IDX = 1,
410 CALIB_CFG_LO_IDX = 2,
411 CALIB_CFG_TX_IQ_IDX = 3,
412 CALIB_CFG_RX_IQ_IDX = 4,
413 CALIB_CFG_NOISE_IDX = 5,
414 CALIB_CFG_CRYSTAL_IDX = 6,
415 CALIB_CFG_TEMPERATURE_IDX = 7,
416 CALIB_CFG_PAPD_IDX = 8,
417 CALIB_CFG_LAST_IDX = CALIB_CFG_PAPD_IDX,
418 CALIB_CFG_MODULE_NUM,
419};
420
421#define IWM_CALIB_MAP_INIT_MSK 0xFFFF
422#define IWM_CALIB_MAP_PER_LMAC(m) ((m & 0xFF0000) >> 16)
423#define IWM_CALIB_MAP_PER_UMAC(m) ((m & 0xFF000000) >> 24)
424#define IWM_CALIB_OPCODE_TO_INDEX(op) (op - PHY_CALIBRATE_OPCODES_NUM)
425
426struct iwm_lmac_calib_hdr {
427 u8 opcode;
428 u8 first_grp;
429 u8 grp_num;
430 u8 all_data_valid;
431} __packed;
432
433#define IWM_LMAC_CALIB_FREQ_GROUPS_NR 7
434#define IWM_CALIB_FREQ_GROUPS_NR 5
435#define IWM_CALIB_DC_MODES_NR 12
436
437struct iwm_calib_rxiq_entry {
438 u16 ptam_postdist_ars;
439 u16 ptam_postdist_arc;
440} __packed;
441
442struct iwm_calib_rxiq_group {
443 struct iwm_calib_rxiq_entry mode[IWM_CALIB_DC_MODES_NR];
444} __packed;
445
446struct iwm_lmac_calib_rxiq {
447 struct iwm_calib_rxiq_group group[IWM_LMAC_CALIB_FREQ_GROUPS_NR];
448} __packed;
449
450struct iwm_calib_rxiq {
451 struct iwm_lmac_calib_hdr hdr;
452 struct iwm_calib_rxiq_group group[IWM_CALIB_FREQ_GROUPS_NR];
453} __packed;
454
455#define LMAC_STA_ID_SEED 0x0f
456#define LMAC_STA_ID_POS 0
457
458#define LMAC_STA_COLOR_SEED 0x7
459#define LMAC_STA_COLOR_POS 4
460
461struct iwm_lmac_power_report {
462 u8 pa_status;
463 u8 pa_integ_res_A[3];
464 u8 pa_integ_res_B[3];
465 u8 pa_integ_res_C[3];
466} __packed;
467
468struct iwm_lmac_tx_resp {
469 u8 frame_cnt; /* 1-no aggregation, greater then 1 - aggregation */
470 u8 bt_kill_cnt;
471 __le16 retry_cnt;
472 __le32 initial_tx_rate;
473 __le16 wireless_media_time;
474 struct iwm_lmac_power_report power_report;
475 __le32 tfd_info;
476 __le16 seq_ctl;
477 __le16 byte_cnt;
478 u8 tlc_rate_info;
479 u8 ra_tid;
480 __le16 frame_ctl;
481 __le32 status;
482} __packed;
483
484#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c
deleted file mode 100644
index 1f868b166d10..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/main.c
+++ /dev/null
@@ -1,847 +0,0 @@
1/*
2 * Intel Wireless Multicomm 3200 WiFi driver
3 *
4 * Copyright (C) 2009 Intel Corporation. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Intel Corporation nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 *
33 * Intel Corporation <ilw@linux.intel.com>
34 * Samuel Ortiz <samuel.ortiz@intel.com>
35 * Zhu Yi <yi.zhu@intel.com>
36 *
37 */
38
39#include <linux/kernel.h>
40#include <linux/netdevice.h>
41#include <linux/sched.h>
42#include <linux/ieee80211.h>
43#include <linux/wireless.h>
44#include <linux/slab.h>
45#include <linux/moduleparam.h>
46
47#include "iwm.h"
48#include "debug.h"
49#include "bus.h"
50#include "umac.h"
51#include "commands.h"
52#include "hal.h"
53#include "fw.h"
54#include "rx.h"
55
56static struct iwm_conf def_iwm_conf = {
57
58 .sdio_ior_timeout = 5000,
59 .calib_map = BIT(CALIB_CFG_DC_IDX) |
60 BIT(CALIB_CFG_LO_IDX) |
61 BIT(CALIB_CFG_TX_IQ_IDX) |
62 BIT(CALIB_CFG_RX_IQ_IDX) |
63 BIT(SHILOH_PHY_CALIBRATE_BASE_BAND_CMD),
64 .expected_calib_map = BIT(PHY_CALIBRATE_DC_CMD) |
65 BIT(PHY_CALIBRATE_LO_CMD) |
66 BIT(PHY_CALIBRATE_TX_IQ_CMD) |
67 BIT(PHY_CALIBRATE_RX_IQ_CMD) |
68 BIT(SHILOH_PHY_CALIBRATE_BASE_BAND_CMD),
69 .ct_kill_entry = 110,
70 .ct_kill_exit = 110,
71 .reset_on_fatal_err = 1,
72 .auto_connect = 1,
73 .enable_qos = 1,
74 .mode = UMAC_MODE_BSS,
75
76 /* UMAC configuration */
77 .power_index = 0,
78 .frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD,
79 .rts_threshold = IEEE80211_MAX_RTS_THRESHOLD,
80 .cts_to_self = 0,
81
82 .assoc_timeout = 2,
83 .roam_timeout = 10,
84 .wireless_mode = WIRELESS_MODE_11A | WIRELESS_MODE_11G |
85 WIRELESS_MODE_11N,
86
87 /* IBSS */
88 .ibss_band = UMAC_BAND_2GHZ,
89 .ibss_channel = 1,
90
91 .mac_addr = {0x00, 0x02, 0xb3, 0x01, 0x02, 0x03},
92};
93
94static bool modparam_reset;
95module_param_named(reset, modparam_reset, bool, 0644);
96MODULE_PARM_DESC(reset, "reset on firmware errors (default 0 [not reset])");
97
98static bool modparam_wimax_enable = true;
99module_param_named(wimax_enable, modparam_wimax_enable, bool, 0644);
100MODULE_PARM_DESC(wimax_enable, "Enable wimax core (default 1 [wimax enabled])");
101
102int iwm_mode_to_nl80211_iftype(int mode)
103{
104 switch (mode) {
105 case UMAC_MODE_BSS:
106 return NL80211_IFTYPE_STATION;
107 case UMAC_MODE_IBSS:
108 return NL80211_IFTYPE_ADHOC;
109 default:
110 return NL80211_IFTYPE_UNSPECIFIED;
111 }
112
113 return 0;
114}
115
116static void iwm_statistics_request(struct work_struct *work)
117{
118 struct iwm_priv *iwm =
119 container_of(work, struct iwm_priv, stats_request.work);
120
121 iwm_send_umac_stats_req(iwm, 0);
122}
123
124static void iwm_disconnect_work(struct work_struct *work)
125{
126 struct iwm_priv *iwm =
127 container_of(work, struct iwm_priv, disconnect.work);
128
129 if (iwm->umac_profile_active)
130 iwm_invalidate_mlme_profile(iwm);
131
132 clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
133 iwm->umac_profile_active = false;
134 memset(iwm->bssid, 0, ETH_ALEN);
135 iwm->channel = 0;
136
137 iwm_link_off(iwm);
138
139 wake_up_interruptible(&iwm->mlme_queue);
140
141 cfg80211_disconnected(iwm_to_ndev(iwm), 0, NULL, 0, GFP_KERNEL);
142}
143
144static void iwm_ct_kill_work(struct work_struct *work)
145{
146 struct iwm_priv *iwm =
147 container_of(work, struct iwm_priv, ct_kill_delay.work);
148 struct wiphy *wiphy = iwm_to_wiphy(iwm);
149
150 IWM_INFO(iwm, "CT kill delay timeout\n");
151
152 wiphy_rfkill_set_hw_state(wiphy, false);
153}
154
155static int __iwm_up(struct iwm_priv *iwm);
156static int __iwm_down(struct iwm_priv *iwm);
157
158static void iwm_reset_worker(struct work_struct *work)
159{
160 struct iwm_priv *iwm;
161 struct iwm_umac_profile *profile = NULL;
162 int uninitialized_var(ret), retry = 0;
163
164 iwm = container_of(work, struct iwm_priv, reset_worker);
165
166 /*
167 * XXX: The iwm->mutex is introduced purely for this reset work,
168 * because the other users for iwm_up and iwm_down are only netdev
169 * ndo_open and ndo_stop which are already protected by rtnl.
170 * Please remove iwm->mutex together if iwm_reset_worker() is not
171 * required in the future.
172 */
173 if (!mutex_trylock(&iwm->mutex)) {
174 IWM_WARN(iwm, "We are in the middle of interface bringing "
175 "UP/DOWN. Skip driver resetting.\n");
176 return;
177 }
178
179 if (iwm->umac_profile_active) {
180 profile = kmalloc(sizeof(struct iwm_umac_profile), GFP_KERNEL);
181 if (profile)
182 memcpy(profile, iwm->umac_profile, sizeof(*profile));
183 else
184 IWM_ERR(iwm, "Couldn't alloc memory for profile\n");
185 }
186
187 __iwm_down(iwm);
188
189 while (retry++ < 3) {
190 ret = __iwm_up(iwm);
191 if (!ret)
192 break;
193
194 schedule_timeout_uninterruptible(10 * HZ);
195 }
196
197 if (ret) {
198 IWM_WARN(iwm, "iwm_up() failed: %d\n", ret);
199
200 kfree(profile);
201 goto out;
202 }
203
204 if (profile) {
205 IWM_DBG_MLME(iwm, DBG, "Resend UMAC profile\n");
206 memcpy(iwm->umac_profile, profile, sizeof(*profile));
207 iwm_send_mlme_profile(iwm);
208 kfree(profile);
209 } else
210 clear_bit(IWM_STATUS_RESETTING, &iwm->status);
211
212 out:
213 mutex_unlock(&iwm->mutex);
214}
215
216static void iwm_auth_retry_worker(struct work_struct *work)
217{
218 struct iwm_priv *iwm;
219 int i, ret;
220
221 iwm = container_of(work, struct iwm_priv, auth_retry_worker);
222 if (iwm->umac_profile_active) {
223 ret = iwm_invalidate_mlme_profile(iwm);
224 if (ret < 0)
225 return;
226 }
227
228 iwm->umac_profile->sec.auth_type = UMAC_AUTH_TYPE_LEGACY_PSK;
229
230 ret = iwm_send_mlme_profile(iwm);
231 if (ret < 0)
232 return;
233
234 for (i = 0; i < IWM_NUM_KEYS; i++)
235 if (iwm->keys[i].key_len)
236 iwm_set_key(iwm, 0, &iwm->keys[i]);
237
238 iwm_set_tx_key(iwm, iwm->default_key);
239}
240
241
242
243static void iwm_watchdog(unsigned long data)
244{
245 struct iwm_priv *iwm = (struct iwm_priv *)data;
246
247 IWM_WARN(iwm, "Watchdog expired: UMAC stalls!\n");
248
249 if (modparam_reset)
250 iwm_resetting(iwm);
251}
252
253int iwm_priv_init(struct iwm_priv *iwm)
254{
255 int i, j;
256 char name[32];
257
258 iwm->status = 0;
259 INIT_LIST_HEAD(&iwm->pending_notif);
260 init_waitqueue_head(&iwm->notif_queue);
261 init_waitqueue_head(&iwm->nonwifi_queue);
262 init_waitqueue_head(&iwm->wifi_ntfy_queue);
263 init_waitqueue_head(&iwm->mlme_queue);
264 memcpy(&iwm->conf, &def_iwm_conf, sizeof(struct iwm_conf));
265 spin_lock_init(&iwm->tx_credit.lock);
266 INIT_LIST_HEAD(&iwm->wifi_pending_cmd);
267 INIT_LIST_HEAD(&iwm->nonwifi_pending_cmd);
268 iwm->wifi_seq_num = UMAC_WIFI_SEQ_NUM_BASE;
269 iwm->nonwifi_seq_num = UMAC_NONWIFI_SEQ_NUM_BASE;
270 spin_lock_init(&iwm->cmd_lock);
271 iwm->scan_id = 1;
272 INIT_DELAYED_WORK(&iwm->stats_request, iwm_statistics_request);
273 INIT_DELAYED_WORK(&iwm->disconnect, iwm_disconnect_work);
274 INIT_DELAYED_WORK(&iwm->ct_kill_delay, iwm_ct_kill_work);
275 INIT_WORK(&iwm->reset_worker, iwm_reset_worker);
276 INIT_WORK(&iwm->auth_retry_worker, iwm_auth_retry_worker);
277 INIT_LIST_HEAD(&iwm->bss_list);
278
279 skb_queue_head_init(&iwm->rx_list);
280 INIT_LIST_HEAD(&iwm->rx_tickets);
281 spin_lock_init(&iwm->ticket_lock);
282 for (i = 0; i < IWM_RX_ID_HASH; i++) {
283 INIT_LIST_HEAD(&iwm->rx_packets[i]);
284 spin_lock_init(&iwm->packet_lock[i]);
285 }
286
287 INIT_WORK(&iwm->rx_worker, iwm_rx_worker);
288
289 iwm->rx_wq = create_singlethread_workqueue(KBUILD_MODNAME "_rx");
290 if (!iwm->rx_wq)
291 return -EAGAIN;
292
293 for (i = 0; i < IWM_TX_QUEUES; i++) {
294 INIT_WORK(&iwm->txq[i].worker, iwm_tx_worker);
295 snprintf(name, 32, KBUILD_MODNAME "_tx_%d", i);
296 iwm->txq[i].id = i;
297 iwm->txq[i].wq = create_singlethread_workqueue(name);
298 if (!iwm->txq[i].wq)
299 return -EAGAIN;
300
301 skb_queue_head_init(&iwm->txq[i].queue);
302 skb_queue_head_init(&iwm->txq[i].stopped_queue);
303 spin_lock_init(&iwm->txq[i].lock);
304 }
305
306 for (i = 0; i < IWM_NUM_KEYS; i++)
307 memset(&iwm->keys[i], 0, sizeof(struct iwm_key));
308
309 iwm->default_key = -1;
310
311 for (i = 0; i < IWM_STA_TABLE_NUM; i++)
312 for (j = 0; j < IWM_UMAC_TID_NR; j++) {
313 mutex_init(&iwm->sta_table[i].tid_info[j].mutex);
314 iwm->sta_table[i].tid_info[j].stopped = false;
315 }
316
317 init_timer(&iwm->watchdog);
318 iwm->watchdog.function = iwm_watchdog;
319 iwm->watchdog.data = (unsigned long)iwm;
320 mutex_init(&iwm->mutex);
321
322 iwm->last_fw_err = kzalloc(sizeof(struct iwm_fw_error_hdr),
323 GFP_KERNEL);
324 if (iwm->last_fw_err == NULL)
325 return -ENOMEM;
326
327 return 0;
328}
329
330void iwm_priv_deinit(struct iwm_priv *iwm)
331{
332 int i;
333
334 for (i = 0; i < IWM_TX_QUEUES; i++)
335 destroy_workqueue(iwm->txq[i].wq);
336
337 destroy_workqueue(iwm->rx_wq);
338 kfree(iwm->last_fw_err);
339}
340
341/*
342 * We reset all the structures, and we reset the UMAC.
343 * After calling this routine, you're expected to reload
344 * the firmware.
345 */
346void iwm_reset(struct iwm_priv *iwm)
347{
348 struct iwm_notif *notif, *next;
349
350 if (test_bit(IWM_STATUS_READY, &iwm->status))
351 iwm_target_reset(iwm);
352
353 if (test_bit(IWM_STATUS_RESETTING, &iwm->status)) {
354 iwm->status = 0;
355 set_bit(IWM_STATUS_RESETTING, &iwm->status);
356 } else
357 iwm->status = 0;
358 iwm->scan_id = 1;
359
360 list_for_each_entry_safe(notif, next, &iwm->pending_notif, pending) {
361 list_del(&notif->pending);
362 kfree(notif->buf);
363 kfree(notif);
364 }
365
366 iwm_cmd_flush(iwm);
367
368 flush_workqueue(iwm->rx_wq);
369
370 iwm_link_off(iwm);
371}
372
373void iwm_resetting(struct iwm_priv *iwm)
374{
375 set_bit(IWM_STATUS_RESETTING, &iwm->status);
376
377 schedule_work(&iwm->reset_worker);
378}
379
380/*
381 * Notification code:
382 *
383 * We're faced with the following issue: Any host command can
384 * have an answer or not, and if there's an answer to expect,
385 * it can be treated synchronously or asynchronously.
386 * To work around the synchronous answer case, we implemented
387 * our notification mechanism.
388 * When a code path needs to wait for a command response
389 * synchronously, it calls notif_handle(), which waits for the
390 * right notification to show up, and then process it. Before
391 * starting to wait, it registered as a waiter for this specific
392 * answer (by toggling a bit in on of the handler_map), so that
393 * the rx code knows that it needs to send a notification to the
394 * waiting processes. It does so by calling iwm_notif_send(),
395 * which adds the notification to the pending notifications list,
396 * and then wakes the waiting processes up.
397 */
398int iwm_notif_send(struct iwm_priv *iwm, struct iwm_wifi_cmd *cmd,
399 u8 cmd_id, u8 source, u8 *buf, unsigned long buf_size)
400{
401 struct iwm_notif *notif;
402
403 notif = kzalloc(sizeof(struct iwm_notif), GFP_KERNEL);
404 if (!notif) {
405 IWM_ERR(iwm, "Couldn't alloc memory for notification\n");
406 return -ENOMEM;
407 }
408
409 INIT_LIST_HEAD(&notif->pending);
410 notif->cmd = cmd;
411 notif->cmd_id = cmd_id;
412 notif->src = source;
413 notif->buf = kzalloc(buf_size, GFP_KERNEL);
414 if (!notif->buf) {
415 IWM_ERR(iwm, "Couldn't alloc notification buffer\n");
416 kfree(notif);
417 return -ENOMEM;
418 }
419 notif->buf_size = buf_size;
420 memcpy(notif->buf, buf, buf_size);
421 list_add_tail(&notif->pending, &iwm->pending_notif);
422
423 wake_up_interruptible(&iwm->notif_queue);
424
425 return 0;
426}
427
428static struct iwm_notif *iwm_notif_find(struct iwm_priv *iwm, u32 cmd,
429 u8 source)
430{
431 struct iwm_notif *notif;
432
433 list_for_each_entry(notif, &iwm->pending_notif, pending) {
434 if ((notif->cmd_id == cmd) && (notif->src == source)) {
435 list_del(&notif->pending);
436 return notif;
437 }
438 }
439
440 return NULL;
441}
442
443static struct iwm_notif *iwm_notif_wait(struct iwm_priv *iwm, u32 cmd,
444 u8 source, long timeout)
445{
446 int ret;
447 struct iwm_notif *notif;
448 unsigned long *map = NULL;
449
450 switch (source) {
451 case IWM_SRC_LMAC:
452 map = &iwm->lmac_handler_map[0];
453 break;
454 case IWM_SRC_UMAC:
455 map = &iwm->umac_handler_map[0];
456 break;
457 case IWM_SRC_UDMA:
458 map = &iwm->udma_handler_map[0];
459 break;
460 }
461
462 set_bit(cmd, map);
463
464 ret = wait_event_interruptible_timeout(iwm->notif_queue,
465 ((notif = iwm_notif_find(iwm, cmd, source)) != NULL),
466 timeout);
467 clear_bit(cmd, map);
468
469 if (!ret)
470 return NULL;
471
472 return notif;
473}
474
475int iwm_notif_handle(struct iwm_priv *iwm, u32 cmd, u8 source, long timeout)
476{
477 int ret;
478 struct iwm_notif *notif;
479
480 notif = iwm_notif_wait(iwm, cmd, source, timeout);
481 if (!notif)
482 return -ETIME;
483
484 ret = iwm_rx_handle_resp(iwm, notif->buf, notif->buf_size, notif->cmd);
485 kfree(notif->buf);
486 kfree(notif);
487
488 return ret;
489}
490
491static int iwm_config_boot_params(struct iwm_priv *iwm)
492{
493 struct iwm_udma_nonwifi_cmd target_cmd;
494 int ret;
495
496 /* check Wimax is off and config debug monitor */
497 if (!modparam_wimax_enable) {
498 u32 data1 = 0x1f;
499 u32 addr1 = 0x606BE258;
500
501 u32 data2_set = 0x0;
502 u32 data2_clr = 0x1;
503 u32 addr2 = 0x606BE100;
504
505 u32 data3 = 0x1;
506 u32 addr3 = 0x606BEC00;
507
508 target_cmd.resp = 0;
509 target_cmd.handle_by_hw = 0;
510 target_cmd.eop = 1;
511
512 target_cmd.opcode = UMAC_HDI_OUT_OPCODE_WRITE;
513 target_cmd.addr = cpu_to_le32(addr1);
514 target_cmd.op1_sz = cpu_to_le32(sizeof(u32));
515 target_cmd.op2 = 0;
516
517 ret = iwm_hal_send_target_cmd(iwm, &target_cmd, &data1);
518 if (ret < 0) {
519 IWM_ERR(iwm, "iwm_hal_send_target_cmd failed\n");
520 return ret;
521 }
522
523 target_cmd.opcode = UMAC_HDI_OUT_OPCODE_READ_MODIFY_WRITE;
524 target_cmd.addr = cpu_to_le32(addr2);
525 target_cmd.op1_sz = cpu_to_le32(data2_set);
526 target_cmd.op2 = cpu_to_le32(data2_clr);
527
528 ret = iwm_hal_send_target_cmd(iwm, &target_cmd, &data1);
529 if (ret < 0) {
530 IWM_ERR(iwm, "iwm_hal_send_target_cmd failed\n");
531 return ret;
532 }
533
534 target_cmd.opcode = UMAC_HDI_OUT_OPCODE_WRITE;
535 target_cmd.addr = cpu_to_le32(addr3);
536 target_cmd.op1_sz = cpu_to_le32(sizeof(u32));
537 target_cmd.op2 = 0;
538
539 ret = iwm_hal_send_target_cmd(iwm, &target_cmd, &data3);
540 if (ret < 0) {
541 IWM_ERR(iwm, "iwm_hal_send_target_cmd failed\n");
542 return ret;
543 }
544 }
545
546 return 0;
547}
548
549void iwm_init_default_profile(struct iwm_priv *iwm,
550 struct iwm_umac_profile *profile)
551{
552 memset(profile, 0, sizeof(struct iwm_umac_profile));
553
554 profile->sec.auth_type = UMAC_AUTH_TYPE_OPEN;
555 profile->sec.flags = UMAC_SEC_FLG_LEGACY_PROFILE;
556 profile->sec.ucast_cipher = UMAC_CIPHER_TYPE_NONE;
557 profile->sec.mcast_cipher = UMAC_CIPHER_TYPE_NONE;
558
559 if (iwm->conf.enable_qos)
560 profile->flags |= cpu_to_le16(UMAC_PROFILE_QOS_ALLOWED);
561
562 profile->wireless_mode = iwm->conf.wireless_mode;
563 profile->mode = cpu_to_le32(iwm->conf.mode);
564
565 profile->ibss.atim = 0;
566 profile->ibss.beacon_interval = 100;
567 profile->ibss.join_only = 0;
568 profile->ibss.band = iwm->conf.ibss_band;
569 profile->ibss.channel = iwm->conf.ibss_channel;
570}
571
572void iwm_link_on(struct iwm_priv *iwm)
573{
574 netif_carrier_on(iwm_to_ndev(iwm));
575 netif_tx_wake_all_queues(iwm_to_ndev(iwm));
576
577 iwm_send_umac_stats_req(iwm, 0);
578}
579
580void iwm_link_off(struct iwm_priv *iwm)
581{
582 struct iw_statistics *wstats = &iwm->wstats;
583 int i;
584
585 netif_tx_stop_all_queues(iwm_to_ndev(iwm));
586 netif_carrier_off(iwm_to_ndev(iwm));
587
588 for (i = 0; i < IWM_TX_QUEUES; i++) {
589 skb_queue_purge(&iwm->txq[i].queue);
590 skb_queue_purge(&iwm->txq[i].stopped_queue);
591
592 iwm->txq[i].concat_count = 0;
593 iwm->txq[i].concat_ptr = iwm->txq[i].concat_buf;
594
595 flush_workqueue(iwm->txq[i].wq);
596 }
597
598 iwm_rx_free(iwm);
599
600 cancel_delayed_work_sync(&iwm->stats_request);
601 memset(wstats, 0, sizeof(struct iw_statistics));
602 wstats->qual.updated = IW_QUAL_ALL_INVALID;
603
604 kfree(iwm->req_ie);
605 iwm->req_ie = NULL;
606 iwm->req_ie_len = 0;
607 kfree(iwm->resp_ie);
608 iwm->resp_ie = NULL;
609 iwm->resp_ie_len = 0;
610
611 del_timer_sync(&iwm->watchdog);
612}
613
614static void iwm_bss_list_clean(struct iwm_priv *iwm)
615{
616 struct iwm_bss_info *bss, *next;
617
618 list_for_each_entry_safe(bss, next, &iwm->bss_list, node) {
619 list_del(&bss->node);
620 kfree(bss->bss);
621 kfree(bss);
622 }
623}
624
625static int iwm_channels_init(struct iwm_priv *iwm)
626{
627 int ret;
628
629 ret = iwm_send_umac_channel_list(iwm);
630 if (ret) {
631 IWM_ERR(iwm, "Send channel list failed\n");
632 return ret;
633 }
634
635 ret = iwm_notif_handle(iwm, UMAC_CMD_OPCODE_GET_CHAN_INFO_LIST,
636 IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT);
637 if (ret) {
638 IWM_ERR(iwm, "Didn't get a channel list notification\n");
639 return ret;
640 }
641
642 return 0;
643}
644
645static int __iwm_up(struct iwm_priv *iwm)
646{
647 int ret;
648 struct iwm_notif *notif_reboot, *notif_ack = NULL;
649 struct wiphy *wiphy = iwm_to_wiphy(iwm);
650 u32 wireless_mode;
651
652 ret = iwm_bus_enable(iwm);
653 if (ret) {
654 IWM_ERR(iwm, "Couldn't enable function\n");
655 return ret;
656 }
657
658 iwm_rx_setup_handlers(iwm);
659
660 /* Wait for initial BARKER_REBOOT from hardware */
661 notif_reboot = iwm_notif_wait(iwm, IWM_BARKER_REBOOT_NOTIFICATION,
662 IWM_SRC_UDMA, 2 * HZ);
663 if (!notif_reboot) {
664 IWM_ERR(iwm, "Wait for REBOOT_BARKER timeout\n");
665 goto err_disable;
666 }
667
668 /* We send the barker back */
669 ret = iwm_bus_send_chunk(iwm, notif_reboot->buf, 16);
670 if (ret) {
671 IWM_ERR(iwm, "REBOOT barker response failed\n");
672 kfree(notif_reboot);
673 goto err_disable;
674 }
675
676 kfree(notif_reboot->buf);
677 kfree(notif_reboot);
678
679 /* Wait for ACK_BARKER from hardware */
680 notif_ack = iwm_notif_wait(iwm, IWM_ACK_BARKER_NOTIFICATION,
681 IWM_SRC_UDMA, 2 * HZ);
682 if (!notif_ack) {
683 IWM_ERR(iwm, "Wait for ACK_BARKER timeout\n");
684 goto err_disable;
685 }
686
687 kfree(notif_ack->buf);
688 kfree(notif_ack);
689
690 /* We start to config static boot parameters */
691 ret = iwm_config_boot_params(iwm);
692 if (ret) {
693 IWM_ERR(iwm, "Config boot parameters failed\n");
694 goto err_disable;
695 }
696
697 ret = iwm_read_mac(iwm, iwm_to_ndev(iwm)->dev_addr);
698 if (ret) {
699 IWM_ERR(iwm, "MAC reading failed\n");
700 goto err_disable;
701 }
702 memcpy(iwm_to_ndev(iwm)->perm_addr, iwm_to_ndev(iwm)->dev_addr,
703 ETH_ALEN);
704
705 /* We can load the FWs */
706 ret = iwm_load_fw(iwm);
707 if (ret) {
708 IWM_ERR(iwm, "FW loading failed\n");
709 goto err_disable;
710 }
711
712 ret = iwm_eeprom_fat_channels(iwm);
713 if (ret) {
714 IWM_ERR(iwm, "Couldnt read HT channels EEPROM entries\n");
715 goto err_fw;
716 }
717
718 /*
719 * Read our SKU capabilities.
720 * If it's valid, we AND the configured wireless mode with the
721 * device EEPROM value as the current profile wireless mode.
722 */
723 wireless_mode = iwm_eeprom_wireless_mode(iwm);
724 if (wireless_mode) {
725 iwm->conf.wireless_mode &= wireless_mode;
726 if (iwm->umac_profile)
727 iwm->umac_profile->wireless_mode =
728 iwm->conf.wireless_mode;
729 } else
730 IWM_ERR(iwm, "Wrong SKU capabilities: 0x%x\n",
731 *((u16 *)iwm_eeprom_access(iwm, IWM_EEPROM_SKU_CAP)));
732
733 snprintf(wiphy->fw_version, sizeof(wiphy->fw_version), "L%s_U%s",
734 iwm->lmac_version, iwm->umac_version);
735
736 /* We configure the UMAC and enable the wifi module */
737 ret = iwm_send_umac_config(iwm,
738 cpu_to_le32(UMAC_RST_CTRL_FLG_WIFI_CORE_EN) |
739 cpu_to_le32(UMAC_RST_CTRL_FLG_WIFI_LINK_EN) |
740 cpu_to_le32(UMAC_RST_CTRL_FLG_WIFI_MLME_EN));
741 if (ret) {
742 IWM_ERR(iwm, "UMAC config failed\n");
743 goto err_fw;
744 }
745
746 ret = iwm_notif_handle(iwm, UMAC_NOTIFY_OPCODE_WIFI_CORE_STATUS,
747 IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT);
748 if (ret) {
749 IWM_ERR(iwm, "Didn't get a wifi core status notification\n");
750 goto err_fw;
751 }
752
753 if (iwm->core_enabled != (UMAC_NTFY_WIFI_CORE_STATUS_LINK_EN |
754 UMAC_NTFY_WIFI_CORE_STATUS_MLME_EN)) {
755 IWM_DBG_BOOT(iwm, DBG, "Not all cores enabled:0x%x\n",
756 iwm->core_enabled);
757 ret = iwm_notif_handle(iwm, UMAC_NOTIFY_OPCODE_WIFI_CORE_STATUS,
758 IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT);
759 if (ret) {
760 IWM_ERR(iwm, "Didn't get a core status notification\n");
761 goto err_fw;
762 }
763
764 if (iwm->core_enabled != (UMAC_NTFY_WIFI_CORE_STATUS_LINK_EN |
765 UMAC_NTFY_WIFI_CORE_STATUS_MLME_EN)) {
766 IWM_ERR(iwm, "Not all cores enabled: 0x%x\n",
767 iwm->core_enabled);
768 goto err_fw;
769 } else {
770 IWM_INFO(iwm, "All cores enabled\n");
771 }
772 }
773
774 ret = iwm_channels_init(iwm);
775 if (ret < 0) {
776 IWM_ERR(iwm, "Couldn't init channels\n");
777 goto err_fw;
778 }
779
780 /* Set the READY bit to indicate interface is brought up successfully */
781 set_bit(IWM_STATUS_READY, &iwm->status);
782
783 return 0;
784
785 err_fw:
786 iwm_eeprom_exit(iwm);
787
788 err_disable:
789 ret = iwm_bus_disable(iwm);
790 if (ret < 0)
791 IWM_ERR(iwm, "Couldn't disable function\n");
792
793 return -EIO;
794}
795
796int iwm_up(struct iwm_priv *iwm)
797{
798 int ret;
799
800 mutex_lock(&iwm->mutex);
801 ret = __iwm_up(iwm);
802 mutex_unlock(&iwm->mutex);
803
804 return ret;
805}
806
807static int __iwm_down(struct iwm_priv *iwm)
808{
809 int ret;
810
811 /* The interface is already down */
812 if (!test_bit(IWM_STATUS_READY, &iwm->status))
813 return 0;
814
815 if (iwm->scan_request) {
816 cfg80211_scan_done(iwm->scan_request, true);
817 iwm->scan_request = NULL;
818 }
819
820 clear_bit(IWM_STATUS_READY, &iwm->status);
821
822 iwm_eeprom_exit(iwm);
823 iwm_bss_list_clean(iwm);
824 iwm_init_default_profile(iwm, iwm->umac_profile);
825 iwm->umac_profile_active = false;
826 iwm->default_key = -1;
827 iwm->core_enabled = 0;
828
829 ret = iwm_bus_disable(iwm);
830 if (ret < 0) {
831 IWM_ERR(iwm, "Couldn't disable function\n");
832 return ret;
833 }
834
835 return 0;
836}
837
838int iwm_down(struct iwm_priv *iwm)
839{
840 int ret;
841
842 mutex_lock(&iwm->mutex);
843 ret = __iwm_down(iwm);
844 mutex_unlock(&iwm->mutex);
845
846 return ret;
847}
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c
deleted file mode 100644
index 5091d77e02ce..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/netdev.c
+++ /dev/null
@@ -1,191 +0,0 @@
1/*
2 * Intel Wireless Multicomm 3200 WiFi driver
3 *
4 * Copyright (C) 2009 Intel Corporation <ilw@linux.intel.com>
5 * Samuel Ortiz <samuel.ortiz@intel.com>
6 * Zhu Yi <yi.zhu@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 */
23
24/*
25 * This is the netdev related hooks for iwm.
26 *
27 * Some interesting code paths:
28 *
29 * iwm_open() (Called at netdev interface bringup time)
30 * -> iwm_up() (main.c)
31 * -> iwm_bus_enable()
32 * -> if_sdio_enable() (In case of an SDIO bus)
33 * -> sdio_enable_func()
34 * -> iwm_notif_wait(BARKER_REBOOT) (wait for reboot barker)
35 * -> iwm_notif_wait(ACK_BARKER) (wait for ACK barker)
36 * -> iwm_load_fw() (fw.c)
37 * -> iwm_load_umac()
38 * -> iwm_load_lmac() (Calibration LMAC)
39 * -> iwm_load_lmac() (Operational LMAC)
40 * -> iwm_send_umac_config()
41 *
42 * iwm_stop() (Called at netdev interface bringdown time)
43 * -> iwm_down()
44 * -> iwm_bus_disable()
45 * -> if_sdio_disable() (In case of an SDIO bus)
46 * -> sdio_disable_func()
47 */
48#include <linux/netdevice.h>
49#include <linux/slab.h>
50
51#include "iwm.h"
52#include "commands.h"
53#include "cfg80211.h"
54#include "debug.h"
55
/* ndo_open: bring the device up. */
static int iwm_open(struct net_device *ndev)
{
	return iwm_up(ndev_to_iwm(ndev));
}
62
/* ndo_stop: take the device down. */
static int iwm_stop(struct net_device *ndev)
{
	return iwm_down(ndev_to_iwm(ndev));
}
69
70/*
71 * iwm AC to queue mapping
72 *
73 * AC_VO -> queue 3
74 * AC_VI -> queue 2
75 * AC_BE -> queue 1
76 * AC_BK -> queue 0
77 */
78static const u16 iwm_1d_to_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
79
80int iwm_tid_to_queue(u16 tid)
81{
82 if (tid > IWM_UMAC_TID_NR - 2)
83 return -EINVAL;
84
85 return iwm_1d_to_queue[tid];
86}
87
88static u16 iwm_select_queue(struct net_device *dev, struct sk_buff *skb)
89{
90 skb->priority = cfg80211_classify8021d(skb);
91
92 return iwm_1d_to_queue[skb->priority];
93}
94
/* netdev callbacks: open/stop drive iwm_up()/iwm_down(); queue
 * selection maps 802.1d priorities onto the four AC TX queues. */
static const struct net_device_ops iwm_netdev_ops = {
	.ndo_open = iwm_open,
	.ndo_stop = iwm_stop,
	.ndo_start_xmit = iwm_xmit_frame,
	.ndo_select_queue = iwm_select_queue,
};
101
/*
 * Allocate and wire up one driver instance: wireless_dev + iwm_priv
 * (via iwm_wdev_alloc(), with @sizeof_bus extra bytes for the
 * bus-specific private data), a multiqueue net_device and the default
 * UMAC profile.
 *
 * Returns the iwm_priv pointer on success, an ERR_PTR() otherwise.
 * Everything allocated here is released by iwm_if_free().
 */
void *iwm_if_alloc(int sizeof_bus, struct device *dev,
		   struct iwm_if_ops *if_ops)
{
	struct net_device *ndev;
	struct wireless_dev *wdev;
	struct iwm_priv *iwm;
	int ret = 0;

	wdev = iwm_wdev_alloc(sizeof_bus, dev);
	if (IS_ERR(wdev))
		return wdev;

	iwm = wdev_to_iwm(wdev);
	iwm->bus_ops = if_ops;
	iwm->wdev = wdev;

	ret = iwm_priv_init(iwm);
	if (ret) {
		dev_err(dev, "failed to init iwm_priv\n");
		goto out_wdev;
	}

	wdev->iftype = iwm_mode_to_nl80211_iftype(iwm->conf.mode);

	/* One netdev TX queue per AC (see iwm_1d_to_queue). */
	ndev = alloc_netdev_mq(0, "wlan%d", ether_setup, IWM_TX_QUEUES);
	if (!ndev) {
		dev_err(dev, "no memory for network device instance\n");
		ret = -ENOMEM;
		goto out_priv;
	}

	ndev->netdev_ops = &iwm_netdev_ops;
	ndev->ieee80211_ptr = wdev;
	SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
	wdev->netdev = ndev;

	iwm->umac_profile = kmalloc(sizeof(struct iwm_umac_profile),
				    GFP_KERNEL);
	if (!iwm->umac_profile) {
		dev_err(dev, "Couldn't alloc memory for profile\n");
		ret = -ENOMEM;
		goto out_profile;
	}

	iwm_init_default_profile(iwm, iwm->umac_profile);

	return iwm;

	/* Unwind in reverse allocation order; out_profile is reached
	 * when the profile allocation fails and frees the netdev. */
 out_profile:
	free_netdev(ndev);

 out_priv:
	iwm_priv_deinit(iwm);

 out_wdev:
	iwm_wdev_free(iwm);
	return ERR_PTR(ret);
}
160
/*
 * Undo iwm_if_alloc(): release the net_device, the private state, the
 * UMAC profile and finally the wireless_dev.  A missing net_device
 * means the instance was never fully set up — nothing to do then.
 */
void iwm_if_free(struct iwm_priv *iwm)
{
	if (!iwm_to_ndev(iwm))
		return;

	/* Stop the deferred CT-kill handler before tearing down. */
	cancel_delayed_work_sync(&iwm->ct_kill_delay);
	free_netdev(iwm_to_ndev(iwm));
	iwm_priv_deinit(iwm);
	kfree(iwm->umac_profile);
	iwm->umac_profile = NULL;
	iwm_wdev_free(iwm);
}
173
174int iwm_if_add(struct iwm_priv *iwm)
175{
176 struct net_device *ndev = iwm_to_ndev(iwm);
177 int ret;
178
179 ret = register_netdev(ndev);
180 if (ret < 0) {
181 dev_err(&ndev->dev, "Failed to register netdev: %d\n", ret);
182 return ret;
183 }
184
185 return 0;
186}
187
/* Unregister the net_device from the network stack. */
void iwm_if_remove(struct iwm_priv *iwm)
{
	struct net_device *ndev = iwm_to_ndev(iwm);

	unregister_netdev(ndev);
}
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
deleted file mode 100644
index 7d708f4395f3..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ /dev/null
@@ -1,1701 +0,0 @@
1/*
2 * Intel Wireless Multicomm 3200 WiFi driver
3 *
4 * Copyright (C) 2009 Intel Corporation. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Intel Corporation nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 *
33 * Intel Corporation <ilw@linux.intel.com>
34 * Samuel Ortiz <samuel.ortiz@intel.com>
35 * Zhu Yi <yi.zhu@intel.com>
36 *
37 */
38
39#include <linux/kernel.h>
40#include <linux/netdevice.h>
41#include <linux/sched.h>
42#include <linux/etherdevice.h>
43#include <linux/wireless.h>
44#include <linux/ieee80211.h>
45#include <linux/if_arp.h>
46#include <linux/list.h>
47#include <linux/slab.h>
48#include <net/iw_handler.h>
49
50#include "iwm.h"
51#include "debug.h"
52#include "hal.h"
53#include "umac.h"
54#include "lmac.h"
55#include "commands.h"
56#include "rx.h"
57#include "cfg80211.h"
58#include "eeprom.h"
59
60static int iwm_rx_check_udma_hdr(struct iwm_udma_in_hdr *hdr)
61{
62 if ((le32_to_cpu(hdr->cmd) == UMAC_PAD_TERMINAL) ||
63 (le32_to_cpu(hdr->size) == UMAC_PAD_TERMINAL))
64 return -EINVAL;
65
66 return 0;
67}
68
69static inline int iwm_rx_resp_size(struct iwm_udma_in_hdr *hdr)
70{
71 return ALIGN(le32_to_cpu(hdr->size) + sizeof(struct iwm_udma_in_hdr),
72 16);
73}
74
75/*
76 * Notification handlers:
77 *
78 * For every possible notification we can receive from the
79 * target, we have a handler.
80 * When we get a target notification, and there is no one
81 * waiting for it, it's just processed through the rx code
82 * path:
83 *
84 * iwm_rx_handle()
85 * -> iwm_rx_handle_umac()
86 * -> iwm_rx_handle_wifi()
87 * -> iwm_rx_handle_resp()
88 * -> iwm_ntf_*()
89 *
90 * OR
91 *
92 * -> iwm_rx_handle_non_wifi()
93 *
94 * If there are processes waiting for this notification, then
95 * iwm_rx_handle_wifi() just wakes those processes up and they
96 * grab the pending notification.
97 */
/*
 * Firmware fatal error notification (UMAC or LMAC): snapshot the error
 * header into iwm->last_fw_err, dump every field to the log, then kick
 * the recovery path via iwm_resetting().
 */
static int iwm_ntf_error(struct iwm_priv *iwm, u8 *buf,
			 unsigned long buf_size, struct iwm_wifi_cmd *cmd)
{
	struct iwm_umac_notif_error *error;
	struct iwm_fw_error_hdr *fw_err;

	error = (struct iwm_umac_notif_error *)buf;
	fw_err = &error->err;

	/* Keep a copy of the last firmware error for later inspection. */
	memcpy(iwm->last_fw_err, fw_err, sizeof(struct iwm_fw_error_hdr));

	IWM_ERR(iwm, "%cMAC FW ERROR:\n",
	 (le32_to_cpu(fw_err->category) == UMAC_SYS_ERR_CAT_LMAC) ? 'L' : 'U');
	IWM_ERR(iwm, "\tCategory:    %d\n", le32_to_cpu(fw_err->category));
	IWM_ERR(iwm, "\tStatus:      0x%x\n", le32_to_cpu(fw_err->status));
	IWM_ERR(iwm, "\tPC:          0x%x\n", le32_to_cpu(fw_err->pc));
	IWM_ERR(iwm, "\tblink1:      %d\n", le32_to_cpu(fw_err->blink1));
	IWM_ERR(iwm, "\tblink2:      %d\n", le32_to_cpu(fw_err->blink2));
	IWM_ERR(iwm, "\tilink1:      %d\n", le32_to_cpu(fw_err->ilink1));
	IWM_ERR(iwm, "\tilink2:      %d\n", le32_to_cpu(fw_err->ilink2));
	IWM_ERR(iwm, "\tData1:       0x%x\n", le32_to_cpu(fw_err->data1));
	IWM_ERR(iwm, "\tData2:       0x%x\n", le32_to_cpu(fw_err->data2));
	IWM_ERR(iwm, "\tLine number: %d\n", le32_to_cpu(fw_err->line_num));
	IWM_ERR(iwm, "\tUMAC status: 0x%x\n", le32_to_cpu(fw_err->umac_status));
	IWM_ERR(iwm, "\tLMAC status: 0x%x\n", le32_to_cpu(fw_err->lmac_status));
	IWM_ERR(iwm, "\tSDIO status: 0x%x\n", le32_to_cpu(fw_err->sdio_status));

	iwm_resetting(iwm);

	return 0;
}
129
130static int iwm_ntf_umac_alive(struct iwm_priv *iwm, u8 *buf,
131 unsigned long buf_size, struct iwm_wifi_cmd *cmd)
132{
133 struct iwm_umac_notif_alive *alive_resp =
134 (struct iwm_umac_notif_alive *)(buf);
135 u16 status = le16_to_cpu(alive_resp->status);
136
137 if (status == UMAC_NTFY_ALIVE_STATUS_ERR) {
138 IWM_ERR(iwm, "Receive error UMAC_ALIVE\n");
139 return -EIO;
140 }
141
142 iwm_tx_credit_init_pools(iwm, alive_resp);
143
144 return 0;
145}
146
147static int iwm_ntf_init_complete(struct iwm_priv *iwm, u8 *buf,
148 unsigned long buf_size,
149 struct iwm_wifi_cmd *cmd)
150{
151 struct wiphy *wiphy = iwm_to_wiphy(iwm);
152 struct iwm_umac_notif_init_complete *init_complete =
153 (struct iwm_umac_notif_init_complete *)(buf);
154 u16 status = le16_to_cpu(init_complete->status);
155 bool blocked = (status == UMAC_NTFY_INIT_COMPLETE_STATUS_ERR);
156
157 if (blocked)
158 IWM_DBG_NTF(iwm, DBG, "Hardware rf kill is on (radio off)\n");
159 else
160 IWM_DBG_NTF(iwm, DBG, "Hardware rf kill is off (radio on)\n");
161
162 wiphy_rfkill_set_hw_state(wiphy, blocked);
163
164 return 0;
165}
166
167static int iwm_ntf_tx_credit_update(struct iwm_priv *iwm, u8 *buf,
168 unsigned long buf_size,
169 struct iwm_wifi_cmd *cmd)
170{
171 int pool_nr, total_freed_pages;
172 unsigned long pool_map;
173 int i, id;
174 struct iwm_umac_notif_page_dealloc *dealloc =
175 (struct iwm_umac_notif_page_dealloc *)buf;
176
177 pool_nr = GET_VAL32(dealloc->changes, UMAC_DEALLOC_NTFY_CHANGES_CNT);
178 pool_map = GET_VAL32(dealloc->changes, UMAC_DEALLOC_NTFY_CHANGES_MSK);
179
180 IWM_DBG_TX(iwm, DBG, "UMAC dealloc notification: pool nr %d, "
181 "update map 0x%lx\n", pool_nr, pool_map);
182
183 spin_lock(&iwm->tx_credit.lock);
184
185 for (i = 0; i < pool_nr; i++) {
186 id = GET_VAL32(dealloc->grp_info[i],
187 UMAC_DEALLOC_NTFY_GROUP_NUM);
188 if (test_bit(id, &pool_map)) {
189 total_freed_pages = GET_VAL32(dealloc->grp_info[i],
190 UMAC_DEALLOC_NTFY_PAGE_CNT);
191 iwm_tx_credit_inc(iwm, id, total_freed_pages);
192 }
193 }
194
195 spin_unlock(&iwm->tx_credit.lock);
196
197 return 0;
198}
199
/* UMAC reset completion: informational only, just log it. */
static int iwm_ntf_umac_reset(struct iwm_priv *iwm, u8 *buf,
			      unsigned long buf_size, struct iwm_wifi_cmd *cmd)
{
	IWM_DBG_NTF(iwm, DBG, "UMAC RESET done\n");

	return 0;
}
207
/*
 * LMAC version notification: the version bytes sit at fixed offsets
 * 9 (major) and 8 (minor) of the raw notification buffer.
 */
static int iwm_ntf_lmac_version(struct iwm_priv *iwm, u8 *buf,
				unsigned long buf_size,
				struct iwm_wifi_cmd *cmd)
{
	IWM_DBG_NTF(iwm, INFO, "LMAC Version: %x.%x\n", buf[9], buf[8]);

	return 0;
}
216
/*
 * REPLY_TX notification: debug-dump the LMAC TX response that follows
 * the UMAC wifi header.  Purely informational, no state changes.
 */
static int iwm_ntf_tx(struct iwm_priv *iwm, u8 *buf,
		      unsigned long buf_size, struct iwm_wifi_cmd *cmd)
{
	struct iwm_lmac_tx_resp *tx_resp;
	struct iwm_umac_wifi_in_hdr *hdr;

	/* The LMAC response is appended after the UMAC wifi header. */
	tx_resp = (struct iwm_lmac_tx_resp *)
		(buf + sizeof(struct iwm_umac_wifi_in_hdr));
	hdr = (struct iwm_umac_wifi_in_hdr *)buf;

	IWM_DBG_TX(iwm, DBG, "REPLY_TX, buf size: %lu\n", buf_size);

	IWM_DBG_TX(iwm, DBG, "Seqnum: %d\n",
		   le16_to_cpu(hdr->sw_hdr.cmd.seq_num));
	IWM_DBG_TX(iwm, DBG, "\tFrame cnt: %d\n", tx_resp->frame_cnt);
	IWM_DBG_TX(iwm, DBG, "\tRetry cnt: %d\n",
		   le16_to_cpu(tx_resp->retry_cnt));
	IWM_DBG_TX(iwm, DBG, "\tSeq ctl: %d\n", le16_to_cpu(tx_resp->seq_ctl));
	IWM_DBG_TX(iwm, DBG, "\tByte cnt: %d\n",
		   le16_to_cpu(tx_resp->byte_cnt));
	IWM_DBG_TX(iwm, DBG, "\tStatus: 0x%x\n", le32_to_cpu(tx_resp->status));

	return 0;
}
241
242
/*
 * Store an LMAC calibration result.
 *
 * The per-opcode result buffer is (re)allocated when missing or too
 * small, the payload (calibration header included) is copied in, and
 * the opcode is flagged done in iwm->calib_done_map.
 */
static int iwm_ntf_calib_res(struct iwm_priv *iwm, u8 *buf,
			     unsigned long buf_size, struct iwm_wifi_cmd *cmd)
{
	u8 opcode;
	u8 *calib_buf;
	struct iwm_lmac_calib_hdr *hdr = (struct iwm_lmac_calib_hdr *)
				(buf + sizeof(struct iwm_umac_wifi_in_hdr));

	opcode = hdr->opcode;

	/* NOTE(review): opcode originates from the firmware; a BUG_ON
	 * on a malformed notification is harsh — a -EINVAL return
	 * might be preferable.  Confirm the firmware contract. */
	BUG_ON(opcode >= CALIBRATION_CMD_NUM ||
	       opcode < PHY_CALIBRATE_OPCODES_NUM);

	IWM_DBG_NTF(iwm, DBG, "Store calibration result for opcode: %d\n",
		    opcode);

	buf_size -= sizeof(struct iwm_umac_wifi_in_hdr);
	calib_buf = iwm->calib_res[opcode].buf;

	/* Grow (or first-allocate) the result buffer when needed. */
	if (!calib_buf || (iwm->calib_res[opcode].size < buf_size)) {
		kfree(calib_buf);
		calib_buf = kzalloc(buf_size, GFP_KERNEL);
		if (!calib_buf) {
			IWM_ERR(iwm, "Memory allocation failed: calib_res\n");
			return -ENOMEM;
		}
		iwm->calib_res[opcode].buf = calib_buf;
		iwm->calib_res[opcode].size = buf_size;
	}

	memcpy(calib_buf, hdr, buf_size);
	set_bit(opcode - PHY_CALIBRATE_OPCODES_NUM, &iwm->calib_done_map);

	return 0;
}
278
/* Calibration-complete notification: informational only, just log it. */
static int iwm_ntf_calib_complete(struct iwm_priv *iwm, u8 *buf,
				  unsigned long buf_size,
				  struct iwm_wifi_cmd *cmd)
{
	IWM_DBG_NTF(iwm, DBG, "Calibration completed\n");

	return 0;
}
287
288static int iwm_ntf_calib_cfg(struct iwm_priv *iwm, u8 *buf,
289 unsigned long buf_size, struct iwm_wifi_cmd *cmd)
290{
291 struct iwm_lmac_cal_cfg_resp *cal_resp;
292
293 cal_resp = (struct iwm_lmac_cal_cfg_resp *)
294 (buf + sizeof(struct iwm_umac_wifi_in_hdr));
295
296 IWM_DBG_NTF(iwm, DBG, "Calibration CFG command status: %d\n",
297 le32_to_cpu(cal_resp->status));
298
299 return 0;
300}
301
302static int iwm_ntf_wifi_status(struct iwm_priv *iwm, u8 *buf,
303 unsigned long buf_size, struct iwm_wifi_cmd *cmd)
304{
305 struct iwm_umac_notif_wifi_status *status =
306 (struct iwm_umac_notif_wifi_status *)buf;
307
308 iwm->core_enabled |= le16_to_cpu(status->status);
309
310 return 0;
311}
312
313static struct iwm_rx_ticket_node *
314iwm_rx_ticket_node_alloc(struct iwm_priv *iwm, struct iwm_rx_ticket *ticket)
315{
316 struct iwm_rx_ticket_node *ticket_node;
317
318 ticket_node = kzalloc(sizeof(struct iwm_rx_ticket_node), GFP_KERNEL);
319 if (!ticket_node) {
320 IWM_ERR(iwm, "Couldn't allocate ticket node\n");
321 return ERR_PTR(-ENOMEM);
322 }
323
324 ticket_node->ticket = kmemdup(ticket, sizeof(struct iwm_rx_ticket),
325 GFP_KERNEL);
326 if (!ticket_node->ticket) {
327 IWM_ERR(iwm, "Couldn't allocate RX ticket\n");
328 kfree(ticket_node);
329 return ERR_PTR(-ENOMEM);
330 }
331
332 INIT_LIST_HEAD(&ticket_node->node);
333
334 return ticket_node;
335}
336
/* Free a ticket node together with the ticket copy it owns. */
static void iwm_rx_ticket_node_free(struct iwm_rx_ticket_node *ticket_node)
{
	kfree(ticket_node->ticket);
	kfree(ticket_node);
}
342
343static struct iwm_rx_packet *iwm_rx_packet_get(struct iwm_priv *iwm, u16 id)
344{
345 u8 id_hash = IWM_RX_ID_GET_HASH(id);
346 struct iwm_rx_packet *packet;
347
348 spin_lock(&iwm->packet_lock[id_hash]);
349 list_for_each_entry(packet, &iwm->rx_packets[id_hash], node)
350 if (packet->id == id) {
351 list_del(&packet->node);
352 spin_unlock(&iwm->packet_lock[id_hash]);
353 return packet;
354 }
355
356 spin_unlock(&iwm->packet_lock[id_hash]);
357 return NULL;
358}
359
360static struct iwm_rx_packet *iwm_rx_packet_alloc(struct iwm_priv *iwm, u8 *buf,
361 u32 size, u16 id)
362{
363 struct iwm_rx_packet *packet;
364
365 packet = kzalloc(sizeof(struct iwm_rx_packet), GFP_KERNEL);
366 if (!packet) {
367 IWM_ERR(iwm, "Couldn't allocate packet\n");
368 return ERR_PTR(-ENOMEM);
369 }
370
371 packet->skb = dev_alloc_skb(size);
372 if (!packet->skb) {
373 IWM_ERR(iwm, "Couldn't allocate packet SKB\n");
374 kfree(packet);
375 return ERR_PTR(-ENOMEM);
376 }
377
378 packet->pkt_size = size;
379
380 skb_put(packet->skb, size);
381 memcpy(packet->skb->data, buf, size);
382 INIT_LIST_HEAD(&packet->node);
383 packet->id = id;
384
385 return packet;
386}
387
388void iwm_rx_free(struct iwm_priv *iwm)
389{
390 struct iwm_rx_ticket_node *ticket, *nt;
391 struct iwm_rx_packet *packet, *np;
392 int i;
393
394 spin_lock(&iwm->ticket_lock);
395 list_for_each_entry_safe(ticket, nt, &iwm->rx_tickets, node) {
396 list_del(&ticket->node);
397 iwm_rx_ticket_node_free(ticket);
398 }
399 spin_unlock(&iwm->ticket_lock);
400
401 for (i = 0; i < IWM_RX_ID_HASH; i++) {
402 spin_lock(&iwm->packet_lock[i]);
403 list_for_each_entry_safe(packet, np, &iwm->rx_packets[i],
404 node) {
405 list_del(&packet->node);
406 kfree_skb(packet->skb);
407 kfree(packet);
408 }
409 spin_unlock(&iwm->packet_lock[i]);
410 }
411}
412
413static int iwm_ntf_rx_ticket(struct iwm_priv *iwm, u8 *buf,
414 unsigned long buf_size, struct iwm_wifi_cmd *cmd)
415{
416 struct iwm_umac_notif_rx_ticket *ntf_rx_ticket =
417 (struct iwm_umac_notif_rx_ticket *)buf;
418 struct iwm_rx_ticket *ticket =
419 (struct iwm_rx_ticket *)ntf_rx_ticket->tickets;
420 int i, schedule_rx = 0;
421
422 for (i = 0; i < ntf_rx_ticket->num_tickets; i++) {
423 struct iwm_rx_ticket_node *ticket_node;
424
425 switch (le16_to_cpu(ticket->action)) {
426 case IWM_RX_TICKET_RELEASE:
427 case IWM_RX_TICKET_DROP:
428 /* We can push the packet to the stack */
429 ticket_node = iwm_rx_ticket_node_alloc(iwm, ticket);
430 if (IS_ERR(ticket_node))
431 return PTR_ERR(ticket_node);
432
433 IWM_DBG_RX(iwm, DBG, "TICKET %s(%d)\n",
434 __le16_to_cpu(ticket->action) ==
435 IWM_RX_TICKET_RELEASE ?
436 "RELEASE" : "DROP",
437 ticket->id);
438 spin_lock(&iwm->ticket_lock);
439 list_add_tail(&ticket_node->node, &iwm->rx_tickets);
440 spin_unlock(&iwm->ticket_lock);
441
442 /*
443 * We received an Rx ticket, most likely there's
444 * a packet pending for it, it's not worth going
445 * through the packet hash list to double check.
446 * Let's just fire the rx worker..
447 */
448 schedule_rx = 1;
449
450 break;
451
452 default:
453 IWM_ERR(iwm, "Invalid RX ticket action: 0x%x\n",
454 ticket->action);
455 }
456
457 ticket++;
458 }
459
460 if (schedule_rx)
461 queue_work(iwm->rx_wq, &iwm->rx_worker);
462
463 return 0;
464}
465
/*
 * RX packet notification: copy the payload off the notification
 * buffer, queue it on the per-hash pending-packet list keyed by the
 * sequence number, and kick the RX worker to pair it with its ticket.
 */
static int iwm_ntf_rx_packet(struct iwm_priv *iwm, u8 *buf,
			     unsigned long buf_size, struct iwm_wifi_cmd *cmd)
{
	struct iwm_umac_wifi_in_hdr *wifi_hdr;
	struct iwm_rx_packet *packet;
	u16 id, buf_offset;
	u32 packet_size;
	u8 id_hash;

	IWM_DBG_RX(iwm, DBG, "\n");

	wifi_hdr = (struct iwm_umac_wifi_in_hdr *)buf;
	/* The sequence number pairs this packet with its RX ticket. */
	id = le16_to_cpu(wifi_hdr->sw_hdr.cmd.seq_num);
	buf_offset = sizeof(struct iwm_umac_wifi_in_hdr);
	packet_size = buf_size - sizeof(struct iwm_umac_wifi_in_hdr);

	IWM_DBG_RX(iwm, DBG, "CMD:0x%x, seqnum: %d, packet size: %d\n",
		   wifi_hdr->sw_hdr.cmd.cmd, id, packet_size);
	IWM_DBG_RX(iwm, DBG, "Packet id: %d\n", id);
	IWM_HEXDUMP(iwm, DBG, RX, "PACKET: ", buf + buf_offset, packet_size);

	packet = iwm_rx_packet_alloc(iwm, buf + buf_offset, packet_size, id);
	if (IS_ERR(packet))
		return PTR_ERR(packet);

	id_hash = IWM_RX_ID_GET_HASH(id);
	spin_lock(&iwm->packet_lock[id_hash]);
	list_add_tail(&packet->node, &iwm->rx_packets[id_hash]);
	spin_unlock(&iwm->packet_lock[id_hash]);

	/* We might (unlikely) have received the packet _after_ the ticket */
	queue_work(iwm->rx_wq, &iwm->rx_worker);

	return 0;
}
501
502/* MLME handlers */
/*
 * Association-start notification: log the target BSSID and roam
 * reason, then wake anyone sleeping on the MLME queue.
 */
static int iwm_mlme_assoc_start(struct iwm_priv *iwm, u8 *buf,
				unsigned long buf_size,
				struct iwm_wifi_cmd *cmd)
{
	struct iwm_umac_notif_assoc_start *start;

	start = (struct iwm_umac_notif_assoc_start *)buf;

	IWM_DBG_MLME(iwm, INFO, "Association with %pM Started, reason: %d\n",
		     start->bssid, le32_to_cpu(start->roam_reason));

	wake_up_interruptible(&iwm->mlme_queue);

	return 0;
}
518
519static u8 iwm_is_open_wep_profile(struct iwm_priv *iwm)
520{
521 if ((iwm->umac_profile->sec.ucast_cipher == UMAC_CIPHER_TYPE_WEP_40 ||
522 iwm->umac_profile->sec.ucast_cipher == UMAC_CIPHER_TYPE_WEP_104) &&
523 (iwm->umac_profile->sec.ucast_cipher ==
524 iwm->umac_profile->sec.mcast_cipher) &&
525 (iwm->umac_profile->sec.auth_type == UMAC_AUTH_TYPE_OPEN))
526 return 1;
527
528 return 0;
529}
530
/*
 * Association-complete notification.
 *
 * On success: validate the channel against regulatory settings, record
 * BSSID/channel, then notify cfg80211 — as a roam when we were already
 * connected (SME_CONNECTING not set in BSS mode, or RESETTING), as a
 * connect result otherwise, or as an IBSS join in IBSS mode.
 *
 * On failure: clear association state and either report the failed
 * connect, retry with shared-key WEP for open-WEP profiles, or report
 * a disconnect when we were resetting.
 */
static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
				   unsigned long buf_size,
				   struct iwm_wifi_cmd *cmd)
{
	struct wiphy *wiphy = iwm_to_wiphy(iwm);
	struct ieee80211_channel *chan;
	struct iwm_umac_notif_assoc_complete *complete =
		(struct iwm_umac_notif_assoc_complete *)buf;

	IWM_DBG_MLME(iwm, INFO, "Association with %pM completed, status: %d\n",
		     complete->bssid, complete->status);

	switch (le32_to_cpu(complete->status)) {
	case UMAC_ASSOC_COMPLETE_SUCCESS:
		chan = ieee80211_get_channel(wiphy,
			ieee80211_channel_to_frequency(complete->channel,
				complete->band == UMAC_BAND_2GHZ ?
					IEEE80211_BAND_2GHZ :
					IEEE80211_BAND_5GHZ));
		if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) {
			/* Associated to a unallowed channel, disassociate. */
			__iwm_invalidate_mlme_profile(iwm);
			IWM_WARN(iwm, "Couldn't associate with %pM due to "
				 "channel %d is disabled. Check your local "
				 "regulatory setting.\n",
				 complete->bssid, complete->channel);
			goto failure;
		}

		set_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
		memcpy(iwm->bssid, complete->bssid, ETH_ALEN);
		iwm->channel = complete->channel;

		/* Internal roaming state, avoid notifying SME. */
		if (!test_and_clear_bit(IWM_STATUS_SME_CONNECTING, &iwm->status)
		    && iwm->conf.mode == UMAC_MODE_BSS) {
			cancel_delayed_work(&iwm->disconnect);
			cfg80211_roamed(iwm_to_ndev(iwm), NULL,
					complete->bssid,
					iwm->req_ie, iwm->req_ie_len,
					iwm->resp_ie, iwm->resp_ie_len,
					GFP_KERNEL);
			break;
		}

		iwm_link_on(iwm);

		if (iwm->conf.mode == UMAC_MODE_IBSS)
			goto ibss;

		if (!test_bit(IWM_STATUS_RESETTING, &iwm->status))
			cfg80211_connect_result(iwm_to_ndev(iwm),
						complete->bssid,
						iwm->req_ie, iwm->req_ie_len,
						iwm->resp_ie, iwm->resp_ie_len,
						WLAN_STATUS_SUCCESS,
						GFP_KERNEL);
		else
			cfg80211_roamed(iwm_to_ndev(iwm), NULL,
					complete->bssid,
					iwm->req_ie, iwm->req_ie_len,
					iwm->resp_ie, iwm->resp_ie_len,
					GFP_KERNEL);
		break;
	case UMAC_ASSOC_COMPLETE_FAILURE:
 failure:
		clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
		memset(iwm->bssid, 0, ETH_ALEN);
		iwm->channel = 0;

		/* Internal roaming state, avoid notifying SME. */
		if (!test_and_clear_bit(IWM_STATUS_SME_CONNECTING, &iwm->status)
		    && iwm->conf.mode == UMAC_MODE_BSS) {
			cancel_delayed_work(&iwm->disconnect);
			break;
		}

		iwm_link_off(iwm);

		if (iwm->conf.mode == UMAC_MODE_IBSS)
			goto ibss;

		if (!test_bit(IWM_STATUS_RESETTING, &iwm->status))
			if (!iwm_is_open_wep_profile(iwm)) {
				cfg80211_connect_result(iwm_to_ndev(iwm),
							complete->bssid,
							NULL, 0, NULL, 0,
						WLAN_STATUS_UNSPECIFIED_FAILURE,
							GFP_KERNEL);
			} else {
				/* Let's try shared WEP auth */
				IWM_ERR(iwm, "Trying WEP shared auth\n");
				schedule_work(&iwm->auth_retry_worker);
			}
		else
			cfg80211_disconnected(iwm_to_ndev(iwm), 0, NULL, 0,
					      GFP_KERNEL);
		break;
	default:
		break;
	}

	clear_bit(IWM_STATUS_RESETTING, &iwm->status);
	return 0;

 ibss:
	cfg80211_ibss_joined(iwm_to_ndev(iwm), iwm->bssid, GFP_KERNEL);
	clear_bit(IWM_STATUS_RESETTING, &iwm->status);
	return 0;
}
641
/*
 * Profile invalidated by the UMAC: report the connection failure to
 * the SME (unless we requested the invalidation ourselves), clear all
 * association state, drop the link and wake MLME waiters.
 */
static int iwm_mlme_profile_invalidate(struct iwm_priv *iwm, u8 *buf,
				       unsigned long buf_size,
				       struct iwm_wifi_cmd *cmd)
{
	struct iwm_umac_notif_profile_invalidate *invalid;
	u32 reason;

	invalid = (struct iwm_umac_notif_profile_invalidate *)buf;
	reason = le32_to_cpu(invalid->reason);

	IWM_DBG_MLME(iwm, INFO, "Profile Invalidated. Reason: %d\n", reason);

	/* A self-requested invalidation is not a connect failure. */
	if (reason != UMAC_PROFILE_INVALID_REQUEST &&
	    test_bit(IWM_STATUS_SME_CONNECTING, &iwm->status))
		cfg80211_connect_result(iwm_to_ndev(iwm), NULL, NULL, 0, NULL,
					0, WLAN_STATUS_UNSPECIFIED_FAILURE,
					GFP_KERNEL);

	clear_bit(IWM_STATUS_SME_CONNECTING, &iwm->status);
	clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);

	iwm->umac_profile_active = false;
	memset(iwm->bssid, 0, ETH_ALEN);
	iwm->channel = 0;

	iwm_link_off(iwm);

	wake_up_interruptible(&iwm->mlme_queue);

	return 0;
}
673
/* Delay before actually reporting a disconnection to cfg80211 (jiffies). */
#define IWM_DISCONNECT_INTERVAL (5 * HZ)

/*
 * Handle a UMAC "connection terminated" notification.
 *
 * The disconnection is not reported immediately: the delayed disconnect
 * work is scheduled instead, which gives internal roaming a chance to
 * re-associate before the SME is told anything (see the failure path in
 * iwm_mlme_assoc_complete(), which cancels this work).
 */
static int iwm_mlme_connection_terminated(struct iwm_priv *iwm, u8 *buf,
					  unsigned long buf_size,
					  struct iwm_wifi_cmd *cmd)
{
	IWM_DBG_MLME(iwm, DBG, "Connection terminated\n");

	schedule_delayed_work(&iwm->disconnect, IWM_DISCONNECT_INTERVAL);

	return 0;
}
686
687static int iwm_mlme_scan_complete(struct iwm_priv *iwm, u8 *buf,
688 unsigned long buf_size,
689 struct iwm_wifi_cmd *cmd)
690{
691 int ret;
692 struct iwm_umac_notif_scan_complete *scan_complete =
693 (struct iwm_umac_notif_scan_complete *)buf;
694 u32 result = le32_to_cpu(scan_complete->result);
695
696 IWM_DBG_MLME(iwm, INFO, "type:0x%x result:0x%x seq:%d\n",
697 le32_to_cpu(scan_complete->type),
698 le32_to_cpu(scan_complete->result),
699 scan_complete->seq_num);
700
701 if (!test_and_clear_bit(IWM_STATUS_SCANNING, &iwm->status)) {
702 IWM_ERR(iwm, "Scan complete while device not scanning\n");
703 return -EIO;
704 }
705 if (!iwm->scan_request)
706 return 0;
707
708 ret = iwm_cfg80211_inform_bss(iwm);
709
710 cfg80211_scan_done(iwm->scan_request,
711 (result & UMAC_SCAN_RESULT_ABORTED) ? 1 : !!ret);
712 iwm->scan_request = NULL;
713
714 return ret;
715}
716
717static int iwm_mlme_update_sta_table(struct iwm_priv *iwm, u8 *buf,
718 unsigned long buf_size,
719 struct iwm_wifi_cmd *cmd)
720{
721 struct iwm_umac_notif_sta_info *umac_sta =
722 (struct iwm_umac_notif_sta_info *)buf;
723 struct iwm_sta_info *sta;
724 int i;
725
726 switch (le32_to_cpu(umac_sta->opcode)) {
727 case UMAC_OPCODE_ADD_MODIFY:
728 sta = &iwm->sta_table[GET_VAL8(umac_sta->sta_id, LMAC_STA_ID)];
729
730 IWM_DBG_MLME(iwm, INFO, "%s STA: ID = %d, Color = %d, "
731 "addr = %pM, qos = %d\n",
732 sta->valid ? "Modify" : "Add",
733 GET_VAL8(umac_sta->sta_id, LMAC_STA_ID),
734 GET_VAL8(umac_sta->sta_id, LMAC_STA_COLOR),
735 umac_sta->mac_addr,
736 umac_sta->flags & UMAC_STA_FLAG_QOS);
737
738 sta->valid = true;
739 sta->qos = umac_sta->flags & UMAC_STA_FLAG_QOS;
740 sta->color = GET_VAL8(umac_sta->sta_id, LMAC_STA_COLOR);
741 memcpy(sta->addr, umac_sta->mac_addr, ETH_ALEN);
742 break;
743 case UMAC_OPCODE_REMOVE:
744 IWM_DBG_MLME(iwm, INFO, "Remove STA: ID = %d, Color = %d, "
745 "addr = %pM\n",
746 GET_VAL8(umac_sta->sta_id, LMAC_STA_ID),
747 GET_VAL8(umac_sta->sta_id, LMAC_STA_COLOR),
748 umac_sta->mac_addr);
749
750 sta = &iwm->sta_table[GET_VAL8(umac_sta->sta_id, LMAC_STA_ID)];
751
752 if (!memcmp(sta->addr, umac_sta->mac_addr, ETH_ALEN))
753 sta->valid = false;
754
755 break;
756 case UMAC_OPCODE_CLEAR_ALL:
757 for (i = 0; i < IWM_STA_TABLE_NUM; i++)
758 iwm->sta_table[i].valid = false;
759
760 break;
761 default:
762 break;
763 }
764
765 return 0;
766}
767
768static int iwm_mlme_medium_lost(struct iwm_priv *iwm, u8 *buf,
769 unsigned long buf_size,
770 struct iwm_wifi_cmd *cmd)
771{
772 struct wiphy *wiphy = iwm_to_wiphy(iwm);
773
774 IWM_DBG_NTF(iwm, DBG, "WiFi/WiMax coexistence radio is OFF\n");
775
776 wiphy_rfkill_set_hw_state(wiphy, true);
777
778 return 0;
779}
780
/*
 * Handle a UMAC BSS-table-change notification: cache (or refresh) the
 * raw BSS entry on iwm->bss_list and feed the embedded management frame
 * to cfg80211.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL on a bad
 * band or a frame cfg80211 rejects.
 */
static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
				     unsigned long buf_size,
				     struct iwm_wifi_cmd *cmd)
{
	struct wiphy *wiphy = iwm_to_wiphy(iwm);
	struct ieee80211_mgmt *mgmt;
	struct iwm_umac_notif_bss_info *umac_bss =
			(struct iwm_umac_notif_bss_info *)buf;
	struct ieee80211_channel *channel;
	struct ieee80211_supported_band *band;
	struct iwm_bss_info *bss;
	s32 signal;
	int freq;
	/* NOTE(review): frame_len comes straight from the firmware and is
	 * not validated against buf_size before the memcpy below — assumes
	 * the transport already bounded it; verify upstream. */
	u16 frame_len = le16_to_cpu(umac_bss->frame_len);
	size_t bss_len = sizeof(struct iwm_umac_notif_bss_info) + frame_len;

	mgmt = (struct ieee80211_mgmt *)(umac_bss->frame_buf);

	IWM_DBG_MLME(iwm, DBG, "New BSS info entry: %pM\n", mgmt->bssid);
	IWM_DBG_MLME(iwm, DBG, "\tType: 0x%x\n", le32_to_cpu(umac_bss->type));
	IWM_DBG_MLME(iwm, DBG, "\tTimestamp: %d\n",
		     le32_to_cpu(umac_bss->timestamp));
	IWM_DBG_MLME(iwm, DBG, "\tTable Index: %d\n",
		     le16_to_cpu(umac_bss->table_idx));
	IWM_DBG_MLME(iwm, DBG, "\tBand: %d\n", umac_bss->band);
	IWM_DBG_MLME(iwm, DBG, "\tChannel: %d\n", umac_bss->channel);
	IWM_DBG_MLME(iwm, DBG, "\tRSSI: %d\n", umac_bss->rssi);
	IWM_DBG_MLME(iwm, DBG, "\tFrame Length: %d\n", frame_len);

	/* Look for an existing cached entry with the same table index;
	 * if the loop completes, bss->node aliases the list head. */
	list_for_each_entry(bss, &iwm->bss_list, node)
		if (bss->bss->table_idx == umac_bss->table_idx)
			break;

	if (&bss->node != &iwm->bss_list) {
		/* Remove the old BSS entry, we will add it back later. */
		/* NOTE(review): the old bss->cfg_bss reference is not
		 * released here — confirm cfg80211_put_bss handling
		 * happens elsewhere, else this leaks a cfg80211 ref. */
		list_del(&bss->node);
		kfree(bss->bss);
	} else {
		/* New BSS entry */

		bss = kzalloc(sizeof(struct iwm_bss_info), GFP_KERNEL);
		if (!bss) {
			IWM_ERR(iwm, "Couldn't allocate bss_info\n");
			return -ENOMEM;
		}
	}

	/* Copy the whole notification (header + frame) into the cache. */
	bss->bss = kzalloc(bss_len, GFP_KERNEL);
	if (!bss->bss) {
		kfree(bss);
		IWM_ERR(iwm, "Couldn't allocate bss\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&bss->node);
	memcpy(bss->bss, umac_bss, bss_len);

	if (umac_bss->band == UMAC_BAND_2GHZ)
		band = wiphy->bands[IEEE80211_BAND_2GHZ];
	else if (umac_bss->band == UMAC_BAND_5GHZ)
		band = wiphy->bands[IEEE80211_BAND_5GHZ];
	else {
		IWM_ERR(iwm, "Invalid band: %d\n", umac_bss->band);
		goto err;
	}

	freq = ieee80211_channel_to_frequency(umac_bss->channel, band->band);
	channel = ieee80211_get_channel(wiphy, freq);
	/* RSSI is scaled by 100 for cfg80211's signal_type (mBm). */
	signal = umac_bss->rssi * 100;

	bss->cfg_bss = cfg80211_inform_bss_frame(wiphy, channel,
						 mgmt, frame_len,
						 signal, GFP_KERNEL);
	if (!bss->cfg_bss)
		goto err;

	list_add_tail(&bss->node, &iwm->bss_list);

	return 0;
 err:
	/* Free the cached copy; bss is off the list on every err path. */
	kfree(bss->bss);
	kfree(bss);

	return -EINVAL;
}
866
867static int iwm_mlme_remove_bss(struct iwm_priv *iwm, u8 *buf,
868 unsigned long buf_size, struct iwm_wifi_cmd *cmd)
869{
870 struct iwm_umac_notif_bss_removed *bss_rm =
871 (struct iwm_umac_notif_bss_removed *)buf;
872 struct iwm_bss_info *bss, *next;
873 u16 table_idx;
874 int i;
875
876 for (i = 0; i < le32_to_cpu(bss_rm->count); i++) {
877 table_idx = le16_to_cpu(bss_rm->entries[i]) &
878 IWM_BSS_REMOVE_INDEX_MSK;
879 list_for_each_entry_safe(bss, next, &iwm->bss_list, node)
880 if (bss->bss->table_idx == cpu_to_le16(table_idx)) {
881 struct ieee80211_mgmt *mgmt;
882
883 mgmt = (struct ieee80211_mgmt *)
884 (bss->bss->frame_buf);
885 IWM_DBG_MLME(iwm, ERR, "BSS removed: %pM\n",
886 mgmt->bssid);
887 list_del(&bss->node);
888 kfree(bss->bss);
889 kfree(bss);
890 }
891 }
892
893 return 0;
894}
895
896static int iwm_mlme_mgt_frame(struct iwm_priv *iwm, u8 *buf,
897 unsigned long buf_size, struct iwm_wifi_cmd *cmd)
898{
899 struct iwm_umac_notif_mgt_frame *mgt_frame =
900 (struct iwm_umac_notif_mgt_frame *)buf;
901 struct ieee80211_mgmt *mgt = (struct ieee80211_mgmt *)mgt_frame->frame;
902
903 IWM_HEXDUMP(iwm, DBG, MLME, "MGT: ", mgt_frame->frame,
904 le16_to_cpu(mgt_frame->len));
905
906 if (ieee80211_is_assoc_req(mgt->frame_control)) {
907 iwm->req_ie_len = le16_to_cpu(mgt_frame->len)
908 - offsetof(struct ieee80211_mgmt,
909 u.assoc_req.variable);
910 kfree(iwm->req_ie);
911 iwm->req_ie = kmemdup(mgt->u.assoc_req.variable,
912 iwm->req_ie_len, GFP_KERNEL);
913 } else if (ieee80211_is_reassoc_req(mgt->frame_control)) {
914 iwm->req_ie_len = le16_to_cpu(mgt_frame->len)
915 - offsetof(struct ieee80211_mgmt,
916 u.reassoc_req.variable);
917 kfree(iwm->req_ie);
918 iwm->req_ie = kmemdup(mgt->u.reassoc_req.variable,
919 iwm->req_ie_len, GFP_KERNEL);
920 } else if (ieee80211_is_assoc_resp(mgt->frame_control)) {
921 iwm->resp_ie_len = le16_to_cpu(mgt_frame->len)
922 - offsetof(struct ieee80211_mgmt,
923 u.assoc_resp.variable);
924 kfree(iwm->resp_ie);
925 iwm->resp_ie = kmemdup(mgt->u.assoc_resp.variable,
926 iwm->resp_ie_len, GFP_KERNEL);
927 } else if (ieee80211_is_reassoc_resp(mgt->frame_control)) {
928 iwm->resp_ie_len = le16_to_cpu(mgt_frame->len)
929 - offsetof(struct ieee80211_mgmt,
930 u.reassoc_resp.variable);
931 kfree(iwm->resp_ie);
932 iwm->resp_ie = kmemdup(mgt->u.reassoc_resp.variable,
933 iwm->resp_ie_len, GFP_KERNEL);
934 } else {
935 IWM_ERR(iwm, "Unsupported management frame: 0x%x",
936 le16_to_cpu(mgt->frame_control));
937 return 0;
938 }
939
940 return 0;
941}
942
943static int iwm_ntf_mlme(struct iwm_priv *iwm, u8 *buf,
944 unsigned long buf_size, struct iwm_wifi_cmd *cmd)
945{
946 struct iwm_umac_notif_wifi_if *notif =
947 (struct iwm_umac_notif_wifi_if *)buf;
948
949 switch (notif->status) {
950 case WIFI_IF_NTFY_ASSOC_START:
951 return iwm_mlme_assoc_start(iwm, buf, buf_size, cmd);
952 case WIFI_IF_NTFY_ASSOC_COMPLETE:
953 return iwm_mlme_assoc_complete(iwm, buf, buf_size, cmd);
954 case WIFI_IF_NTFY_PROFILE_INVALIDATE_COMPLETE:
955 return iwm_mlme_profile_invalidate(iwm, buf, buf_size, cmd);
956 case WIFI_IF_NTFY_CONNECTION_TERMINATED:
957 return iwm_mlme_connection_terminated(iwm, buf, buf_size, cmd);
958 case WIFI_IF_NTFY_SCAN_COMPLETE:
959 return iwm_mlme_scan_complete(iwm, buf, buf_size, cmd);
960 case WIFI_IF_NTFY_STA_TABLE_CHANGE:
961 return iwm_mlme_update_sta_table(iwm, buf, buf_size, cmd);
962 case WIFI_IF_NTFY_EXTENDED_IE_REQUIRED:
963 IWM_DBG_MLME(iwm, DBG, "Extended IE required\n");
964 break;
965 case WIFI_IF_NTFY_RADIO_PREEMPTION:
966 return iwm_mlme_medium_lost(iwm, buf, buf_size, cmd);
967 case WIFI_IF_NTFY_BSS_TRK_TABLE_CHANGED:
968 return iwm_mlme_update_bss_table(iwm, buf, buf_size, cmd);
969 case WIFI_IF_NTFY_BSS_TRK_ENTRIES_REMOVED:
970 return iwm_mlme_remove_bss(iwm, buf, buf_size, cmd);
971 break;
972 case WIFI_IF_NTFY_MGMT_FRAME:
973 return iwm_mlme_mgt_frame(iwm, buf, buf_size, cmd);
974 case WIFI_DBG_IF_NTFY_SCAN_SUPER_JOB_START:
975 case WIFI_DBG_IF_NTFY_SCAN_SUPER_JOB_COMPLETE:
976 case WIFI_DBG_IF_NTFY_SCAN_CHANNEL_START:
977 case WIFI_DBG_IF_NTFY_SCAN_CHANNEL_RESULT:
978 case WIFI_DBG_IF_NTFY_SCAN_MINI_JOB_START:
979 case WIFI_DBG_IF_NTFY_SCAN_MINI_JOB_COMPLETE:
980 case WIFI_DBG_IF_NTFY_CNCT_ATC_START:
981 case WIFI_DBG_IF_NTFY_COEX_NOTIFICATION:
982 case WIFI_DBG_IF_NTFY_COEX_HANDLE_ENVELOP:
983 case WIFI_DBG_IF_NTFY_COEX_HANDLE_RELEASE_ENVELOP:
984 IWM_DBG_MLME(iwm, DBG, "MLME debug notification: 0x%x\n",
985 notif->status);
986 break;
987 default:
988 IWM_ERR(iwm, "Unhandled notification: 0x%x\n", notif->status);
989 break;
990 }
991
992 return 0;
993}
994
/* Interval between periodic statistics requests to the firmware. */
#define IWM_STATS_UPDATE_INTERVAL (2 * HZ)

/*
 * Handle a UMAC statistics notification: refresh the cached bitrate,
 * tx power and wireless-extensions statistics, then re-arm both the
 * periodic stats request and the device watchdog.
 */
static int iwm_ntf_statistics(struct iwm_priv *iwm, u8 *buf,
			      unsigned long buf_size, struct iwm_wifi_cmd *cmd)
{
	struct iwm_umac_notif_stats *stats = (struct iwm_umac_notif_stats *)buf;
	struct iw_statistics *wstats = &iwm->wstats;
	u16 max_rate = 0;
	int i;

	IWM_DBG_MLME(iwm, DBG, "Statistics notification received\n");

	if (test_bit(IWM_STATUS_ASSOCIATED, &iwm->status)) {
		/* Current bitrate = best of the sampled tx/rx rates. */
		for (i = 0; i < UMAC_NTF_RATE_SAMPLE_NR; i++) {
			max_rate = max_t(u16, max_rate,
					 max(le16_to_cpu(stats->tx_rate[i]),
					     le16_to_cpu(stats->rx_rate[i])));
		}
		/* UMAC passes rate info multiplies by 2 */
		iwm->rate = max_rate >> 1;
	}
	iwm->txpower = le32_to_cpu(stats->tx_power);

	wstats->status = 0;

	wstats->discard.nwid = le32_to_cpu(stats->rx_drop_other_bssid);
	wstats->discard.code = le32_to_cpu(stats->rx_drop_decode);
	wstats->discard.fragment = le32_to_cpu(stats->rx_drop_reassembly);
	wstats->discard.retries = le32_to_cpu(stats->tx_drop_max_retry);

	wstats->miss.beacon = le32_to_cpu(stats->missed_beacons);

	/* according to cfg80211 */
	/* Map RSSI [-110, -40] dBm linearly onto quality [0, 70].
	 * NOTE(review): assumes rssi_dbm is a signed 8-bit field — the
	 * comparisons below rely on it; confirm against the UMAC header. */
	if (stats->rssi_dbm < -110)
		wstats->qual.qual = 0;
	else if (stats->rssi_dbm > -40)
		wstats->qual.qual = 70;
	else
		wstats->qual.qual = stats->rssi_dbm + 110;

	wstats->qual.level = stats->rssi_dbm;
	wstats->qual.noise = stats->noise_dbm;
	wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;

	/* Ask for the next statistics sample. */
	schedule_delayed_work(&iwm->stats_request, IWM_STATS_UPDATE_INTERVAL);

	/* A statistics notification proves the device is alive: kick the
	 * watchdog forward. */
	mod_timer(&iwm->watchdog, round_jiffies(jiffies + IWM_WATCHDOG_PERIOD));

	return 0;
}
1045
1046static int iwm_ntf_eeprom_proxy(struct iwm_priv *iwm, u8 *buf,
1047 unsigned long buf_size,
1048 struct iwm_wifi_cmd *cmd)
1049{
1050 struct iwm_umac_cmd_eeprom_proxy *eeprom_proxy =
1051 (struct iwm_umac_cmd_eeprom_proxy *)
1052 (buf + sizeof(struct iwm_umac_wifi_in_hdr));
1053 struct iwm_umac_cmd_eeprom_proxy_hdr *hdr = &eeprom_proxy->hdr;
1054 u32 hdr_offset = le32_to_cpu(hdr->offset);
1055 u32 hdr_len = le32_to_cpu(hdr->len);
1056 u32 hdr_type = le32_to_cpu(hdr->type);
1057
1058 IWM_DBG_NTF(iwm, DBG, "type: 0x%x, len: %d, offset: 0x%x\n",
1059 hdr_type, hdr_len, hdr_offset);
1060
1061 if ((hdr_offset + hdr_len) > IWM_EEPROM_LEN)
1062 return -EINVAL;
1063
1064 switch (hdr_type) {
1065 case IWM_UMAC_CMD_EEPROM_TYPE_READ:
1066 memcpy(iwm->eeprom + hdr_offset, eeprom_proxy->buf, hdr_len);
1067 break;
1068 case IWM_UMAC_CMD_EEPROM_TYPE_WRITE:
1069 default:
1070 return -ENOTSUPP;
1071 }
1072
1073 return 0;
1074}
1075
1076static int iwm_ntf_channel_info_list(struct iwm_priv *iwm, u8 *buf,
1077 unsigned long buf_size,
1078 struct iwm_wifi_cmd *cmd)
1079{
1080 struct iwm_umac_cmd_get_channel_list *ch_list =
1081 (struct iwm_umac_cmd_get_channel_list *)
1082 (buf + sizeof(struct iwm_umac_wifi_in_hdr));
1083 struct wiphy *wiphy = iwm_to_wiphy(iwm);
1084 struct ieee80211_supported_band *band;
1085 int i;
1086
1087 band = wiphy->bands[IEEE80211_BAND_2GHZ];
1088
1089 for (i = 0; i < band->n_channels; i++) {
1090 unsigned long ch_mask_0 =
1091 le32_to_cpu(ch_list->ch[0].channels_mask);
1092 unsigned long ch_mask_2 =
1093 le32_to_cpu(ch_list->ch[2].channels_mask);
1094
1095 if (!test_bit(i, &ch_mask_0))
1096 band->channels[i].flags |= IEEE80211_CHAN_DISABLED;
1097
1098 if (!test_bit(i, &ch_mask_2))
1099 band->channels[i].flags |= IEEE80211_CHAN_NO_IBSS;
1100 }
1101
1102 band = wiphy->bands[IEEE80211_BAND_5GHZ];
1103
1104 for (i = 0; i < min(band->n_channels, 32); i++) {
1105 unsigned long ch_mask_1 =
1106 le32_to_cpu(ch_list->ch[1].channels_mask);
1107 unsigned long ch_mask_3 =
1108 le32_to_cpu(ch_list->ch[3].channels_mask);
1109
1110 if (!test_bit(i, &ch_mask_1))
1111 band->channels[i].flags |= IEEE80211_CHAN_DISABLED;
1112
1113 if (!test_bit(i, &ch_mask_3))
1114 band->channels[i].flags |= IEEE80211_CHAN_NO_IBSS;
1115 }
1116
1117 return 0;
1118}
1119
1120static int iwm_ntf_stop_resume_tx(struct iwm_priv *iwm, u8 *buf,
1121 unsigned long buf_size,
1122 struct iwm_wifi_cmd *cmd)
1123{
1124 struct iwm_umac_notif_stop_resume_tx *stp_res_tx =
1125 (struct iwm_umac_notif_stop_resume_tx *)buf;
1126 struct iwm_sta_info *sta_info;
1127 struct iwm_tid_info *tid_info;
1128 u8 sta_id = STA_ID_N_COLOR_ID(stp_res_tx->sta_id);
1129 u16 tid_msk = le16_to_cpu(stp_res_tx->stop_resume_tid_msk);
1130 int bit, ret = 0;
1131 bool stop = false;
1132
1133 IWM_DBG_NTF(iwm, DBG, "stop/resume notification:\n"
1134 "\tflags: 0x%x\n"
1135 "\tSTA id: %d\n"
1136 "\tTID bitmask: 0x%x\n",
1137 stp_res_tx->flags, stp_res_tx->sta_id,
1138 stp_res_tx->stop_resume_tid_msk);
1139
1140 if (stp_res_tx->flags & UMAC_STOP_TX_FLAG)
1141 stop = true;
1142
1143 sta_info = &iwm->sta_table[sta_id];
1144 if (!sta_info->valid) {
1145 IWM_ERR(iwm, "Stoping an invalid STA: %d %d\n",
1146 sta_id, stp_res_tx->sta_id);
1147 return -EINVAL;
1148 }
1149
1150 for_each_set_bit(bit, (unsigned long *)&tid_msk, IWM_UMAC_TID_NR) {
1151 tid_info = &sta_info->tid_info[bit];
1152
1153 mutex_lock(&tid_info->mutex);
1154 tid_info->stopped = stop;
1155 mutex_unlock(&tid_info->mutex);
1156
1157 if (!stop) {
1158 struct iwm_tx_queue *txq;
1159 int queue = iwm_tid_to_queue(bit);
1160
1161 if (queue < 0)
1162 continue;
1163
1164 txq = &iwm->txq[queue];
1165 /*
1166 * If we resume, we have to move our SKBs
1167 * back to the tx queue and queue some work.
1168 */
1169 spin_lock_bh(&txq->lock);
1170 skb_queue_splice_init(&txq->queue, &txq->stopped_queue);
1171 spin_unlock_bh(&txq->lock);
1172
1173 queue_work(txq->wq, &txq->worker);
1174 }
1175
1176 }
1177
1178 /* We send an ACK only for the stop case */
1179 if (stop)
1180 ret = iwm_send_umac_stop_resume_tx(iwm, stp_res_tx);
1181
1182 return ret;
1183}
1184
1185static int iwm_ntf_wifi_if_wrapper(struct iwm_priv *iwm, u8 *buf,
1186 unsigned long buf_size,
1187 struct iwm_wifi_cmd *cmd)
1188{
1189 struct iwm_umac_wifi_if *hdr;
1190
1191 if (cmd == NULL) {
1192 IWM_ERR(iwm, "Couldn't find expected wifi command\n");
1193 return -EINVAL;
1194 }
1195
1196 hdr = (struct iwm_umac_wifi_if *)cmd->buf.payload;
1197
1198 IWM_DBG_NTF(iwm, DBG, "WIFI_IF_WRAPPER cmd is delivered to UMAC: "
1199 "oid is 0x%x\n", hdr->oid);
1200
1201 set_bit(hdr->oid, &iwm->wifi_ntfy[0]);
1202 wake_up_interruptible(&iwm->wifi_ntfy_queue);
1203
1204 switch (hdr->oid) {
1205 case UMAC_WIFI_IF_CMD_SET_PROFILE:
1206 iwm->umac_profile_active = true;
1207 break;
1208 default:
1209 break;
1210 }
1211
1212 return 0;
1213}
1214
1215#define CT_KILL_DELAY (30 * HZ)
1216static int iwm_ntf_card_state(struct iwm_priv *iwm, u8 *buf,
1217 unsigned long buf_size, struct iwm_wifi_cmd *cmd)
1218{
1219 struct wiphy *wiphy = iwm_to_wiphy(iwm);
1220 struct iwm_lmac_card_state *state = (struct iwm_lmac_card_state *)
1221 (buf + sizeof(struct iwm_umac_wifi_in_hdr));
1222 u32 flags = le32_to_cpu(state->flags);
1223
1224 IWM_INFO(iwm, "HW RF Kill %s, CT Kill %s\n",
1225 flags & IWM_CARD_STATE_HW_DISABLED ? "ON" : "OFF",
1226 flags & IWM_CARD_STATE_CTKILL_DISABLED ? "ON" : "OFF");
1227
1228 if (flags & IWM_CARD_STATE_CTKILL_DISABLED) {
1229 /*
1230 * We got a CTKILL event: We bring the interface down in
1231 * oder to cool the device down, and try to bring it up
1232 * 30 seconds later. If it's still too hot, we'll go through
1233 * this code path again.
1234 */
1235 cancel_delayed_work_sync(&iwm->ct_kill_delay);
1236 schedule_delayed_work(&iwm->ct_kill_delay, CT_KILL_DELAY);
1237 }
1238
1239 wiphy_rfkill_set_hw_state(wiphy, flags &
1240 (IWM_CARD_STATE_HW_DISABLED |
1241 IWM_CARD_STATE_CTKILL_DISABLED));
1242
1243 return 0;
1244}
1245
/*
 * Handle one UMAC wifi frame: validate its header, trace it, match it
 * to a pending synchronous command (if any), and either notify a
 * registered async handler or process the response inline.
 *
 * Returns 0 on success or a negative errno on a malformed frame.
 */
static int iwm_rx_handle_wifi(struct iwm_priv *iwm, u8 *buf,
			      unsigned long buf_size)
{
	struct iwm_umac_wifi_in_hdr *wifi_hdr;
	struct iwm_wifi_cmd *cmd;
	u8 source, cmd_id;
	u16 seq_num;
	u32 count;

	wifi_hdr = (struct iwm_umac_wifi_in_hdr *)buf;
	cmd_id = wifi_hdr->sw_hdr.cmd.cmd;
	source = GET_VAL32(wifi_hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE);
	if (source >= IWM_SRC_NUM) {
		IWM_CRIT(iwm, "invalid source %d\n", source);
		return -EINVAL;
	}

	/* Trace the frame by type: data MPDU, rx ticket, or plain cmd. */
	if (cmd_id == REPLY_RX_MPDU_CMD)
		trace_iwm_rx_packet(iwm, buf, buf_size);
	else if ((cmd_id == UMAC_NOTIFY_OPCODE_RX_TICKET) &&
		 (source == UMAC_HDI_IN_SOURCE_FW))
		trace_iwm_rx_ticket(iwm, buf, buf_size);
	else
		trace_iwm_rx_wifi_cmd(iwm, wifi_hdr);

	/* Payload byte count from the firmware, adjusted to cover the
	 * wifi header minus the generic device command header. */
	count = GET_VAL32(wifi_hdr->sw_hdr.meta_data, UMAC_FW_CMD_BYTE_COUNT);
	count += sizeof(struct iwm_umac_wifi_in_hdr) -
		 sizeof(struct iwm_dev_cmd_hdr);
	if (count > buf_size) {
		IWM_CRIT(iwm, "count %d, buf size:%ld\n", count, buf_size);
		return -EINVAL;
	}

	seq_num = le16_to_cpu(wifi_hdr->sw_hdr.cmd.seq_num);

	IWM_DBG_RX(iwm, DBG, "CMD:0x%x, source: 0x%x, seqnum: %d\n",
		   cmd_id, source, seq_num);

	/*
	 * If this is a response to a previously sent command, there must
	 * be a pending command for this sequence number.
	 */
	cmd = iwm_get_pending_wifi_cmd(iwm, seq_num);

	/* Notify the caller only for sync commands. */
	/* NOTE(review): cmd_id is a raw u8 used to index the handler
	 * tables; assumes the tables are sized for the full opcode range
	 * (the handler_map bit test gates which entries fire) — confirm
	 * table sizing in the headers. */
	switch (source) {
	case UMAC_HDI_IN_SOURCE_FHRX:
		if (iwm->lmac_handlers[cmd_id] &&
		    test_bit(cmd_id, &iwm->lmac_handler_map[0]))
			return iwm_notif_send(iwm, cmd, cmd_id, source,
					      buf, count);
		break;
	case UMAC_HDI_IN_SOURCE_FW:
		if (iwm->umac_handlers[cmd_id] &&
		    test_bit(cmd_id, &iwm->umac_handler_map[0]))
			return iwm_notif_send(iwm, cmd, cmd_id, source,
					      buf, count);
		break;
	case UMAC_HDI_IN_SOURCE_UDMA:
		break;
	}

	/* No async notification requested: handle the response inline
	 * (iwm_rx_handle_resp() also frees cmd). */
	return iwm_rx_handle_resp(iwm, buf, count, cmd);
}
1310
1311int iwm_rx_handle_resp(struct iwm_priv *iwm, u8 *buf, unsigned long buf_size,
1312 struct iwm_wifi_cmd *cmd)
1313{
1314 u8 source, cmd_id;
1315 struct iwm_umac_wifi_in_hdr *wifi_hdr;
1316 int ret = 0;
1317
1318 wifi_hdr = (struct iwm_umac_wifi_in_hdr *)buf;
1319 cmd_id = wifi_hdr->sw_hdr.cmd.cmd;
1320
1321 source = GET_VAL32(wifi_hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE);
1322
1323 IWM_DBG_RX(iwm, DBG, "CMD:0x%x, source: 0x%x\n", cmd_id, source);
1324
1325 switch (source) {
1326 case UMAC_HDI_IN_SOURCE_FHRX:
1327 if (iwm->lmac_handlers[cmd_id])
1328 ret = iwm->lmac_handlers[cmd_id]
1329 (iwm, buf, buf_size, cmd);
1330 break;
1331 case UMAC_HDI_IN_SOURCE_FW:
1332 if (iwm->umac_handlers[cmd_id])
1333 ret = iwm->umac_handlers[cmd_id]
1334 (iwm, buf, buf_size, cmd);
1335 break;
1336 case UMAC_HDI_IN_SOURCE_UDMA:
1337 ret = -EINVAL;
1338 break;
1339 }
1340
1341 kfree(cmd);
1342
1343 return ret;
1344}
1345
/*
 * Handle a non-wifi (UDMA) answer: match it against a pending non-wifi
 * command by hardware sequence number and hand the payload back to the
 * synchronous waiter.
 */
static int iwm_rx_handle_nonwifi(struct iwm_priv *iwm, u8 *buf,
				 unsigned long buf_size)
{
	u8 seq_num;
	struct iwm_udma_in_hdr *hdr = (struct iwm_udma_in_hdr *)buf;
	struct iwm_nonwifi_cmd *cmd;

	trace_iwm_rx_nonwifi_cmd(iwm, buf, buf_size);
	seq_num = GET_VAL32(hdr->cmd, UDMA_HDI_IN_CMD_NON_WIFI_HW_SEQ_NUM);

	/*
	 * We received a non wifi answer.
	 * Let's check if there's a pending command for it, and if so
	 * replace the command payload with the buffer, and then wake the
	 * callers up.
	 * That means we only support synchronised non wifi command response
	 * schemes.
	 */
	/* NOTE(review): buf_size is copied into cmd->buf.hdr without an
	 * explicit capacity check — assumes the command buffer is sized
	 * for the largest possible UDMA answer; confirm where cmd->buf
	 * is allocated. */
	list_for_each_entry(cmd, &iwm->nonwifi_pending_cmd, pending)
		if (cmd->seq_num == seq_num) {
			cmd->resp_received = true;
			cmd->buf.len = buf_size;
			memcpy(cmd->buf.hdr, buf, buf_size);
			wake_up_interruptible(&iwm->nonwifi_queue);
		}

	return 0;
}
1374
/*
 * Walk a bus frame and dispatch every UDMA-encapsulated message it
 * contains, wifi or non-wifi. Returns 0 when everything parsed cleanly,
 * otherwise an OR-accumulation of the handlers' negative error codes
 * (i.e. non-zero means "something failed").
 */
static int iwm_rx_handle_umac(struct iwm_priv *iwm, u8 *buf,
			      unsigned long buf_size)
{
	int ret = 0;
	u8 op_code;
	unsigned long buf_offset = 0;
	struct iwm_udma_in_hdr *hdr;

	/*
	 * To allow for a more efficient bus usage, UMAC
	 * messages are encapsulated into UDMA ones. This
	 * way we can have several UMAC messages in one bus
	 * transfer.
	 * A UDMA frame size is always aligned on 16 bytes,
	 * and a UDMA frame must not start with a UMAC_PAD_TERMINAL
	 * word. This is how we parse a bus frame into several
	 * UDMA ones.
	 */
	while (buf_offset < buf_size) {

		hdr = (struct iwm_udma_in_hdr *)(buf + buf_offset);

		/* Padding terminator: the rest of the frame is empty. */
		if (iwm_rx_check_udma_hdr(hdr) < 0) {
			IWM_DBG_RX(iwm, DBG, "End of frame\n");
			break;
		}

		op_code = GET_VAL32(hdr->cmd, UMAC_HDI_IN_CMD_OPCODE);

		IWM_DBG_RX(iwm, DBG, "Op code: 0x%x\n", op_code);

		if (op_code == UMAC_HDI_IN_OPCODE_WIFI) {
			ret |= iwm_rx_handle_wifi(iwm, buf + buf_offset,
						  buf_size - buf_offset);
		} else if (op_code < UMAC_HDI_IN_OPCODE_NONWIFI_MAX) {
			/* Non-wifi messages must carry the HW signature. */
			if (GET_VAL32(hdr->cmd,
				      UDMA_HDI_IN_CMD_NON_WIFI_HW_SIG) !=
			    UDMA_HDI_IN_CMD_NON_WIFI_HW_SIG) {
				IWM_ERR(iwm, "Incorrect hw signature\n");
				return -EINVAL;
			}
			ret |= iwm_rx_handle_nonwifi(iwm, buf + buf_offset,
						     buf_size - buf_offset);
		} else {
			IWM_ERR(iwm, "Invalid RX opcode: 0x%x\n", op_code);
			ret |= -EINVAL;
		}

		/* Advance to the next 16-byte-aligned UDMA frame. */
		buf_offset += iwm_rx_resp_size(hdr);
	}

	return ret;
}
1428
1429int iwm_rx_handle(struct iwm_priv *iwm, u8 *buf, unsigned long buf_size)
1430{
1431 struct iwm_udma_in_hdr *hdr;
1432
1433 hdr = (struct iwm_udma_in_hdr *)buf;
1434
1435 switch (le32_to_cpu(hdr->cmd)) {
1436 case UMAC_REBOOT_BARKER:
1437 if (test_bit(IWM_STATUS_READY, &iwm->status)) {
1438 IWM_ERR(iwm, "Unexpected BARKER\n");
1439
1440 schedule_work(&iwm->reset_worker);
1441
1442 return 0;
1443 }
1444
1445 return iwm_notif_send(iwm, NULL, IWM_BARKER_REBOOT_NOTIFICATION,
1446 IWM_SRC_UDMA, buf, buf_size);
1447 case UMAC_ACK_BARKER:
1448 return iwm_notif_send(iwm, NULL, IWM_ACK_BARKER_NOTIFICATION,
1449 IWM_SRC_UDMA, NULL, 0);
1450 default:
1451 IWM_DBG_RX(iwm, DBG, "Received cmd: 0x%x\n", hdr->cmd);
1452 return iwm_rx_handle_umac(iwm, buf, buf_size);
1453 }
1454
1455 return 0;
1456}
1457
/* Dispatch table for frames sourced from the UMAC firmware, indexed by
 * opcode (see iwm_rx_handle_resp()). Unlisted opcodes are NULL. */
static const iwm_handler iwm_umac_handlers[] =
{
	[UMAC_NOTIFY_OPCODE_ERROR] = iwm_ntf_error,
	[UMAC_NOTIFY_OPCODE_ALIVE] = iwm_ntf_umac_alive,
	[UMAC_NOTIFY_OPCODE_INIT_COMPLETE] = iwm_ntf_init_complete,
	[UMAC_NOTIFY_OPCODE_WIFI_CORE_STATUS] = iwm_ntf_wifi_status,
	[UMAC_NOTIFY_OPCODE_WIFI_IF_WRAPPER] = iwm_ntf_mlme,
	[UMAC_NOTIFY_OPCODE_PAGE_DEALLOC] = iwm_ntf_tx_credit_update,
	[UMAC_NOTIFY_OPCODE_RX_TICKET] = iwm_ntf_rx_ticket,
	[UMAC_CMD_OPCODE_RESET] = iwm_ntf_umac_reset,
	[UMAC_NOTIFY_OPCODE_STATS] = iwm_ntf_statistics,
	[UMAC_CMD_OPCODE_EEPROM_PROXY] = iwm_ntf_eeprom_proxy,
	[UMAC_CMD_OPCODE_GET_CHAN_INFO_LIST] = iwm_ntf_channel_info_list,
	[UMAC_CMD_OPCODE_STOP_RESUME_STA_TX] = iwm_ntf_stop_resume_tx,
	[REPLY_RX_MPDU_CMD] = iwm_ntf_rx_packet,
	[UMAC_CMD_OPCODE_WIFI_IF_WRAPPER] = iwm_ntf_wifi_if_wrapper,
};
1475
/* Dispatch table for frames sourced from the LMAC (FHRX), indexed by
 * reply/notification opcode. Unlisted opcodes are NULL. */
static const iwm_handler iwm_lmac_handlers[] =
{
	[REPLY_TX] = iwm_ntf_tx,
	[REPLY_ALIVE] = iwm_ntf_lmac_version,
	[CALIBRATION_RES_NOTIFICATION] = iwm_ntf_calib_res,
	[CALIBRATION_COMPLETE_NOTIFICATION] = iwm_ntf_calib_complete,
	[CALIBRATION_CFG_CMD] = iwm_ntf_calib_cfg,
	[REPLY_RX_MPDU_CMD] = iwm_ntf_rx_packet,
	[CARD_STATE_NOTIFICATION] = iwm_ntf_card_state,
};
1486
/*
 * Point the per-device handler table pointers at the static tables.
 * The const is cast away because the iwm_priv fields are non-const
 * pointers; the tables are never written through them.
 */
void iwm_rx_setup_handlers(struct iwm_priv *iwm)
{
	iwm->umac_handlers = (iwm_handler *) iwm_umac_handlers;
	iwm->lmac_handlers = (iwm_handler *) iwm_lmac_handlers;
}
1492
1493static void iwm_remove_iv(struct sk_buff *skb, u32 hdr_total_len)
1494{
1495 struct ieee80211_hdr *hdr;
1496 unsigned int hdr_len;
1497
1498 hdr = (struct ieee80211_hdr *)skb->data;
1499
1500 if (!ieee80211_has_protected(hdr->frame_control))
1501 return;
1502
1503 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1504 if (hdr_total_len <= hdr_len)
1505 return;
1506
1507 memmove(skb->data + (hdr_total_len - hdr_len), skb->data, hdr_len);
1508 skb_pull(skb, (hdr_total_len - hdr_len));
1509}
1510
/*
 * Trim an RX packet down to its actual payload using the information
 * from its RX ticket: drop the MPDU header, the padding/tail bytes,
 * the security IV, and the 2 bytes the UMAC appends after the QoS
 * control field of QoS data frames.
 */
static void iwm_rx_adjust_packet(struct iwm_priv *iwm,
				 struct iwm_rx_packet *packet,
				 struct iwm_rx_ticket_node *ticket_node)
{
	u32 payload_offset = 0, payload_len;
	struct iwm_rx_ticket *ticket = ticket_node->ticket;
	struct iwm_rx_mpdu_hdr *mpdu_hdr;
	struct ieee80211_hdr *hdr;

	mpdu_hdr = (struct iwm_rx_mpdu_hdr *)packet->skb->data;
	payload_offset += sizeof(struct iwm_rx_mpdu_hdr);
	/* Padding is 0 or 2 bytes */
	payload_len = le16_to_cpu(mpdu_hdr->len) +
		      (le16_to_cpu(ticket->flags) & IWM_RX_TICKET_PAD_SIZE_MSK);
	payload_len -= ticket->tail_len;

	IWM_DBG_RX(iwm, DBG, "Packet adjusted, len:%d, offset:%d, "
		   "ticket offset:%d ticket tail len:%d\n",
		   payload_len, payload_offset, ticket->payload_offset,
		   ticket->tail_len);

	IWM_HEXDUMP(iwm, DBG, RX, "RAW: ", packet->skb->data, packet->skb->len);

	/* Skip the MPDU header and cut the padding/tail off the end. */
	skb_pull(packet->skb, payload_offset);
	skb_trim(packet->skb, payload_len);

	/* ticket->payload_offset is the total 802.11 header length
	 * including any security IV; iwm_remove_iv() strips the IV. */
	iwm_remove_iv(packet->skb, ticket->payload_offset);

	hdr = (struct ieee80211_hdr *) packet->skb->data;
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		/* UMAC handed QOS_DATA frame with 2 padding bytes appended
		 * to the qos_ctl field in IEEE 802.11 headers. */
		memmove(packet->skb->data + IEEE80211_QOS_CTL_LEN + 2,
			packet->skb->data,
			ieee80211_hdrlen(hdr->frame_control) -
			IEEE80211_QOS_CTL_LEN);
		hdr = (struct ieee80211_hdr *) skb_pull(packet->skb,
				IEEE80211_QOS_CTL_LEN + 2);
		/* Downgrade the subtype to plain DATA now that the QoS
		 * control field is gone. */
		hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
	}

	IWM_HEXDUMP(iwm, DBG, RX, "ADJUSTED: ",
		    packet->skb->data, packet->skb->len);
}
1555
1556static void classify8023(struct sk_buff *skb)
1557{
1558 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1559
1560 if (ieee80211_is_data_qos(hdr->frame_control)) {
1561 u8 *qc = ieee80211_get_qos_ctl(hdr);
1562 /* frame has qos control */
1563 skb->priority = *qc & IEEE80211_QOS_CTL_TID_MASK;
1564 } else {
1565 skb->priority = 0;
1566 }
1567}
1568
1569static void iwm_rx_process_amsdu(struct iwm_priv *iwm, struct sk_buff *skb)
1570{
1571 struct wireless_dev *wdev = iwm_to_wdev(iwm);
1572 struct net_device *ndev = iwm_to_ndev(iwm);
1573 struct sk_buff_head list;
1574 struct sk_buff *frame;
1575
1576 IWM_HEXDUMP(iwm, DBG, RX, "A-MSDU: ", skb->data, skb->len);
1577
1578 __skb_queue_head_init(&list);
1579 ieee80211_amsdu_to_8023s(skb, &list, ndev->dev_addr, wdev->iftype, 0,
1580 true);
1581
1582 while ((frame = __skb_dequeue(&list))) {
1583 ndev->stats.rx_packets++;
1584 ndev->stats.rx_bytes += frame->len;
1585
1586 frame->protocol = eth_type_trans(frame, ndev);
1587 frame->ip_summed = CHECKSUM_NONE;
1588 memset(frame->cb, 0, sizeof(frame->cb));
1589
1590 if (netif_rx_ni(frame) == NET_RX_DROP) {
1591 IWM_ERR(iwm, "Packet dropped\n");
1592 ndev->stats.rx_dropped++;
1593 }
1594 }
1595}
1596
/*
 * Process one RX packet according to its ticket: RELEASE means adjust
 * it and push it up the network stack (possibly de-aggregating an
 * A-MSDU), DROP means discard it. The packet struct and ticket node
 * are always consumed, whatever the outcome.
 */
static void iwm_rx_process_packet(struct iwm_priv *iwm,
				  struct iwm_rx_packet *packet,
				  struct iwm_rx_ticket_node *ticket_node)
{
	int ret;
	struct sk_buff *skb = packet->skb;
	struct wireless_dev *wdev = iwm_to_wdev(iwm);
	struct net_device *ndev = iwm_to_ndev(iwm);

	IWM_DBG_RX(iwm, DBG, "Processing packet ID %d\n", packet->id);

	switch (le16_to_cpu(ticket_node->ticket->action)) {
	case IWM_RX_TICKET_RELEASE:
		IWM_DBG_RX(iwm, DBG, "RELEASE packet\n");

		/* Strip firmware headers/padding down to the payload. */
		iwm_rx_adjust_packet(iwm, packet, ticket_node);
		skb->dev = iwm_to_ndev(iwm);
		classify8023(skb);

		/* A-MSDUs take their own de-aggregation path; the skb is
		 * consumed by iwm_rx_process_amsdu(). */
		if (le16_to_cpu(ticket_node->ticket->flags) &
		    IWM_RX_TICKET_AMSDU_MSK) {
			iwm_rx_process_amsdu(iwm, skb);
			break;
		}

		ret = ieee80211_data_to_8023(skb, ndev->dev_addr, wdev->iftype);
		if (ret < 0) {
			IWM_DBG_RX(iwm, DBG, "Couldn't convert 802.11 header - "
				   "%d\n", ret);
			kfree_skb(packet->skb);
			break;
		}

		IWM_HEXDUMP(iwm, DBG, RX, "802.3: ", skb->data, skb->len);

		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += skb->len;

		skb->protocol = eth_type_trans(skb, ndev);
		skb->ip_summed = CHECKSUM_NONE;
		memset(skb->cb, 0, sizeof(skb->cb));

		/* netif_rx_ni() consumes the skb either way; only the
		 * stats differ on drop. */
		if (netif_rx_ni(skb) == NET_RX_DROP) {
			IWM_ERR(iwm, "Packet dropped\n");
			ndev->stats.rx_dropped++;
		}
		break;
	case IWM_RX_TICKET_DROP:
		IWM_DBG_RX(iwm, DBG, "DROP packet: 0x%x\n",
			   le16_to_cpu(ticket_node->ticket->flags));
		kfree_skb(packet->skb);
		break;
	default:
		IWM_ERR(iwm, "Unknown ticket action: %d\n",
			le16_to_cpu(ticket_node->ticket->action));
		kfree_skb(packet->skb);
	}

	/* The packet container and ticket node are done in all cases. */
	kfree(packet);
	iwm_rx_ticket_node_free(ticket_node);
}
1658
1659/*
1660 * Rx data processing:
1661 *
1662 * We're receiving Rx packet from the LMAC, and Rx ticket from
1663 * the UMAC.
1664 * To forward a target data packet upstream (i.e. to the
1665 * kernel network stack), we must have received an Rx ticket
1666 * that tells us we're allowed to release this packet (ticket
1667 * action is IWM_RX_TICKET_RELEASE). The Rx ticket also indicates,
1668 * among other things, where valid data actually starts in the Rx
1669 * packet.
1670 */
/*
 * RX worker: pair queued RX tickets with their data packets and push
 * each released packet up the stack, strictly in ticket order.
 */
void iwm_rx_worker(struct work_struct *work)
{
	struct iwm_priv *iwm;
	struct iwm_rx_ticket_node *ticket, *next;

	iwm = container_of(work, struct iwm_priv, rx_worker);

	/*
	 * We go through the tickets list and if there is a pending
	 * packet for it, we push it upstream.
	 * We stop whenever a ticket is missing its packet, as we're
	 * supposed to send the packets in order.
	 */
	spin_lock(&iwm->ticket_lock);
	list_for_each_entry_safe(ticket, next, &iwm->rx_tickets, node) {
		struct iwm_rx_packet *packet =
			iwm_rx_packet_get(iwm, le16_to_cpu(ticket->ticket->id));

		/* Preserve ordering: wait for the missing packet before
		 * processing any later ticket. */
		if (!packet) {
			IWM_DBG_RX(iwm, DBG, "Skip rx_work: Wait for ticket %d "
				   "to be handled first\n",
				   le16_to_cpu(ticket->ticket->id));
			break;
		}

		list_del(&ticket->node);
		/* Consumes both the packet and the ticket node. */
		iwm_rx_process_packet(iwm, packet, ticket);
	}
	spin_unlock(&iwm->ticket_lock);
}
1701
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.h b/drivers/net/wireless/iwmc3200wifi/rx.h
deleted file mode 100644
index da0db91cee59..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/rx.h
+++ /dev/null
@@ -1,60 +0,0 @@
1/*
2 * Intel Wireless Multicomm 3200 WiFi driver
3 *
4 * Copyright (C) 2009 Intel Corporation. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Intel Corporation nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 *
33 * Intel Corporation <ilw@linux.intel.com>
34 * Samuel Ortiz <samuel.ortiz@intel.com>
35 * Zhu Yi <yi.zhu@intel.com>
36 *
37 */
38
39#ifndef __IWM_RX_H__
40#define __IWM_RX_H__
41
42#include <linux/skbuff.h>
43
44#include "umac.h"
45
/* Links one UMAC Rx ticket onto the driver's pending ticket list. */
struct iwm_rx_ticket_node {
	struct list_head node;
	struct iwm_rx_ticket *ticket;
};

/* An Rx frame from the LMAC, held until its matching ticket arrives. */
struct iwm_rx_packet {
	struct list_head node;
	u16 id;			/* matched against the ticket id in rx.c */
	struct sk_buff *skb;
	unsigned long pkt_size;
};
57
58void iwm_rx_worker(struct work_struct *work);
59
60#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.c b/drivers/net/wireless/iwmc3200wifi/sdio.c
deleted file mode 100644
index 0042f204b07f..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/sdio.c
+++ /dev/null
@@ -1,509 +0,0 @@
1/*
2 * Intel Wireless Multicomm 3200 WiFi driver
3 *
4 * Copyright (C) 2009 Intel Corporation. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Intel Corporation nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 *
33 * Intel Corporation <ilw@linux.intel.com>
34 * Samuel Ortiz <samuel.ortiz@intel.com>
35 * Zhu Yi <yi.zhu@intel.com>
36 *
37 */
38
39/*
40 * This is the SDIO bus specific hooks for iwm.
41 * It also is the module's entry point.
42 *
43 * Interesting code paths:
44 * iwm_sdio_probe() (Called by an SDIO bus scan)
45 * -> iwm_if_alloc() (netdev.c)
46 * -> iwm_wdev_alloc() (cfg80211.c, allocates and register our wiphy)
47 * -> wiphy_new()
48 * -> wiphy_register()
49 * -> alloc_netdev_mq()
50 * -> register_netdev()
51 *
52 * iwm_sdio_remove()
53 * -> iwm_if_free() (netdev.c)
54 * -> unregister_netdev()
55 * -> iwm_wdev_free() (cfg80211.c)
56 * -> wiphy_unregister()
57 * -> wiphy_free()
58 *
59 * iwm_sdio_isr() (called in process context from the SDIO core code)
60 * -> queue_work(.., isr_worker)
61 * -- [async] --> iwm_sdio_isr_worker()
62 * -> iwm_rx_handle()
63 */
64
65#include <linux/kernel.h>
66#include <linux/module.h>
67#include <linux/slab.h>
68#include <linux/netdevice.h>
69#include <linux/debugfs.h>
70#include <linux/mmc/sdio_ids.h>
71#include <linux/mmc/sdio.h>
72#include <linux/mmc/sdio_func.h>
73
74#include "iwm.h"
75#include "debug.h"
76#include "bus.h"
77#include "sdio.h"
78
79static void iwm_sdio_isr_worker(struct work_struct *work)
80{
81 struct iwm_sdio_priv *hw;
82 struct iwm_priv *iwm;
83 struct iwm_rx_info *rx_info;
84 struct sk_buff *skb;
85 u8 *rx_buf;
86 unsigned long rx_size;
87
88 hw = container_of(work, struct iwm_sdio_priv, isr_worker);
89 iwm = hw_to_iwm(hw);
90
91 while (!skb_queue_empty(&iwm->rx_list)) {
92 skb = skb_dequeue(&iwm->rx_list);
93 rx_info = skb_to_rx_info(skb);
94 rx_size = rx_info->rx_size;
95 rx_buf = skb->data;
96
97 IWM_HEXDUMP(iwm, DBG, SDIO, "RX: ", rx_buf, rx_size);
98 if (iwm_rx_handle(iwm, rx_buf, rx_size) < 0)
99 IWM_WARN(iwm, "RX error\n");
100
101 kfree_skb(skb);
102 }
103}
104
105static void iwm_sdio_isr(struct sdio_func *func)
106{
107 struct iwm_priv *iwm;
108 struct iwm_sdio_priv *hw;
109 struct iwm_rx_info *rx_info;
110 struct sk_buff *skb;
111 unsigned long buf_size, read_size;
112 int ret;
113 u8 val;
114
115 hw = sdio_get_drvdata(func);
116 iwm = hw_to_iwm(hw);
117
118 buf_size = hw->blk_size;
119
120 /* We're checking the status */
121 val = sdio_readb(func, IWM_SDIO_INTR_STATUS_ADDR, &ret);
122 if (val == 0 || ret < 0) {
123 IWM_ERR(iwm, "Wrong INTR_STATUS\n");
124 return;
125 }
126
127 /* See if we have free buffers */
128 if (skb_queue_len(&iwm->rx_list) > IWM_RX_LIST_SIZE) {
129 IWM_ERR(iwm, "No buffer for more Rx frames\n");
130 return;
131 }
132
133 /* We first read the transaction size */
134 read_size = sdio_readb(func, IWM_SDIO_INTR_GET_SIZE_ADDR + 1, &ret);
135 read_size = read_size << 8;
136
137 if (ret < 0) {
138 IWM_ERR(iwm, "Couldn't read the xfer size\n");
139 return;
140 }
141
142 /* We need to clear the INT register */
143 sdio_writeb(func, 1, IWM_SDIO_INTR_CLEAR_ADDR, &ret);
144 if (ret < 0) {
145 IWM_ERR(iwm, "Couldn't clear the INT register\n");
146 return;
147 }
148
149 while (buf_size < read_size)
150 buf_size <<= 1;
151
152 skb = dev_alloc_skb(buf_size);
153 if (!skb) {
154 IWM_ERR(iwm, "Couldn't alloc RX skb\n");
155 return;
156 }
157 rx_info = skb_to_rx_info(skb);
158 rx_info->rx_size = read_size;
159 rx_info->rx_buf_size = buf_size;
160
161 /* Now we can read the actual buffer */
162 ret = sdio_memcpy_fromio(func, skb_put(skb, read_size),
163 IWM_SDIO_DATA_ADDR, read_size);
164
165 /* The skb is put on a driver's specific Rx SKB list */
166 skb_queue_tail(&iwm->rx_list, skb);
167
168 /* We can now schedule the actual worker */
169 queue_work(hw->isr_wq, &hw->isr_worker);
170}
171
/*
 * Wait for any in-flight isr_worker run to finish, then drop every Rx
 * frame still queued.  Called from if_sdio_disable().
 */
static void iwm_sdio_rx_free(struct iwm_sdio_priv *hw)
{
	struct iwm_priv *iwm = hw_to_iwm(hw);

	flush_workqueue(hw->isr_wq);

	skb_queue_purge(&iwm->rx_list);
}
180
181/* Bus ops */
/*
 * Bus-level enable: power up the SDIO function, reset the device, claim
 * the function IRQ and finally set the device-side interrupt enable bit.
 * On failure everything acquired so far is released in reverse order.
 *
 * Returns 0 on success or a negative errno.
 */
static int if_sdio_enable(struct iwm_priv *iwm)
{
	struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
	int ret;

	sdio_claim_host(hw->func);

	ret = sdio_enable_func(hw->func);
	if (ret) {
		/* The TOP (iwmc3200top) driver initializes the card first */
		IWM_ERR(iwm, "Couldn't enable the device: is TOP driver "
			"loaded and functional?\n");
		goto release_host;
	}

	iwm_reset(iwm);

	ret = sdio_claim_irq(hw->func, iwm_sdio_isr);
	if (ret) {
		IWM_ERR(iwm, "Failed to claim irq: %d\n", ret);
		goto release_host;
	}

	/* Device-side interrupt enable; undone via release_irq on error */
	sdio_writeb(hw->func, 1, IWM_SDIO_INTR_ENABLE_ADDR, &ret);
	if (ret < 0) {
		IWM_ERR(iwm, "Couldn't enable INTR: %d\n", ret);
		goto release_irq;
	}

	sdio_release_host(hw->func);

	IWM_DBG_SDIO(iwm, INFO, "IWM SDIO enable\n");

	return 0;

 release_irq:
	sdio_release_irq(hw->func);
 release_host:
	sdio_release_host(hw->func);

	return ret;
}
223
/*
 * Bus-level disable: mask the device interrupt, release the IRQ and the
 * SDIO function, then flush/drop pending Rx frames and reset the device.
 *
 * Always returns 0.
 */
static int if_sdio_disable(struct iwm_priv *iwm)
{
	struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
	int ret;

	sdio_claim_host(hw->func);
	/* Best effort: failing to mask INTR is logged but not fatal */
	sdio_writeb(hw->func, 0, IWM_SDIO_INTR_ENABLE_ADDR, &ret);
	if (ret < 0)
		IWM_WARN(iwm, "Couldn't disable INTR: %d\n", ret);

	sdio_release_irq(hw->func);
	sdio_disable_func(hw->func);
	sdio_release_host(hw->func);

	iwm_sdio_rx_free(hw);

	iwm_reset(iwm);

	IWM_DBG_SDIO(iwm, INFO, "IWM SDIO disable\n");

	return 0;
}
246
247static int if_sdio_send_chunk(struct iwm_priv *iwm, u8 *buf, int count)
248{
249 struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
250 int aligned_count = ALIGN(count, hw->blk_size);
251 int ret;
252
253 if ((unsigned long)buf & 0x3) {
254 IWM_ERR(iwm, "buf <%p> is not dword aligned\n", buf);
255 /* TODO: Is this a hardware limitation? use get_unligned */
256 return -EINVAL;
257 }
258
259 sdio_claim_host(hw->func);
260 ret = sdio_memcpy_toio(hw->func, IWM_SDIO_DATA_ADDR, buf,
261 aligned_count);
262 sdio_release_host(hw->func);
263
264 return ret;
265}
266
267static ssize_t iwm_debugfs_sdio_read(struct file *filp, char __user *buffer,
268 size_t count, loff_t *ppos)
269{
270 struct iwm_priv *iwm = filp->private_data;
271 struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
272 char *buf;
273 u8 cccr;
274 int buf_len = 4096, ret;
275 size_t len = 0;
276
277 if (*ppos != 0)
278 return 0;
279 if (count < sizeof(buf))
280 return -ENOSPC;
281
282 buf = kzalloc(buf_len, GFP_KERNEL);
283 if (!buf)
284 return -ENOMEM;
285
286 sdio_claim_host(hw->func);
287
288 cccr = sdio_f0_readb(hw->func, SDIO_CCCR_IOEx, &ret);
289 if (ret) {
290 IWM_ERR(iwm, "Could not read SDIO_CCCR_IOEx\n");
291 goto err;
292 }
293 len += snprintf(buf + len, buf_len - len, "CCCR_IOEx: 0x%x\n", cccr);
294
295 cccr = sdio_f0_readb(hw->func, SDIO_CCCR_IORx, &ret);
296 if (ret) {
297 IWM_ERR(iwm, "Could not read SDIO_CCCR_IORx\n");
298 goto err;
299 }
300 len += snprintf(buf + len, buf_len - len, "CCCR_IORx: 0x%x\n", cccr);
301
302
303 cccr = sdio_f0_readb(hw->func, SDIO_CCCR_IENx, &ret);
304 if (ret) {
305 IWM_ERR(iwm, "Could not read SDIO_CCCR_IENx\n");
306 goto err;
307 }
308 len += snprintf(buf + len, buf_len - len, "CCCR_IENx: 0x%x\n", cccr);
309
310
311 cccr = sdio_f0_readb(hw->func, SDIO_CCCR_INTx, &ret);
312 if (ret) {
313 IWM_ERR(iwm, "Could not read SDIO_CCCR_INTx\n");
314 goto err;
315 }
316 len += snprintf(buf + len, buf_len - len, "CCCR_INTx: 0x%x\n", cccr);
317
318
319 cccr = sdio_f0_readb(hw->func, SDIO_CCCR_ABORT, &ret);
320 if (ret) {
321 IWM_ERR(iwm, "Could not read SDIO_CCCR_ABORTx\n");
322 goto err;
323 }
324 len += snprintf(buf + len, buf_len - len, "CCCR_ABORT: 0x%x\n", cccr);
325
326 cccr = sdio_f0_readb(hw->func, SDIO_CCCR_IF, &ret);
327 if (ret) {
328 IWM_ERR(iwm, "Could not read SDIO_CCCR_IF\n");
329 goto err;
330 }
331 len += snprintf(buf + len, buf_len - len, "CCCR_IF: 0x%x\n", cccr);
332
333
334 cccr = sdio_f0_readb(hw->func, SDIO_CCCR_CAPS, &ret);
335 if (ret) {
336 IWM_ERR(iwm, "Could not read SDIO_CCCR_CAPS\n");
337 goto err;
338 }
339 len += snprintf(buf + len, buf_len - len, "CCCR_CAPS: 0x%x\n", cccr);
340
341 cccr = sdio_f0_readb(hw->func, SDIO_CCCR_CIS, &ret);
342 if (ret) {
343 IWM_ERR(iwm, "Could not read SDIO_CCCR_CIS\n");
344 goto err;
345 }
346 len += snprintf(buf + len, buf_len - len, "CCCR_CIS: 0x%x\n", cccr);
347
348 ret = simple_read_from_buffer(buffer, len, ppos, buf, buf_len);
349err:
350 sdio_release_host(hw->func);
351
352 kfree(buf);
353
354 return ret;
355}
356
/* Read-only debugfs file dumping the function 0 CCCR registers. */
static const struct file_operations iwm_debugfs_sdio_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = iwm_debugfs_sdio_read,
	.llseek = default_llseek,
};
363
364static void if_sdio_debugfs_init(struct iwm_priv *iwm, struct dentry *parent_dir)
365{
366 struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
367
368 hw->cccr_dentry = debugfs_create_file("cccr", 0200,
369 parent_dir, iwm,
370 &iwm_debugfs_sdio_fops);
371}
372
/* Remove the "cccr" entry created by if_sdio_debugfs_init(). */
static void if_sdio_debugfs_exit(struct iwm_priv *iwm)
{
	struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);

	debugfs_remove(hw->cccr_dentry);
}
379
/*
 * Bus operations handed to the iwm core (via iwm_if_alloc() in probe),
 * plus the SDIO firmware image names for this interface.
 */
static struct iwm_if_ops if_sdio_ops = {
	.enable = if_sdio_enable,
	.disable = if_sdio_disable,
	.send_chunk = if_sdio_send_chunk,
	.debugfs_init = if_sdio_debugfs_init,
	.debugfs_exit = if_sdio_debugfs_exit,
	.umac_name = "iwmc3200wifi-umac-sdio.bin",
	.calib_lmac_name = "iwmc3200wifi-calib-sdio.bin",
	.lmac_name = "iwmc3200wifi-lmac-sdio.bin",
};
MODULE_FIRMWARE("iwmc3200wifi-umac-sdio.bin");
MODULE_FIRMWARE("iwmc3200wifi-calib-sdio.bin");
MODULE_FIRMWARE("iwmc3200wifi-lmac-sdio.bin");
393
/*
 * SDIO probe: verify the function can be enabled (i.e. the TOP driver
 * already initialized the card), set the block size, then allocate and
 * register the iwm interface.  Failures after allocation unwind via
 * goto labels in reverse order.
 *
 * Returns 0 on success or a negative errno.
 */
static int iwm_sdio_probe(struct sdio_func *func,
			  const struct sdio_device_id *id)
{
	struct iwm_priv *iwm;
	struct iwm_sdio_priv *hw;
	struct device *dev = &func->dev;
	int ret;

	/* check if TOP has already initialized the card */
	sdio_claim_host(func);
	ret = sdio_enable_func(func);
	if (ret) {
		dev_err(dev, "wait for TOP to enable the device\n");
		sdio_release_host(func);
		return ret;
	}

	ret = sdio_set_block_size(func, IWM_SDIO_BLK_SIZE);

	/* The function is re-enabled later by if_sdio_enable() */
	sdio_disable_func(func);
	sdio_release_host(func);

	if (ret < 0) {
		dev_err(dev, "Failed to set block size: %d\n", ret);
		return ret;
	}

	iwm = iwm_if_alloc(sizeof(struct iwm_sdio_priv), dev, &if_sdio_ops);
	if (IS_ERR(iwm)) {
		dev_err(dev, "allocate SDIO interface failed\n");
		return PTR_ERR(iwm);
	}

	/* hw is the bus-private area embedded in iwm */
	hw = iwm_private(iwm);
	hw->iwm = iwm;

	iwm_debugfs_init(iwm);

	sdio_set_drvdata(func, hw);

	hw->func = func;
	hw->blk_size = IWM_SDIO_BLK_SIZE;

	hw->isr_wq = create_singlethread_workqueue(KBUILD_MODNAME "_sdio");
	if (!hw->isr_wq) {
		ret = -ENOMEM;
		goto debugfs_exit;
	}

	INIT_WORK(&hw->isr_worker, iwm_sdio_isr_worker);

	ret = iwm_if_add(iwm);
	if (ret) {
		dev_err(dev, "add SDIO interface failed\n");
		goto destroy_wq;
	}

	dev_info(dev, "IWM SDIO probe\n");

	return 0;

 destroy_wq:
	destroy_workqueue(hw->isr_wq);
 debugfs_exit:
	iwm_debugfs_exit(iwm);
	iwm_if_free(iwm);
	return ret;
}
462
/*
 * SDIO remove: tear down in reverse order of probe — unregister the
 * interface, destroy the ISR workqueue, remove the debugfs entries and
 * free the iwm interface.
 */
static void iwm_sdio_remove(struct sdio_func *func)
{
	struct iwm_sdio_priv *hw = sdio_get_drvdata(func);
	struct iwm_priv *iwm = hw_to_iwm(hw);
	struct device *dev = &func->dev;

	iwm_if_remove(iwm);
	destroy_workqueue(hw->isr_wq);
	iwm_debugfs_exit(iwm);
	iwm_if_free(iwm);

	sdio_set_drvdata(func, NULL);

	dev_info(dev, "IWM SDIO remove\n");
}
478
/* SDIO IDs of the Intel iwmc3200 wifi function, one entry per SKU. */
static const struct sdio_device_id iwm_sdio_ids[] = {
	/* Global/AGN SKU */
	{ SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1403) },
	/* BGN SKU */
	{ SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1408) },
	{ /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(sdio, iwm_sdio_ids);
487
static struct sdio_driver iwm_sdio_driver = {
	.name = "iwm_sdio",
	.id_table = iwm_sdio_ids,
	.probe = iwm_sdio_probe,
	.remove = iwm_sdio_remove,
};

/* Module entry points: just register/unregister the SDIO driver. */
static int __init iwm_sdio_init_module(void)
{
	return sdio_register_driver(&iwm_sdio_driver);
}

static void __exit iwm_sdio_exit_module(void)
{
	sdio_unregister_driver(&iwm_sdio_driver);
}

module_init(iwm_sdio_init_module);
module_exit(iwm_sdio_exit_module);

MODULE_LICENSE("GPL");
MODULE_AUTHOR(IWM_COPYRIGHT " " IWM_AUTHOR);
diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.h b/drivers/net/wireless/iwmc3200wifi/sdio.h
deleted file mode 100644
index aab6b6892e45..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/sdio.h
+++ /dev/null
@@ -1,64 +0,0 @@
1/*
2 * Intel Wireless Multicomm 3200 WiFi driver
3 *
4 * Copyright (C) 2009 Intel Corporation. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Intel Corporation nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 *
33 * Intel Corporation <ilw@linux.intel.com>
34 * Samuel Ortiz <samuel.ortiz@intel.com>
35 * Zhu Yi <yi.zhu@intel.com>
36 *
37 */
38
39#ifndef __IWM_SDIO_H__
40#define __IWM_SDIO_H__
41
42#define IWM_SDIO_DATA_ADDR 0x0
43#define IWM_SDIO_INTR_ENABLE_ADDR 0x14
44#define IWM_SDIO_INTR_STATUS_ADDR 0x13
45#define IWM_SDIO_INTR_CLEAR_ADDR 0x13
46#define IWM_SDIO_INTR_GET_SIZE_ADDR 0x2C
47
48#define IWM_SDIO_BLK_SIZE 256
49
50#define iwm_to_if_sdio(i) (struct iwm_sdio_priv *)(iwm->private)
51
52struct iwm_sdio_priv {
53 struct sdio_func *func;
54 struct iwm_priv *iwm;
55
56 struct workqueue_struct *isr_wq;
57 struct work_struct isr_worker;
58
59 struct dentry *cccr_dentry;
60
61 unsigned int blk_size;
62};
63
64#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/trace.c b/drivers/net/wireless/iwmc3200wifi/trace.c
deleted file mode 100644
index 904d36f22311..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/trace.c
+++ /dev/null
@@ -1,3 +0,0 @@
/* Defining CREATE_TRACE_POINTS before including trace.h instantiates
 * the iwm tracepoints in this (single) translation unit. */
#include "iwm.h"
#define CREATE_TRACE_POINTS
#include "trace.h"
diff --git a/drivers/net/wireless/iwmc3200wifi/trace.h b/drivers/net/wireless/iwmc3200wifi/trace.h
deleted file mode 100644
index f5f7070b7e22..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/trace.h
+++ /dev/null
@@ -1,283 +0,0 @@
/* Tracepoint definitions for the iwm driver.  With CONFIG_IWM_TRACING
 * disabled, TRACE_EVENT is stubbed so every trace_*() call compiles to
 * an empty static inline. */
#if !defined(__IWM_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ)
#define __IWM_TRACE_H__

#include <linux/tracepoint.h>

#if !defined(CONFIG_IWM_TRACING)
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, ...) \
static inline void trace_ ## name(proto) {}
#endif

#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwm

/* Common boilerplate: every event records the netdev name. */
#define IWM_ENTRY __array(char, ndev_name, 16)
#define IWM_ASSIGN strlcpy(__entry->ndev_name, iwm_to_ndev(iwm)->name, 16)
#define IWM_PR_FMT "%s"
#define IWM_PR_ARG __entry->ndev_name
19
/* Traces a non-wifi (target) command header on the Tx path. */
TRACE_EVENT(iwm_tx_nonwifi_cmd,
	TP_PROTO(struct iwm_priv *iwm, struct iwm_udma_out_nonwifi_hdr *hdr),

	TP_ARGS(iwm, hdr),

	TP_STRUCT__entry(
		IWM_ENTRY
		__field(u8, opcode)
		__field(u8, resp)
		__field(u8, eot)
		__field(u8, hw)
		__field(u16, seq)
		__field(u32, addr)
		__field(u32, op1)
		__field(u32, op2)
	),

	TP_fast_assign(
		IWM_ASSIGN;
		__entry->opcode = GET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_OPCODE);
		__entry->resp = GET_VAL32(hdr->cmd, UDMA_HDI_OUT_NW_CMD_RESP);
		__entry->eot = GET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_EOT);
		__entry->hw = GET_VAL32(hdr->cmd, UDMA_HDI_OUT_NW_CMD_HANDLE_BY_HW);
		__entry->seq = GET_VAL32(hdr->cmd, UDMA_HDI_OUT_CMD_NON_WIFI_HW_SEQ_NUM);
		__entry->addr = le32_to_cpu(hdr->addr);
		__entry->op1 = le32_to_cpu(hdr->op1_sz);
		__entry->op2 = le32_to_cpu(hdr->op2);
	),

	TP_printk(
		IWM_PR_FMT " Tx TARGET CMD: opcode 0x%x, resp %d, eot %d, "
		"hw %d, seq 0x%x, addr 0x%x, op1 0x%x, op2 0x%x",
		IWM_PR_ARG, __entry->opcode, __entry->resp, __entry->eot,
		__entry->hw, __entry->seq, __entry->addr, __entry->op1,
		__entry->op2
	)
);
57
/* Traces a UMAC/LMAC wifi command header on the Tx path. */
TRACE_EVENT(iwm_tx_wifi_cmd,
	TP_PROTO(struct iwm_priv *iwm, struct iwm_umac_wifi_out_hdr *hdr),

	TP_ARGS(iwm, hdr),

	TP_STRUCT__entry(
		IWM_ENTRY
		__field(u8, opcode)
		__field(u8, lmac)
		__field(u8, resp)
		__field(u8, eot)
		__field(u8, ra_tid)
		__field(u8, credit_group)
		__field(u8, color)
		__field(u16, seq)
	),

	TP_fast_assign(
		IWM_ASSIGN;
		__entry->opcode = hdr->sw_hdr.cmd.cmd;
		__entry->lmac = 0;
		__entry->seq = __le16_to_cpu(hdr->sw_hdr.cmd.seq_num);
		__entry->resp = GET_VAL8(hdr->sw_hdr.cmd.flags, UMAC_DEV_CMD_FLAGS_RESP_REQ);
		__entry->color = GET_VAL32(hdr->sw_hdr.meta_data, UMAC_FW_CMD_TX_STA_COLOR);
		__entry->eot = GET_VAL32(hdr->hw_hdr.cmd, UMAC_HDI_OUT_CMD_EOT);
		__entry->ra_tid = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_RATID);
		__entry->credit_group = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_CREDIT_GRP);
		/* Pass-through/wrapper commands carry an LMAC command:
		 * report the inner LMAC opcode instead of the UMAC one. */
		if (__entry->opcode == UMAC_CMD_OPCODE_WIFI_PASS_THROUGH ||
		    __entry->opcode == UMAC_CMD_OPCODE_WIFI_IF_WRAPPER) {
			__entry->lmac = 1;
			__entry->opcode = ((struct iwm_lmac_hdr *)(hdr + 1))->id;
		}
	),

	TP_printk(
		IWM_PR_FMT " Tx %cMAC CMD: opcode 0x%x, resp %d, eot %d, "
		"seq 0x%x, sta_color 0x%x, ra_tid 0x%x, credit_group 0x%x",
		IWM_PR_ARG, __entry->lmac ? 'L' : 'U', __entry->opcode,
		__entry->resp, __entry->eot, __entry->seq, __entry->color,
		__entry->ra_tid, __entry->credit_group
	)
);
100
/* Traces a Tx frame; a cleared eot bit marks a concatenated batch. */
TRACE_EVENT(iwm_tx_packets,
	TP_PROTO(struct iwm_priv *iwm, u8 *buf, int len),

	TP_ARGS(iwm, buf, len),

	TP_STRUCT__entry(
		IWM_ENTRY
		__field(u8, eot)
		__field(u8, ra_tid)
		__field(u8, credit_group)
		__field(u8, color)
		__field(u16, seq)
		__field(u8, npkt)
		__field(u32, bytes)
	),

	TP_fast_assign(
		struct iwm_umac_wifi_out_hdr *hdr =
			(struct iwm_umac_wifi_out_hdr *)buf;

		IWM_ASSIGN;
		__entry->eot = GET_VAL32(hdr->hw_hdr.cmd, UMAC_HDI_OUT_CMD_EOT);
		__entry->ra_tid = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_RATID);
		__entry->credit_group = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_CREDIT_GRP);
		__entry->color = GET_VAL32(hdr->sw_hdr.meta_data, UMAC_FW_CMD_TX_STA_COLOR);
		__entry->seq = __le16_to_cpu(hdr->sw_hdr.cmd.seq_num);
		__entry->npkt = 1;
		__entry->bytes = len;

		/* !eot: several sub-frames are concatenated in buf; count
		 * them by walking the 16-byte aligned headers. */
		if (!__entry->eot) {
			int count;
			u8 *ptr = buf;

			__entry->npkt = 0;
			while (ptr < buf + len) {
				count = GET_VAL32(hdr->sw_hdr.meta_data,
						  UMAC_FW_CMD_BYTE_COUNT);
				ptr += ALIGN(sizeof(*hdr) + count, 16);
				hdr = (struct iwm_umac_wifi_out_hdr *)ptr;
				__entry->npkt++;
			}
		}
	),

	TP_printk(
		IWM_PR_FMT " Tx %spacket: eot %d, seq 0x%x, sta_color 0x%x, "
		"ra_tid 0x%x, credit_group 0x%x, embedded_packets %d, %d bytes",
		IWM_PR_ARG, !__entry->eot ? "concatenated " : "",
		__entry->eot, __entry->seq, __entry->color, __entry->ra_tid,
		__entry->credit_group, __entry->npkt, __entry->bytes
	)
);
153
/* Traces a non-wifi (target) response header on the Rx path. */
TRACE_EVENT(iwm_rx_nonwifi_cmd,
	TP_PROTO(struct iwm_priv *iwm, void *buf, int len),

	TP_ARGS(iwm, buf, len),

	TP_STRUCT__entry(
		IWM_ENTRY
		__field(u8, opcode)
		__field(u16, seq)
		__field(u32, len)
	),

	TP_fast_assign(
		struct iwm_udma_in_hdr *hdr = buf;

		IWM_ASSIGN;
		__entry->opcode = GET_VAL32(hdr->cmd, UDMA_HDI_IN_NW_CMD_OPCODE);
		__entry->seq = GET_VAL32(hdr->cmd, UDMA_HDI_IN_CMD_NON_WIFI_HW_SEQ_NUM);
		__entry->len = len;
	),

	TP_printk(
		IWM_PR_FMT " Rx TARGET RESP: opcode 0x%x, seq 0x%x, len 0x%x",
		IWM_PR_ARG, __entry->opcode, __entry->seq, __entry->len
	)
);
180
/* Traces a wifi command response header on the Rx path. */
TRACE_EVENT(iwm_rx_wifi_cmd,
	TP_PROTO(struct iwm_priv *iwm, struct iwm_umac_wifi_in_hdr *hdr),

	TP_ARGS(iwm, hdr),

	TP_STRUCT__entry(
		IWM_ENTRY
		__field(u8, cmd)
		__field(u8, source)
		__field(u16, seq)
		__field(u32, count)
	),

	TP_fast_assign(
		IWM_ASSIGN;
		__entry->cmd = hdr->sw_hdr.cmd.cmd;
		__entry->source = GET_VAL32(hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE);
		__entry->count = GET_VAL32(hdr->sw_hdr.meta_data, UMAC_FW_CMD_BYTE_COUNT);
		__entry->seq = le16_to_cpu(hdr->sw_hdr.cmd.seq_num);
	),

	TP_printk(
		IWM_PR_FMT " Rx %s RESP: cmd 0x%x, seq 0x%x, count 0x%x",
		IWM_PR_ARG, __entry->source == UMAC_HDI_IN_SOURCE_FHRX ? "LMAC" :
		__entry->source == UMAC_HDI_IN_SOURCE_FW ? "UMAC" : "UDMA",
		__entry->cmd, __entry->seq, __entry->count
	)
);
209
/* Symbolic names used by __print_symbolic() for the ticket action. */
#define iwm_ticket_action_symbol		\
	{ IWM_RX_TICKET_DROP, "DROP" },		\
	{ IWM_RX_TICKET_RELEASE, "RELEASE" },	\
	{ IWM_RX_TICKET_SNIFFER, "SNIFFER" },	\
	{ IWM_RX_TICKET_ENQUEUE, "ENQUEUE" }

/* Traces the first ticket of an Rx ticket notification buffer. */
TRACE_EVENT(iwm_rx_ticket,
	TP_PROTO(struct iwm_priv *iwm, void *buf, int len),

	TP_ARGS(iwm, buf, len),

	TP_STRUCT__entry(
		IWM_ENTRY
		__field(u8, action)
		__field(u8, reason)
		__field(u16, id)
		__field(u16, flags)
	),

	TP_fast_assign(
		struct iwm_rx_ticket *ticket =
			((struct iwm_umac_notif_rx_ticket *)buf)->tickets;

		IWM_ASSIGN;
		__entry->id = le16_to_cpu(ticket->id);
		__entry->action = le16_to_cpu(ticket->action);
		__entry->flags = le16_to_cpu(ticket->flags);
		/* The drop reason (possibly 0) is a bitfield within flags */
		__entry->reason = (__entry->flags & IWM_RX_TICKET_DROP_REASON_MSK) >> IWM_RX_TICKET_DROP_REASON_POS;
	),

	TP_printk(
		IWM_PR_FMT " Rx ticket: id 0x%x, action %s, %s 0x%x%s",
		IWM_PR_ARG, __entry->id,
		__print_symbolic(__entry->action, iwm_ticket_action_symbol),
		__entry->reason ? "reason" : "flags",
		__entry->reason ? __entry->reason : __entry->flags,
		__entry->flags & IWM_RX_TICKET_AMSDU_MSK ? ", AMSDU frame" : ""
	)
);
249
/* Traces an Rx data packet header (payload length excludes the header). */
TRACE_EVENT(iwm_rx_packet,
	TP_PROTO(struct iwm_priv *iwm, void *buf, int len),

	TP_ARGS(iwm, buf, len),

	TP_STRUCT__entry(
		IWM_ENTRY
		__field(u8, source)
		__field(u16, id)
		__field(u32, len)
	),

	TP_fast_assign(
		struct iwm_umac_wifi_in_hdr *hdr = buf;

		IWM_ASSIGN;
		__entry->source = GET_VAL32(hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE);
		__entry->id = le16_to_cpu(hdr->sw_hdr.cmd.seq_num);
		__entry->len = len - sizeof(*hdr);
	),

	TP_printk(
		IWM_PR_FMT " Rx %s packet: id 0x%x, %d bytes",
		IWM_PR_ARG, __entry->source == UMAC_HDI_IN_SOURCE_FHRX ?
		"LMAC" : "UMAC", __entry->id, __entry->len
	)
);
277#endif
278
279#undef TRACE_INCLUDE_PATH
280#define TRACE_INCLUDE_PATH .
281#undef TRACE_INCLUDE_FILE
282#define TRACE_INCLUDE_FILE trace
283#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/iwmc3200wifi/tx.c b/drivers/net/wireless/iwmc3200wifi/tx.c
deleted file mode 100644
index be98074c0608..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/tx.c
+++ /dev/null
@@ -1,529 +0,0 @@
1/*
2 * Intel Wireless Multicomm 3200 WiFi driver
3 *
4 * Copyright (C) 2009 Intel Corporation. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Intel Corporation nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 *
33 * Intel Corporation <ilw@linux.intel.com>
34 * Samuel Ortiz <samuel.ortiz@intel.com>
35 * Zhu Yi <yi.zhu@intel.com>
36 *
37 */
38
39/*
40 * iwm Tx theory of operation:
41 *
42 * 1) We receive a 802.3 frame from the stack
43 * 2) We convert it to a 802.11 frame [iwm_xmit_frame]
44 * 3) We queue it to its corresponding tx queue [iwm_xmit_frame]
45 * 4) We schedule the tx worker. There is one worker per tx
46 * queue. [iwm_xmit_frame]
47 * 5) The tx worker is scheduled
48 * 6) We go through every queued skb on the tx queue, and for each
49 * and every one of them: [iwm_tx_worker]
50 * a) We check if we have enough Tx credits (see below for a Tx
51 * credits description) for the frame length. [iwm_tx_worker]
52 * b) If we do, we aggregate the Tx frame into a UDMA one, by
53 * concatenating one REPLY_TX command per Tx frame. [iwm_tx_worker]
54 * c) When we run out of credits, or when we reach the maximum
55 * concatenation size, we actually send the concatenated UDMA
56 * frame. [iwm_tx_worker]
57 *
58 * When we run out of Tx credits, the skbs are filling the tx queue,
59 * and eventually we will stop the netdev queue. [iwm_tx_worker]
60 * The tx queue is emptied as we're getting new tx credits, by
61 * scheduling the tx_worker. [iwm_tx_credit_inc]
62 * The netdev queue is started again when we have enough tx credits,
 63 * and when our tx queue has some reasonable amount of space available
64 * (i.e. half of the max size). [iwm_tx_worker]
65 */
66
67#include <linux/slab.h>
68#include <linux/skbuff.h>
69#include <linux/netdevice.h>
70#include <linux/ieee80211.h>
71
72#include "iwm.h"
73#include "debug.h"
74#include "commands.h"
75#include "hal.h"
76#include "umac.h"
77#include "bus.h"
78
79#define IWM_UMAC_PAGE_ALLOC_WRAP 0xffff
80
81#define BYTES_TO_PAGES(n) (1 + ((n) >> ilog2(IWM_UMAC_PAGE_SIZE)) - \
82 (((n) & (IWM_UMAC_PAGE_SIZE - 1)) == 0))
83
/* Map between credit-pool ids and tx queue indices: the command queue
 * occupies one slot in pool-id space between the data queues, so ids at
 * or above IWM_TX_CMD_QUEUE are shifted by one.
 * Fix: arguments are now parenthesized so expansions like
 * pool_id_to_queue(a + b) evaluate correctly. */
#define pool_id_to_queue(id) (((id) < IWM_TX_CMD_QUEUE) ? (id) : (id) - 1)
#define queue_to_pool_id(q) (((q) < IWM_TX_CMD_QUEUE) ? (q) : (q) + 1)
86
87/* require to hold tx_credit lock */
88static int iwm_tx_credit_get(struct iwm_tx_credit *tx_credit, int id)
89{
90 struct pool_entry *pool = &tx_credit->pools[id];
91 struct spool_entry *spool = &tx_credit->spools[pool->sid];
92 int spool_pages;
93
94 /* number of pages can be taken from spool by this pool */
95 spool_pages = spool->max_pages - spool->alloc_pages +
96 max(pool->min_pages - pool->alloc_pages, 0);
97
98 return min(pool->max_pages - pool->alloc_pages, spool_pages);
99}
100
101static bool iwm_tx_credit_ok(struct iwm_priv *iwm, int id, int nb)
102{
103 u32 npages = BYTES_TO_PAGES(nb);
104
105 if (npages <= iwm_tx_credit_get(&iwm->tx_credit, id))
106 return 1;
107
108 set_bit(id, &iwm->tx_credit.full_pools_map);
109
110 IWM_DBG_TX(iwm, DBG, "LINK: stop txq[%d], available credit: %d\n",
111 pool_id_to_queue(id),
112 iwm_tx_credit_get(&iwm->tx_credit, id));
113
114 return 0;
115}
116
117void iwm_tx_credit_inc(struct iwm_priv *iwm, int id, int total_freed_pages)
118{
119 struct pool_entry *pool;
120 struct spool_entry *spool;
121 int freed_pages;
122 int queue;
123
124 BUG_ON(id >= IWM_MACS_OUT_GROUPS);
125
126 pool = &iwm->tx_credit.pools[id];
127 spool = &iwm->tx_credit.spools[pool->sid];
128
129 freed_pages = total_freed_pages - pool->total_freed_pages;
130 IWM_DBG_TX(iwm, DBG, "Free %d pages for pool[%d]\n", freed_pages, id);
131
132 if (!freed_pages) {
133 IWM_DBG_TX(iwm, DBG, "No pages are freed by UMAC\n");
134 return;
135 } else if (freed_pages < 0)
136 freed_pages += IWM_UMAC_PAGE_ALLOC_WRAP + 1;
137
138 if (pool->alloc_pages > pool->min_pages) {
139 int spool_pages = pool->alloc_pages - pool->min_pages;
140 spool_pages = min(spool_pages, freed_pages);
141 spool->alloc_pages -= spool_pages;
142 }
143
144 pool->alloc_pages -= freed_pages;
145 pool->total_freed_pages = total_freed_pages;
146
147 IWM_DBG_TX(iwm, DBG, "Pool[%d] pages alloc: %d, total_freed: %d, "
148 "Spool[%d] pages alloc: %d\n", id, pool->alloc_pages,
149 pool->total_freed_pages, pool->sid, spool->alloc_pages);
150
151 if (test_bit(id, &iwm->tx_credit.full_pools_map) &&
152 (pool->alloc_pages < pool->max_pages / 2)) {
153 clear_bit(id, &iwm->tx_credit.full_pools_map);
154
155 queue = pool_id_to_queue(id);
156
157 IWM_DBG_TX(iwm, DBG, "LINK: start txq[%d], available "
158 "credit: %d\n", queue,
159 iwm_tx_credit_get(&iwm->tx_credit, id));
160 queue_work(iwm->txq[queue].wq, &iwm->txq[queue].worker);
161 }
162}
163
164static void iwm_tx_credit_dec(struct iwm_priv *iwm, int id, int alloc_pages)
165{
166 struct pool_entry *pool;
167 struct spool_entry *spool;
168 int spool_pages;
169
170 IWM_DBG_TX(iwm, DBG, "Allocate %d pages for pool[%d]\n",
171 alloc_pages, id);
172
173 BUG_ON(id >= IWM_MACS_OUT_GROUPS);
174
175 pool = &iwm->tx_credit.pools[id];
176 spool = &iwm->tx_credit.spools[pool->sid];
177
178 spool_pages = pool->alloc_pages + alloc_pages - pool->min_pages;
179
180 if (pool->alloc_pages >= pool->min_pages)
181 spool->alloc_pages += alloc_pages;
182 else if (spool_pages > 0)
183 spool->alloc_pages += spool_pages;
184
185 pool->alloc_pages += alloc_pages;
186
187 IWM_DBG_TX(iwm, DBG, "Pool[%d] pages alloc: %d, total_freed: %d, "
188 "Spool[%d] pages alloc: %d\n", id, pool->alloc_pages,
189 pool->total_freed_pages, pool->sid, spool->alloc_pages);
190}
191
192int iwm_tx_credit_alloc(struct iwm_priv *iwm, int id, int nb)
193{
194 u32 npages = BYTES_TO_PAGES(nb);
195 int ret = 0;
196
197 spin_lock(&iwm->tx_credit.lock);
198
199 if (!iwm_tx_credit_ok(iwm, id, nb)) {
200 IWM_DBG_TX(iwm, DBG, "No credit available for pool[%d]\n", id);
201 ret = -ENOSPC;
202 goto out;
203 }
204
205 iwm_tx_credit_dec(iwm, id, npages);
206
207 out:
208 spin_unlock(&iwm->tx_credit.lock);
209 return ret;
210}
211
212/*
213 * Since we're on an SDIO or USB bus, we are not sharing memory
214 * for storing to be transmitted frames. The host needs to push
215 * them upstream. As a consequence there needs to be a way for
216 * the target to let us know if it can actually take more TX frames
217 * or not. This is what Tx credits are for.
218 *
219 * For each Tx HW queue, we have a Tx pool, and then we have one
220 * unique super pool (spool), which is actually a global pool of
221 * all the UMAC pages.
222 * For each Tx pool we have a min_pages, a max_pages fields, and a
223 * alloc_pages fields. The alloc_pages tracks the number of pages
224 * currently allocated from the tx pool.
225 * Here are the rules to check if given a tx frame we have enough
226 * tx credits for it:
227 * 1) We translate the frame length into a number of UMAC pages.
228 * Let's call them n_pages.
229 * 2) For the corresponding tx pool, we check if n_pages +
230 * pool->alloc_pages is higher than pool->min_pages. min_pages
231 * represent a set of pre-allocated pages on the tx pool. If
232 * that's the case, then we need to allocate those pages from
233 * the spool. We can do so until we reach spool->max_pages.
234 * 3) Each tx pool is not allowed to allocate more than pool->max_pages
235 * from the spool, so once we're over min_pages, we can allocate
236 * pages from the spool, but not more than max_pages.
237 *
238 * When the tx code path needs to send a tx frame, it checks first
239 * if it has enough tx credits, following those rules. [iwm_tx_credit_get]
240 * If it does, it then updates the pool and spool counters and
241 * then send the frame. [iwm_tx_credit_alloc and iwm_tx_credit_dec]
242 * On the other side, when the UMAC is done transmitting frames, it
243 * will send a credit update notification to the host. This is when
244 * the pool and spool counters get decreased. [iwm_tx_credit_inc,
245 * called from rx.c:iwm_ntf_tx_credit_update]
246 *
247 */
248void iwm_tx_credit_init_pools(struct iwm_priv *iwm,
249 struct iwm_umac_notif_alive *alive)
250{
251 int i, sid, pool_pages;
252
253 spin_lock(&iwm->tx_credit.lock);
254
255 iwm->tx_credit.pool_nr = le16_to_cpu(alive->page_grp_count);
256 iwm->tx_credit.full_pools_map = 0;
257 memset(&iwm->tx_credit.spools[0], 0, sizeof(struct spool_entry));
258
259 IWM_DBG_TX(iwm, DBG, "Pools number is %d\n", iwm->tx_credit.pool_nr);
260
261 for (i = 0; i < iwm->tx_credit.pool_nr; i++) {
262 __le32 page_grp_state = alive->page_grp_state[i];
263
264 iwm->tx_credit.pools[i].id = GET_VAL32(page_grp_state,
265 UMAC_ALIVE_PAGE_STS_GRP_NUM);
266 iwm->tx_credit.pools[i].sid = GET_VAL32(page_grp_state,
267 UMAC_ALIVE_PAGE_STS_SGRP_NUM);
268 iwm->tx_credit.pools[i].min_pages = GET_VAL32(page_grp_state,
269 UMAC_ALIVE_PAGE_STS_GRP_MIN_SIZE);
270 iwm->tx_credit.pools[i].max_pages = GET_VAL32(page_grp_state,
271 UMAC_ALIVE_PAGE_STS_GRP_MAX_SIZE);
272 iwm->tx_credit.pools[i].alloc_pages = 0;
273 iwm->tx_credit.pools[i].total_freed_pages = 0;
274
275 sid = iwm->tx_credit.pools[i].sid;
276 pool_pages = iwm->tx_credit.pools[i].min_pages;
277
278 if (iwm->tx_credit.spools[sid].max_pages == 0) {
279 iwm->tx_credit.spools[sid].id = sid;
280 iwm->tx_credit.spools[sid].max_pages =
281 GET_VAL32(page_grp_state,
282 UMAC_ALIVE_PAGE_STS_SGRP_MAX_SIZE);
283 iwm->tx_credit.spools[sid].alloc_pages = 0;
284 }
285
286 iwm->tx_credit.spools[sid].alloc_pages += pool_pages;
287
288 IWM_DBG_TX(iwm, DBG, "Pool idx: %d, id: %d, sid: %d, capacity "
289 "min: %d, max: %d, pool alloc: %d, total_free: %d, "
290 "super poll alloc: %d\n",
291 i, iwm->tx_credit.pools[i].id,
292 iwm->tx_credit.pools[i].sid,
293 iwm->tx_credit.pools[i].min_pages,
294 iwm->tx_credit.pools[i].max_pages,
295 iwm->tx_credit.pools[i].alloc_pages,
296 iwm->tx_credit.pools[i].total_freed_pages,
297 iwm->tx_credit.spools[sid].alloc_pages);
298 }
299
300 spin_unlock(&iwm->tx_credit.lock);
301}
302
303#define IWM_UDMA_HDR_LEN sizeof(struct iwm_umac_wifi_out_hdr)
304
305static __le16 iwm_tx_build_packet(struct iwm_priv *iwm, struct sk_buff *skb,
306 int pool_id, u8 *buf)
307{
308 struct iwm_umac_wifi_out_hdr *hdr = (struct iwm_umac_wifi_out_hdr *)buf;
309 struct iwm_udma_wifi_cmd udma_cmd;
310 struct iwm_umac_cmd umac_cmd;
311 struct iwm_tx_info *tx_info = skb_to_tx_info(skb);
312
313 udma_cmd.count = cpu_to_le16(skb->len +
314 sizeof(struct iwm_umac_fw_cmd_hdr));
315 /* set EOP to 0 here. iwm_udma_wifi_hdr_set_eop() will be
316 * called later to set EOP for the last packet. */
317 udma_cmd.eop = 0;
318 udma_cmd.credit_group = pool_id;
319 udma_cmd.ra_tid = tx_info->sta << 4 | tx_info->tid;
320 udma_cmd.lmac_offset = 0;
321
322 umac_cmd.id = REPLY_TX;
323 umac_cmd.count = cpu_to_le16(skb->len);
324 umac_cmd.color = tx_info->color;
325 umac_cmd.resp = 0;
326 umac_cmd.seq_num = cpu_to_le16(iwm_alloc_wifi_cmd_seq(iwm));
327
328 iwm_build_udma_wifi_hdr(iwm, &hdr->hw_hdr, &udma_cmd);
329 iwm_build_umac_hdr(iwm, &hdr->sw_hdr, &umac_cmd);
330
331 memcpy(buf + sizeof(*hdr), skb->data, skb->len);
332
333 return umac_cmd.seq_num;
334}
335
336static int iwm_tx_send_concat_packets(struct iwm_priv *iwm,
337 struct iwm_tx_queue *txq)
338{
339 int ret;
340
341 if (!txq->concat_count)
342 return 0;
343
344 IWM_DBG_TX(iwm, DBG, "Send concatenated Tx: queue %d, %d bytes\n",
345 txq->id, txq->concat_count);
346
347 /* mark EOP for the last packet */
348 iwm_udma_wifi_hdr_set_eop(iwm, txq->concat_ptr, 1);
349
350 trace_iwm_tx_packets(iwm, txq->concat_buf, txq->concat_count);
351 ret = iwm_bus_send_chunk(iwm, txq->concat_buf, txq->concat_count);
352
353 txq->concat_count = 0;
354 txq->concat_ptr = txq->concat_buf;
355
356 return ret;
357}
358
359void iwm_tx_worker(struct work_struct *work)
360{
361 struct iwm_priv *iwm;
362 struct iwm_tx_info *tx_info = NULL;
363 struct sk_buff *skb;
364 struct iwm_tx_queue *txq;
365 struct iwm_sta_info *sta_info;
366 struct iwm_tid_info *tid_info;
367 int cmdlen, ret, pool_id;
368
369 txq = container_of(work, struct iwm_tx_queue, worker);
370 iwm = container_of(txq, struct iwm_priv, txq[txq->id]);
371
372 pool_id = queue_to_pool_id(txq->id);
373
374 while (!test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
375 !skb_queue_empty(&txq->queue)) {
376
377 spin_lock_bh(&txq->lock);
378 skb = skb_dequeue(&txq->queue);
379 spin_unlock_bh(&txq->lock);
380
381 tx_info = skb_to_tx_info(skb);
382 sta_info = &iwm->sta_table[tx_info->sta];
383 if (!sta_info->valid) {
384 IWM_ERR(iwm, "Trying to send a frame to unknown STA\n");
385 kfree_skb(skb);
386 continue;
387 }
388
389 tid_info = &sta_info->tid_info[tx_info->tid];
390
391 mutex_lock(&tid_info->mutex);
392
393 /*
394 * If the RAxTID is stopped, we queue the skb to the stopped
395 * queue.
396 * Whenever we'll get a UMAC notification to resume the tx flow
397 * for this RAxTID, we'll merge back the stopped queue into the
398 * regular queue. See iwm_ntf_stop_resume_tx() from rx.c.
399 */
400 if (tid_info->stopped) {
401 IWM_DBG_TX(iwm, DBG, "%dx%d stopped\n",
402 tx_info->sta, tx_info->tid);
403 spin_lock_bh(&txq->lock);
404 skb_queue_tail(&txq->stopped_queue, skb);
405 spin_unlock_bh(&txq->lock);
406
407 mutex_unlock(&tid_info->mutex);
408 continue;
409 }
410
411 cmdlen = IWM_UDMA_HDR_LEN + skb->len;
412
413 IWM_DBG_TX(iwm, DBG, "Tx frame on queue %d: skb: 0x%p, sta: "
414 "%d, color: %d\n", txq->id, skb, tx_info->sta,
415 tx_info->color);
416
417 if (txq->concat_count + cmdlen > IWM_HAL_CONCATENATE_BUF_SIZE)
418 iwm_tx_send_concat_packets(iwm, txq);
419
420 ret = iwm_tx_credit_alloc(iwm, pool_id, cmdlen);
421 if (ret) {
422 IWM_DBG_TX(iwm, DBG, "not enough tx_credit for queue "
423 "%d, Tx worker stopped\n", txq->id);
424 spin_lock_bh(&txq->lock);
425 skb_queue_head(&txq->queue, skb);
426 spin_unlock_bh(&txq->lock);
427
428 mutex_unlock(&tid_info->mutex);
429 break;
430 }
431
432 txq->concat_ptr = txq->concat_buf + txq->concat_count;
433 tid_info->last_seq_num =
434 iwm_tx_build_packet(iwm, skb, pool_id, txq->concat_ptr);
435 txq->concat_count += ALIGN(cmdlen, 16);
436
437 mutex_unlock(&tid_info->mutex);
438
439 kfree_skb(skb);
440 }
441
442 iwm_tx_send_concat_packets(iwm, txq);
443
444 if (__netif_subqueue_stopped(iwm_to_ndev(iwm), txq->id) &&
445 !test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
446 (skb_queue_len(&txq->queue) < IWM_TX_LIST_SIZE / 2)) {
447 IWM_DBG_TX(iwm, DBG, "LINK: start netif_subqueue[%d]", txq->id);
448 netif_wake_subqueue(iwm_to_ndev(iwm), txq->id);
449 }
450}
451
452int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
453{
454 struct iwm_priv *iwm = ndev_to_iwm(netdev);
455 struct wireless_dev *wdev = iwm_to_wdev(iwm);
456 struct iwm_tx_info *tx_info;
457 struct iwm_tx_queue *txq;
458 struct iwm_sta_info *sta_info;
459 u8 *dst_addr, sta_id;
460 u16 queue;
461 int ret;
462
463
464 if (!test_bit(IWM_STATUS_ASSOCIATED, &iwm->status)) {
465 IWM_DBG_TX(iwm, DBG, "LINK: stop netif_all_queues: "
466 "not associated\n");
467 netif_tx_stop_all_queues(netdev);
468 goto drop;
469 }
470
471 queue = skb_get_queue_mapping(skb);
472 BUG_ON(queue >= IWM_TX_DATA_QUEUES); /* no iPAN yet */
473
474 txq = &iwm->txq[queue];
475
476 /* No free space for Tx, tx_worker is too slow */
477 if ((skb_queue_len(&txq->queue) > IWM_TX_LIST_SIZE) ||
478 (skb_queue_len(&txq->stopped_queue) > IWM_TX_LIST_SIZE)) {
479 IWM_DBG_TX(iwm, DBG, "LINK: stop netif_subqueue[%d]\n", queue);
480 netif_stop_subqueue(netdev, queue);
481 return NETDEV_TX_BUSY;
482 }
483
484 ret = ieee80211_data_from_8023(skb, netdev->dev_addr, wdev->iftype,
485 iwm->bssid, 0);
486 if (ret) {
487 IWM_ERR(iwm, "build wifi header failed\n");
488 goto drop;
489 }
490
491 dst_addr = ((struct ieee80211_hdr *)(skb->data))->addr1;
492
493 for (sta_id = 0; sta_id < IWM_STA_TABLE_NUM; sta_id++) {
494 sta_info = &iwm->sta_table[sta_id];
495 if (sta_info->valid &&
496 !memcmp(dst_addr, sta_info->addr, ETH_ALEN))
497 break;
498 }
499
500 if (sta_id == IWM_STA_TABLE_NUM) {
501 IWM_ERR(iwm, "STA %pM not found in sta_table, Tx ignored\n",
502 dst_addr);
503 goto drop;
504 }
505
506 tx_info = skb_to_tx_info(skb);
507 tx_info->sta = sta_id;
508 tx_info->color = sta_info->color;
509 /* UMAC uses TID 8 (vs. 0) for non QoS packets */
510 if (sta_info->qos)
511 tx_info->tid = skb->priority;
512 else
513 tx_info->tid = IWM_UMAC_MGMT_TID;
514
515 spin_lock_bh(&iwm->txq[queue].lock);
516 skb_queue_tail(&iwm->txq[queue].queue, skb);
517 spin_unlock_bh(&iwm->txq[queue].lock);
518
519 queue_work(iwm->txq[queue].wq, &iwm->txq[queue].worker);
520
521 netdev->stats.tx_packets++;
522 netdev->stats.tx_bytes += skb->len;
523 return NETDEV_TX_OK;
524
525 drop:
526 netdev->stats.tx_dropped++;
527 dev_kfree_skb_any(skb);
528 return NETDEV_TX_OK;
529}
diff --git a/drivers/net/wireless/iwmc3200wifi/umac.h b/drivers/net/wireless/iwmc3200wifi/umac.h
deleted file mode 100644
index 4a137d334a42..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/umac.h
+++ /dev/null
@@ -1,789 +0,0 @@
1/*
2 * Intel Wireless Multicomm 3200 WiFi driver
3 *
4 * Copyright (C) 2009 Intel Corporation. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Intel Corporation nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 *
33 * Intel Corporation <ilw@linux.intel.com>
34 * Samuel Ortiz <samuel.ortiz@intel.com>
35 * Zhu Yi <yi.zhu@intel.com>
36 *
37 */
38
39#ifndef __IWM_UMAC_H__
40#define __IWM_UMAC_H__
41
/*
 * Header preceding inbound (device -> host) UDMA transfers: an HDI
 * command word (decoded with the UMAC_HDI_IN_CMD_* fields below) and a
 * size word, both little-endian. Wire format -- hence __packed.
 */
 42struct iwm_udma_in_hdr {
 43 __le32 cmd;
 44 __le32 size;
 45} __packed;
46
/*
 * Header for outbound non-WiFi UDMA commands (see the
 * UMAC_HDI_OUT_OPCODE_* list below). The meaning of addr/op1_sz/op2
 * presumably depends on the opcode (read/write/jump/...) -- not shown
 * here; confirm against the command builders. Little-endian wire
 * format, __packed.
 */
 47struct iwm_udma_out_nonwifi_hdr {
 48 __le32 cmd;
 49 __le32 addr;
 50 __le32 op1_sz;
 51 __le32 op2;
 52} __packed;
53
/*
 * Header for outbound WiFi UDMA transfers: the HDI command word plus a
 * meta_data word packing byte count, credit group, RA/TID and LMAC
 * offset (see the UMAC_HDI_OUT_* field definitions below).
 * Little-endian wire format, __packed.
 */
 54struct iwm_udma_out_wifi_hdr {
 55 __le32 cmd;
 56 __le32 meta_data;
 57} __packed;
58
59/* Sequence numbering */
60#define UMAC_WIFI_SEQ_NUM_BASE 1
61#define UMAC_WIFI_SEQ_NUM_MAX 0x4000
62#define UMAC_NONWIFI_SEQ_NUM_BASE 1
63#define UMAC_NONWIFI_SEQ_NUM_MAX 0x10
64
 65/* MAC address register address */
66#define WICO_MAC_ADDRESS_ADDR 0x604008F8
67
68/* RA / TID */
69#define UMAC_HDI_ACT_TBL_IDX_TID_POS 0
70#define UMAC_HDI_ACT_TBL_IDX_TID_SEED 0xF
71
72#define UMAC_HDI_ACT_TBL_IDX_RA_POS 4
73#define UMAC_HDI_ACT_TBL_IDX_RA_SEED 0xF
74
75#define UMAC_HDI_ACT_TBL_IDX_RA_UMAC 0xF
76#define UMAC_HDI_ACT_TBL_IDX_TID_UMAC 0x9
77#define UMAC_HDI_ACT_TBL_IDX_TID_LMAC 0xA
78
79#define UMAC_HDI_ACT_TBL_IDX_HOST_CMD \
80 ((UMAC_HDI_ACT_TBL_IDX_RA_UMAC << UMAC_HDI_ACT_TBL_IDX_RA_POS) |\
81 (UMAC_HDI_ACT_TBL_IDX_TID_UMAC << UMAC_HDI_ACT_TBL_IDX_TID_POS))
82#define UMAC_HDI_ACT_TBL_IDX_UMAC_CMD \
83 ((UMAC_HDI_ACT_TBL_IDX_RA_UMAC << UMAC_HDI_ACT_TBL_IDX_RA_POS) |\
84 (UMAC_HDI_ACT_TBL_IDX_TID_LMAC << UMAC_HDI_ACT_TBL_IDX_TID_POS))
85
86/* STA ID and color */
87#define STA_ID_SEED (0x0f)
88#define STA_ID_POS (0)
89#define STA_ID_MSK (STA_ID_SEED << STA_ID_POS)
90
91#define STA_COLOR_SEED (0x7)
92#define STA_COLOR_POS (4)
93#define STA_COLOR_MSK (STA_COLOR_SEED << STA_COLOR_POS)
94
95#define STA_ID_N_COLOR_COLOR(id_n_color) \
96 (((id_n_color) & STA_COLOR_MSK) >> STA_COLOR_POS)
97#define STA_ID_N_COLOR_ID(id_n_color) \
98 (((id_n_color) & STA_ID_MSK) >> STA_ID_POS)
99
100/* iwm_umac_notif_alive.page_grp_state Group number -- bits [3:0] */
101#define UMAC_ALIVE_PAGE_STS_GRP_NUM_POS 0
102#define UMAC_ALIVE_PAGE_STS_GRP_NUM_SEED 0xF
103
104/* iwm_umac_notif_alive.page_grp_state Super group number -- bits [7:4] */
105#define UMAC_ALIVE_PAGE_STS_SGRP_NUM_POS 4
106#define UMAC_ALIVE_PAGE_STS_SGRP_NUM_SEED 0xF
107
108/* iwm_umac_notif_alive.page_grp_state Group min size -- bits [15:8] */
109#define UMAC_ALIVE_PAGE_STS_GRP_MIN_SIZE_POS 8
110#define UMAC_ALIVE_PAGE_STS_GRP_MIN_SIZE_SEED 0xFF
111
112/* iwm_umac_notif_alive.page_grp_state Group max size -- bits [23:16] */
113#define UMAC_ALIVE_PAGE_STS_GRP_MAX_SIZE_POS 16
114#define UMAC_ALIVE_PAGE_STS_GRP_MAX_SIZE_SEED 0xFF
115
116/* iwm_umac_notif_alive.page_grp_state Super group max size -- bits [31:24] */
117#define UMAC_ALIVE_PAGE_STS_SGRP_MAX_SIZE_POS 24
118#define UMAC_ALIVE_PAGE_STS_SGRP_MAX_SIZE_SEED 0xFF
119
120/* Barkers */
121#define UMAC_REBOOT_BARKER 0xdeadbeef
122#define UMAC_ACK_BARKER 0xfeedbabe
123#define UMAC_PAD_TERMINAL 0xadadadad
124
125/* UMAC JMP address */
126#define UMAC_MU_FW_INST_DATA_12_ADDR 0xBF0000
127
128/* iwm_umac_hdi_out_hdr.cmd OP code -- bits [3:0] */
129#define UMAC_HDI_OUT_CMD_OPCODE_POS 0
130#define UMAC_HDI_OUT_CMD_OPCODE_SEED 0xF
131
132/* iwm_umac_hdi_out_hdr.cmd End-Of-Transfer -- bits [10:10] */
133#define UMAC_HDI_OUT_CMD_EOT_POS 10
134#define UMAC_HDI_OUT_CMD_EOT_SEED 0x1
135
136/* iwm_umac_hdi_out_hdr.cmd UTFD only usage -- bits [11:11] */
137#define UMAC_HDI_OUT_CMD_UTFD_ONLY_POS 11
138#define UMAC_HDI_OUT_CMD_UTFD_ONLY_SEED 0x1
139
140/* iwm_umac_hdi_out_hdr.cmd Non-WiFi HW sequence number -- bits [15:12] */
141#define UDMA_HDI_OUT_CMD_NON_WIFI_HW_SEQ_NUM_POS 12
142#define UDMA_HDI_OUT_CMD_NON_WIFI_HW_SEQ_NUM_SEED 0xF
143
144/* iwm_umac_hdi_out_hdr.cmd Signature -- bits [31:16] */
145#define UMAC_HDI_OUT_CMD_SIGNATURE_POS 16
146#define UMAC_HDI_OUT_CMD_SIGNATURE_SEED 0xFFFF
147
148/* iwm_umac_hdi_out_hdr.meta_data Byte count -- bits [11:0] */
149#define UMAC_HDI_OUT_BYTE_COUNT_POS 0
150#define UMAC_HDI_OUT_BYTE_COUNT_SEED 0xFFF
151
152/* iwm_umac_hdi_out_hdr.meta_data Credit group -- bits [15:12] */
153#define UMAC_HDI_OUT_CREDIT_GRP_POS 12
154#define UMAC_HDI_OUT_CREDIT_GRP_SEED 0xF
155
156/* iwm_umac_hdi_out_hdr.meta_data RA/TID -- bits [23:16] */
157#define UMAC_HDI_OUT_RATID_POS 16
158#define UMAC_HDI_OUT_RATID_SEED 0xFF
159
160/* iwm_umac_hdi_out_hdr.meta_data LMAC offset -- bits [31:24] */
161#define UMAC_HDI_OUT_LMAC_OFFSET_POS 24
162#define UMAC_HDI_OUT_LMAC_OFFSET_SEED 0xFF
163
164/* Signature */
165#define UMAC_HDI_OUT_SIGNATURE 0xCBBC
166
167/* buffer alignment */
168#define UMAC_HDI_BUF_ALIGN_MSK 0xF
169
170/* iwm_umac_hdi_in_hdr.cmd OP code -- bits [3:0] */
171#define UMAC_HDI_IN_CMD_OPCODE_POS 0
172#define UMAC_HDI_IN_CMD_OPCODE_SEED 0xF
173
174/* iwm_umac_hdi_in_hdr.cmd Non-WiFi API response -- bits [6:4] */
175#define UMAC_HDI_IN_CMD_NON_WIFI_RESP_POS 4
176#define UMAC_HDI_IN_CMD_NON_WIFI_RESP_SEED 0x7
177
178/* iwm_umac_hdi_in_hdr.cmd WiFi API source -- bits [5:4] */
179#define UMAC_HDI_IN_CMD_SOURCE_POS 4
180#define UMAC_HDI_IN_CMD_SOURCE_SEED 0x3
181
182/* iwm_umac_hdi_in_hdr.cmd WiFi API EOT -- bits [6:6] */
183#define UMAC_HDI_IN_CMD_EOT_POS 6
184#define UMAC_HDI_IN_CMD_EOT_SEED 0x1
185
186/* iwm_umac_hdi_in_hdr.cmd timestamp present -- bits [7:7] */
187#define UMAC_HDI_IN_CMD_TIME_STAMP_PRESENT_POS 7
188#define UMAC_HDI_IN_CMD_TIME_STAMP_PRESENT_SEED 0x1
189
190/* iwm_umac_hdi_in_hdr.cmd WiFi Non-last AMSDU -- bits [8:8] */
191#define UMAC_HDI_IN_CMD_NON_LAST_AMSDU_POS 8
192#define UMAC_HDI_IN_CMD_NON_LAST_AMSDU_SEED 0x1
193
194/* iwm_umac_hdi_in_hdr.cmd WiFi HW sequence number -- bits [31:9] */
195#define UMAC_HDI_IN_CMD_HW_SEQ_NUM_POS 9
196#define UMAC_HDI_IN_CMD_HW_SEQ_NUM_SEED 0x7FFFFF
197
198/* iwm_umac_hdi_in_hdr.cmd Non-WiFi HW sequence number -- bits [15:12] */
199#define UDMA_HDI_IN_CMD_NON_WIFI_HW_SEQ_NUM_POS 12
200#define UDMA_HDI_IN_CMD_NON_WIFI_HW_SEQ_NUM_SEED 0xF
201
202/* iwm_umac_hdi_in_hdr.cmd Non-WiFi HW signature -- bits [16:31] */
203#define UDMA_HDI_IN_CMD_NON_WIFI_HW_SIG_POS 16
204#define UDMA_HDI_IN_CMD_NON_WIFI_HW_SIG_SEED 0xFFFF
205
206/* Fixed Non-WiFi signature */
207#define UDMA_HDI_IN_CMD_NON_WIFI_HW_SIG 0xCBBC
208
209/* IN NTFY op-codes */
210#define UMAC_NOTIFY_OPCODE_ALIVE 0xA1
211#define UMAC_NOTIFY_OPCODE_INIT_COMPLETE 0xA2
212#define UMAC_NOTIFY_OPCODE_WIFI_CORE_STATUS 0xA3
213#define UMAC_NOTIFY_OPCODE_ERROR 0xA4
214#define UMAC_NOTIFY_OPCODE_DEBUG 0xA5
215#define UMAC_NOTIFY_OPCODE_WIFI_IF_WRAPPER 0xB0
216#define UMAC_NOTIFY_OPCODE_STATS 0xB1
217#define UMAC_NOTIFY_OPCODE_PAGE_DEALLOC 0xB3
218#define UMAC_NOTIFY_OPCODE_RX_TICKET 0xB4
219#define UMAC_NOTIFY_OPCODE_MAX (UMAC_NOTIFY_OPCODE_RX_TICKET -\
220 UMAC_NOTIFY_OPCODE_ALIVE + 1)
221#define UMAC_NOTIFY_OPCODE_FIRST (UMAC_NOTIFY_OPCODE_ALIVE)
222
223/* HDI OUT OP CODE */
224#define UMAC_HDI_OUT_OPCODE_PING 0x0
225#define UMAC_HDI_OUT_OPCODE_READ 0x1
226#define UMAC_HDI_OUT_OPCODE_WRITE 0x2
227#define UMAC_HDI_OUT_OPCODE_JUMP 0x3
228#define UMAC_HDI_OUT_OPCODE_REBOOT 0x4
229#define UMAC_HDI_OUT_OPCODE_WRITE_PERSISTENT 0x5
230#define UMAC_HDI_OUT_OPCODE_READ_PERSISTENT 0x6
231#define UMAC_HDI_OUT_OPCODE_READ_MODIFY_WRITE 0x7
232/* #define UMAC_HDI_OUT_OPCODE_RESERVED 0x8..0xA */
233#define UMAC_HDI_OUT_OPCODE_WRITE_AUX_REG 0xB
234#define UMAC_HDI_OUT_OPCODE_WIFI 0xF
235
236/* HDI IN OP CODE -- Non WiFi*/
237#define UMAC_HDI_IN_OPCODE_PING 0x0
238#define UMAC_HDI_IN_OPCODE_READ 0x1
239#define UMAC_HDI_IN_OPCODE_WRITE 0x2
240#define UMAC_HDI_IN_OPCODE_WRITE_PERSISTENT 0x5
241#define UMAC_HDI_IN_OPCODE_READ_PERSISTENT 0x6
242#define UMAC_HDI_IN_OPCODE_READ_MODIFY_WRITE 0x7
243#define UMAC_HDI_IN_OPCODE_EP_MGMT 0x8
244#define UMAC_HDI_IN_OPCODE_CREDIT_CHANGE 0x9
245#define UMAC_HDI_IN_OPCODE_CTRL_DATABASE 0xA
246#define UMAC_HDI_IN_OPCODE_WRITE_AUX_REG 0xB
247#define UMAC_HDI_IN_OPCODE_NONWIFI_MAX \
248 (UMAC_HDI_IN_OPCODE_WRITE_AUX_REG + 1)
249#define UMAC_HDI_IN_OPCODE_WIFI 0xF
250
251/* HDI IN SOURCE */
252#define UMAC_HDI_IN_SOURCE_FHRX 0x0
253#define UMAC_HDI_IN_SOURCE_UDMA 0x1
254#define UMAC_HDI_IN_SOURCE_FW 0x2
255#define UMAC_HDI_IN_SOURCE_RESERVED 0x3
256
257/* OUT CMD op-codes */
258#define UMAC_CMD_OPCODE_ECHO 0x01
259#define UMAC_CMD_OPCODE_HALT 0x02
260#define UMAC_CMD_OPCODE_RESET 0x03
261#define UMAC_CMD_OPCODE_BULK_EP_INACT_TIMEOUT 0x09
262#define UMAC_CMD_OPCODE_URB_CANCEL_ACK 0x0A
263#define UMAC_CMD_OPCODE_DCACHE_FLUSH 0x0B
264#define UMAC_CMD_OPCODE_EEPROM_PROXY 0x0C
265#define UMAC_CMD_OPCODE_TX_ECHO 0x0D
266#define UMAC_CMD_OPCODE_DBG_MON 0x0E
267#define UMAC_CMD_OPCODE_INTERNAL_TX 0x0F
268#define UMAC_CMD_OPCODE_SET_PARAM_FIX 0x10
269#define UMAC_CMD_OPCODE_SET_PARAM_VAR 0x11
270#define UMAC_CMD_OPCODE_GET_PARAM 0x12
271#define UMAC_CMD_OPCODE_DBG_EVENT_WRAPPER 0x13
272#define UMAC_CMD_OPCODE_TARGET 0x14
273#define UMAC_CMD_OPCODE_STATISTIC_REQUEST 0x15
274#define UMAC_CMD_OPCODE_GET_CHAN_INFO_LIST 0x16
275#define UMAC_CMD_OPCODE_SET_PARAM_LIST 0x17
276#define UMAC_CMD_OPCODE_GET_PARAM_LIST 0x18
277#define UMAC_CMD_OPCODE_STOP_RESUME_STA_TX 0x19
278#define UMAC_CMD_OPCODE_TEST_BLOCK_ACK 0x1A
279
280#define UMAC_CMD_OPCODE_BASE_WRAPPER 0xFA
281#define UMAC_CMD_OPCODE_LMAC_WRAPPER 0xFB
282#define UMAC_CMD_OPCODE_HW_TEST_WRAPPER 0xFC
283#define UMAC_CMD_OPCODE_WIFI_IF_WRAPPER 0xFD
284#define UMAC_CMD_OPCODE_WIFI_WRAPPER 0xFE
285#define UMAC_CMD_OPCODE_WIFI_PASS_THROUGH 0xFF
286
287/* UMAC WiFi interface op-codes */
288#define UMAC_WIFI_IF_CMD_SET_PROFILE 0x11
289#define UMAC_WIFI_IF_CMD_INVALIDATE_PROFILE 0x12
290#define UMAC_WIFI_IF_CMD_SET_EXCLUDE_LIST 0x13
291#define UMAC_WIFI_IF_CMD_SCAN_REQUEST 0x14
292#define UMAC_WIFI_IF_CMD_SCAN_CONFIG 0x15
293#define UMAC_WIFI_IF_CMD_ADD_WEP40_KEY 0x16
294#define UMAC_WIFI_IF_CMD_ADD_WEP104_KEY 0x17
295#define UMAC_WIFI_IF_CMD_ADD_TKIP_KEY 0x18
296#define UMAC_WIFI_IF_CMD_ADD_CCMP_KEY 0x19
297#define UMAC_WIFI_IF_CMD_REMOVE_KEY 0x1A
298#define UMAC_WIFI_IF_CMD_GLOBAL_TX_KEY_ID 0x1B
299#define UMAC_WIFI_IF_CMD_SET_HOST_EXTENDED_IE 0x1C
300#define UMAC_WIFI_IF_CMD_GET_SUPPORTED_CHANNELS 0x1E
301#define UMAC_WIFI_IF_CMD_PMKID_UPDATE 0x1F
302#define UMAC_WIFI_IF_CMD_TX_PWR_TRIGGER 0x20
303
304/* UMAC WiFi interface ports */
305#define UMAC_WIFI_IF_FLG_PORT_DEF 0x00
306#define UMAC_WIFI_IF_FLG_PORT_PAN 0x01
307#define UMAC_WIFI_IF_FLG_PORT_PAN_INVALID WIFI_IF_FLG_PORT_DEF
308
309/* UMAC WiFi interface actions */
310#define UMAC_WIFI_IF_FLG_ACT_GET 0x10
311#define UMAC_WIFI_IF_FLG_ACT_SET 0x20
312
313/* iwm_umac_fw_cmd_hdr.meta_data byte count -- bits [11:0] */
314#define UMAC_FW_CMD_BYTE_COUNT_POS 0
315#define UMAC_FW_CMD_BYTE_COUNT_SEED 0xFFF
316
317/* iwm_umac_fw_cmd_hdr.meta_data status -- bits [15:12] */
318#define UMAC_FW_CMD_STATUS_POS 12
319#define UMAC_FW_CMD_STATUS_SEED 0xF
320
321/* iwm_umac_fw_cmd_hdr.meta_data full TX command by Driver -- bits [16:16] */
322#define UMAC_FW_CMD_TX_DRV_FULL_CMD_POS 16
323#define UMAC_FW_CMD_TX_DRV_FULL_CMD_SEED 0x1
324
325/* iwm_umac_fw_cmd_hdr.meta_data TX command by FW -- bits [17:17] */
326#define UMAC_FW_CMD_TX_FW_CMD_POS 17
327#define UMAC_FW_CMD_TX_FW_CMD_SEED 0x1
328
329/* iwm_umac_fw_cmd_hdr.meta_data TX plaintext mode -- bits [18:18] */
330#define UMAC_FW_CMD_TX_PLAINTEXT_POS 18
331#define UMAC_FW_CMD_TX_PLAINTEXT_SEED 0x1
332
333/* iwm_umac_fw_cmd_hdr.meta_data STA color -- bits [22:20] */
334#define UMAC_FW_CMD_TX_STA_COLOR_POS 20
335#define UMAC_FW_CMD_TX_STA_COLOR_SEED 0x7
336
337/* iwm_umac_fw_cmd_hdr.meta_data TX life time (TU) -- bits [31:24] */
338#define UMAC_FW_CMD_TX_LIFETIME_TU_POS 24
339#define UMAC_FW_CMD_TX_LIFETIME_TU_SEED 0xFF
340
341/* iwm_dev_cmd_hdr.flags Response required -- bits [5:5] */
342#define UMAC_DEV_CMD_FLAGS_RESP_REQ_POS 5
343#define UMAC_DEV_CMD_FLAGS_RESP_REQ_SEED 0x1
344
345/* iwm_dev_cmd_hdr.flags Aborted command -- bits [6:6] */
346#define UMAC_DEV_CMD_FLAGS_ABORT_POS 6
347#define UMAC_DEV_CMD_FLAGS_ABORT_SEED 0x1
348
349/* iwm_dev_cmd_hdr.flags Internal command -- bits [7:7] */
350#define DEV_CMD_FLAGS_FLD_INTERNAL_POS 7
351#define DEV_CMD_FLAGS_FLD_INTERNAL_SEED 0x1
352
/* Rx */
/* Rx actions (presumably carried in iwm_rx_ticket.action -- confirm with
 * users of struct iwm_rx_ticket) */
#define IWM_RX_TICKET_DROP 0x0
#define IWM_RX_TICKET_RELEASE 0x1
#define IWM_RX_TICKET_SNIFFER 0x2
#define IWM_RX_TICKET_ENQUEUE 0x3

/* Rx flags (iwm_rx_ticket.flags bit masks) */
#define IWM_RX_TICKET_PAD_SIZE_MSK 0x2
#define IWM_RX_TICKET_SPECIAL_SNAP_MSK 0x4
#define IWM_RX_TICKET_AMSDU_MSK 0x8
#define IWM_RX_TICKET_DROP_REASON_POS 4
#define IWM_RX_TICKET_DROP_REASON_MSK (0x1F << IWM_RX_TICKET_DROP_REASON_POS)

/*
 * Drop reason codes.  These occupy the 5-bit field at bits [8:4] of the
 * ticket flags (see IWM_RX_TICKET_DROP_REASON_POS/MSK above).
 */
#define IWM_RX_DROP_NO_DROP 0x0
#define IWM_RX_DROP_BAD_CRC 0x1
/* L2P no address match */
#define IWM_RX_DROP_LMAC_ADDR_FILTER 0x2
/* Multicast address not in list */
#define IWM_RX_DROP_MCAST_ADDR_FILTER 0x3
/* Control frames are not sent to the driver */
#define IWM_RX_DROP_CTL_FRAME 0x4
/* Our frame is back */
#define IWM_RX_DROP_OUR_TX 0x5
/* Association class filtering */
#define IWM_RX_DROP_CLASS_FILTER 0x6
/* Duplicated frame */
#define IWM_RX_DROP_DUPLICATE_FILTER 0x7
/* Decryption error */
#define IWM_RX_DROP_SEC_ERR 0x8
/* Unencrypted frame while encryption is on */
#define IWM_RX_DROP_SEC_NO_ENCRYPTION 0x9
/* Replay check failure */
#define IWM_RX_DROP_SEC_REPLAY_ERR 0xa
/* uCode and FW key color mismatch, check before replay */
#define IWM_RX_DROP_SEC_KEY_COLOR_MISMATCH 0xb
#define IWM_RX_DROP_SEC_TKIP_COUNTER_MEASURE 0xc
/* No fragmentations Db is found */
#define IWM_RX_DROP_FRAG_NO_RESOURCE 0xd
/* Fragmention Db has seqCtl mismatch Vs. non-1st frag */
#define IWM_RX_DROP_FRAG_ERR 0xe
#define IWM_RX_DROP_FRAG_LOST 0xf
#define IWM_RX_DROP_FRAG_COMPLETE 0x10
/* Should be handled by UMAC */
#define IWM_RX_DROP_MANAGEMENT 0x11
/* STA not found by UMAC */
#define IWM_RX_DROP_NO_STATION 0x12
/* NULL or QoS NULL */
#define IWM_RX_DROP_NULL_DATA 0x13
#define IWM_RX_DROP_BA_REORDER_OLD_SEQCTL 0x14
#define IWM_RX_DROP_BA_REORDER_DUPLICATE 0x15
/*
 * Per-frame Rx ticket (wire format: little-endian fields, no padding).
 */
struct iwm_rx_ticket {
	__le16 action;	/* IWM_RX_TICKET_{DROP,RELEASE,SNIFFER,ENQUEUE} */
	__le16 id;
	__le16 flags;	/* IWM_RX_TICKET_*_MSK bits + drop reason field */
	u8 payload_offset; /* includes: MAC header, pad, IV */
	u8 tail_len; /* includes: MIC, ICV, CRC (w/o STATUS) */
} __packed;

/* Header prepended to each received MPDU (wire format). */
struct iwm_rx_mpdu_hdr {
	__le16 len;
	__le16 reserved;
} __packed;
417
/* UMAC SW WIFI API */

/*
 * Generic device command header; embedded in iwm_umac_fw_cmd_hdr below.
 * Flag bits are the UMAC_DEV_CMD_FLAGS_* fields defined earlier.
 */
struct iwm_dev_cmd_hdr {
	u8 cmd;
	u8 flags;
	__le16 seq_num;
} __packed;

/*
 * UMAC firmware command header: the packed meta_data word (see the
 * UMAC_FW_CMD_* POS/SEED fields above) followed by the device command.
 */
struct iwm_umac_fw_cmd_hdr {
	__le32 meta_data;
	struct iwm_dev_cmd_hdr cmd;
} __packed;

/*
 * Outbound/inbound header wrappers.  The hw_hdr types
 * (iwm_udma_out_wifi_hdr etc.) are declared elsewhere in this driver.
 */
struct iwm_umac_wifi_out_hdr {
	struct iwm_udma_out_wifi_hdr hw_hdr;
	struct iwm_umac_fw_cmd_hdr sw_hdr;
} __packed;

struct iwm_umac_nonwifi_out_hdr {
	struct iwm_udma_out_nonwifi_hdr hw_hdr;
} __packed;

struct iwm_umac_wifi_in_hdr {
	struct iwm_udma_in_hdr hw_hdr;
	struct iwm_umac_fw_cmd_hdr sw_hdr;
} __packed;

struct iwm_umac_nonwifi_in_hdr {
	struct iwm_udma_in_hdr hw_hdr;
	__le32 time_stamp;
} __packed;
449
/* UMAC page size in bytes (512). */
#define IWM_UMAC_PAGE_SIZE 0x200

/* Notify structures */
/* Firmware version/build identifiers reported in the alive notification.
 * NOTE(review): unlike the notification structs below these three are not
 * marked __packed -- confirm the layout matches the firmware's. */
struct iwm_fw_version {
	u8 minor;
	u8 major;
	__le16 id;
};

struct iwm_fw_build {
	u8 type;
	u8 subtype;
	u8 platform;
	u8 opt;
};

/* Payload of the firmware "alive" notification: version/build info plus
 * firmware-side addresses (log header/buffer, system timer). */
struct iwm_fw_alive_hdr {
	struct iwm_fw_version ver;
	struct iwm_fw_build build;
	__le32 os_build;
	__le32 log_hdr_addr;
	__le32 log_buf_addr;
	__le32 sys_timer_addr;
};
474
/* Driver-side wait timeouts, in jiffies. */
#define WAIT_NOTIF_TIMEOUT (2 * HZ)
#define SCAN_COMPLETE_TIMEOUT (3 * HZ)

/* Status words carried in the alive / init-complete notifications. */
#define UMAC_NTFY_ALIVE_STATUS_ERR 0xDEAD
#define UMAC_NTFY_ALIVE_STATUS_OK 0xCAFE

#define UMAC_NTFY_INIT_COMPLETE_STATUS_ERR 0xDEAD
#define UMAC_NTFY_INIT_COMPLETE_STATUS_OK 0xCAFE

#define UMAC_NTFY_WIFI_CORE_STATUS_LINK_EN 0x40
#define UMAC_NTFY_WIFI_CORE_STATUS_MLME_EN 0x80

/* Number of outbound page groups (used to size per-group arrays below). */
#define IWM_MACS_OUT_GROUPS 6
#define IWM_MACS_OUT_SGROUPS 1


/* WiFi interface notification IDs (0x80..0x8B). */
#define WIFI_IF_NTFY_ASSOC_START 0x80
#define WIFI_IF_NTFY_ASSOC_COMPLETE 0x81
#define WIFI_IF_NTFY_PROFILE_INVALIDATE_COMPLETE 0x82
#define WIFI_IF_NTFY_CONNECTION_TERMINATED 0x83
#define WIFI_IF_NTFY_SCAN_COMPLETE 0x84
#define WIFI_IF_NTFY_STA_TABLE_CHANGE 0x85
#define WIFI_IF_NTFY_EXTENDED_IE_REQUIRED 0x86
#define WIFI_IF_NTFY_RADIO_PREEMPTION 0x87
#define WIFI_IF_NTFY_BSS_TRK_TABLE_CHANGED 0x88
#define WIFI_IF_NTFY_BSS_TRK_ENTRIES_REMOVED 0x89
#define WIFI_IF_NTFY_LINK_QUALITY_STATISTICS 0x8A
#define WIFI_IF_NTFY_MGMT_FRAME 0x8B

/* DEBUG INDICATIONS */
#define WIFI_DBG_IF_NTFY_SCAN_SUPER_JOB_START 0xE0
#define WIFI_DBG_IF_NTFY_SCAN_SUPER_JOB_COMPLETE 0xE1
#define WIFI_DBG_IF_NTFY_SCAN_CHANNEL_START 0xE2
#define WIFI_DBG_IF_NTFY_SCAN_CHANNEL_RESULT 0xE3
#define WIFI_DBG_IF_NTFY_SCAN_MINI_JOB_START 0xE4
#define WIFI_DBG_IF_NTFY_SCAN_MINI_JOB_COMPLETE 0xE5
#define WIFI_DBG_IF_NTFY_CNCT_ATC_START 0xE6
#define WIFI_DBG_IF_NTFY_COEX_NOTIFICATION 0xE7
#define WIFI_DBG_IF_NTFY_COEX_HANDLE_ENVELOP 0xE8
#define WIFI_DBG_IF_NTFY_COEX_HANDLE_RELEASE_ENVELOP 0xE9

/* Highest possible notification ID. */
#define WIFI_IF_NTFY_MAX 0xff
517
/* Notification structures */
/*
 * Common header for MLME (WiFi interface) notifications; embedded as
 * mlme_hdr in the notification structs below.  buf_size is presumably the
 * byte count of the payload that follows -- confirm against the Rx path.
 */
struct iwm_umac_notif_wifi_if {
	struct iwm_umac_wifi_in_hdr hdr;
	u8 status;
	u8 flags;
	__le16 buf_size;
} __packed;
525
/* Roam reasons reported in the association-start notification. */
#define UMAC_ROAM_REASON_FIRST_SELECTION 0x1
#define UMAC_ROAM_REASON_AP_DEAUTH 0x2
#define UMAC_ROAM_REASON_AP_CONNECT_LOST 0x3
#define UMAC_ROAM_REASON_RSSI 0x4
#define UMAC_ROAM_REASON_AP_ASSISTED_ROAM 0x5
#define UMAC_ROAM_REASON_IBSS_COALESCING 0x6

/* WIFI_IF_NTFY_ASSOC_START payload. */
struct iwm_umac_notif_assoc_start {
	struct iwm_umac_notif_wifi_if mlme_hdr;
	__le32 roam_reason;	/* UMAC_ROAM_REASON_* */
	u8 bssid[ETH_ALEN];
	u8 reserved[2];
} __packed;

#define UMAC_ASSOC_COMPLETE_SUCCESS 0x0
#define UMAC_ASSOC_COMPLETE_FAILURE 0x1

/* WIFI_IF_NTFY_ASSOC_COMPLETE payload. */
struct iwm_umac_notif_assoc_complete {
	struct iwm_umac_notif_wifi_if mlme_hdr;
	__le32 status;		/* UMAC_ASSOC_COMPLETE_* */
	u8 bssid[ETH_ALEN];
	u8 band;
	u8 channel;
} __packed;
550
/* Reasons for profile invalidation. */
#define UMAC_PROFILE_INVALID_ASSOC_TIMEOUT 0x0
#define UMAC_PROFILE_INVALID_ROAM_TIMEOUT 0x1
#define UMAC_PROFILE_INVALID_REQUEST 0x2
#define UMAC_PROFILE_INVALID_RF_PREEMPTED 0x3

/* WIFI_IF_NTFY_PROFILE_INVALIDATE_COMPLETE payload. */
struct iwm_umac_notif_profile_invalidate {
	struct iwm_umac_notif_wifi_if mlme_hdr;
	__le32 reason;		/* UMAC_PROFILE_INVALID_* */
} __packed;

/* Scan completion results. */
#define UMAC_SCAN_RESULT_SUCCESS 0x0
#define UMAC_SCAN_RESULT_ABORTED 0x1
#define UMAC_SCAN_RESULT_REJECTED 0x2
#define UMAC_SCAN_RESULT_FAILED 0x3

/* WIFI_IF_NTFY_SCAN_COMPLETE payload. */
struct iwm_umac_notif_scan_complete {
	struct iwm_umac_notif_wifi_if mlme_hdr;
	__le32 type;
	__le32 result;		/* UMAC_SCAN_RESULT_* */
	u8 seq_num;
} __packed;
572
/* Station-table change opcodes. */
#define UMAC_OPCODE_ADD_MODIFY 0x0
#define UMAC_OPCODE_REMOVE 0x1
#define UMAC_OPCODE_CLEAR_ALL 0x2

/* Station flag: QoS-capable station. */
#define UMAC_STA_FLAG_QOS 0x1

/* WIFI_IF_NTFY_STA_TABLE_CHANGE payload. */
struct iwm_umac_notif_sta_info {
	struct iwm_umac_notif_wifi_if mlme_hdr;
	__le32 opcode;		/* UMAC_OPCODE_* */
	u8 mac_addr[ETH_ALEN];
	u8 sta_id; /* bits 0-3: station ID, bits 4-7: station color */
	u8 flags;		/* UMAC_STA_FLAG_* */
} __packed;
586
#define UMAC_BAND_2GHZ 0
#define UMAC_BAND_5GHZ 1

#define UMAC_CHANNEL_WIDTH_20MHZ 0
#define UMAC_CHANNEL_WIDTH_40MHZ 1

/*
 * BSS-table entry notification.  The beacon/probe frame follows inline;
 * frame_len gives its length.  NOTE(review): frame_buf is a one-element
 * trailing array, so sizeof() includes one byte of it -- confirm callers'
 * size arithmetic before ever converting it to a flexible array member.
 */
struct iwm_umac_notif_bss_info {
	struct iwm_umac_notif_wifi_if mlme_hdr;
	__le32 type;
	__le32 timestamp;
	__le16 table_idx;
	__le16 frame_len;
	u8 band;		/* UMAC_BAND_* */
	u8 channel;
	s8 rssi;
	u8 reserved;
	u8 frame_buf[1];
} __packed;
605
/*
 * Layout of each entry in iwm_umac_notif_bss_removed: a table index plus
 * removal-reason flags.  NOTE(review): the two masks overlap in bits
 * [11:10] (0x0fff vs 0xfc00), although the flag values actually defined
 * below only use bits 12-14 -- confirm against the firmware interface.
 */
#define IWM_BSS_REMOVE_INDEX_MSK 0x0fff
#define IWM_BSS_REMOVE_FLAGS_MSK 0xfc00

#define IWM_BSS_REMOVE_FLG_AGE 0x1000
#define IWM_BSS_REMOVE_FLG_TIMEOUT 0x2000
#define IWM_BSS_REMOVE_FLG_TABLE_FULL 0x4000
613struct iwm_umac_notif_bss_removed {
614 struct iwm_umac_notif_wifi_if mlme_hdr;
615 __le32 count;
616 __le16 entries[0];
617} __packed;
618
/*
 * WIFI_IF_NTFY_MGMT_FRAME payload: `len` bytes of management frame follow
 * inline.  NOTE(review): one-element trailing array -- sizeof() includes
 * one byte of `frame`; confirm callers before changing to [].
 */
struct iwm_umac_notif_mgt_frame {
	struct iwm_umac_notif_wifi_if mlme_hdr;
	__le16 len;
	u8 frame[1];
} __packed;
624
/*
 * Firmware "alive" notification: status (UMAC_NTFY_ALIVE_STATUS_OK/ERR),
 * version/build info, and the per-group outbound page state.
 */
struct iwm_umac_notif_alive {
	struct iwm_umac_wifi_in_hdr hdr;
	__le16 status;		/* UMAC_NTFY_ALIVE_STATUS_* */
	__le16 reserved1;
	struct iwm_fw_alive_hdr alive_data;
	__le16 reserved2;
	__le16 page_grp_count;
	__le32 page_grp_state[IWM_MACS_OUT_GROUPS];
} __packed;

/* Init-complete notification (UMAC_NTFY_INIT_COMPLETE_STATUS_OK/ERR). */
struct iwm_umac_notif_init_complete {
	struct iwm_umac_wifi_in_hdr hdr;
	__le16 status;
	__le16 reserved;
} __packed;
640
/* error categories reported in iwm_fw_error_hdr.category */
enum {
	UMAC_SYS_ERR_CAT_NONE = 0,
	UMAC_SYS_ERR_CAT_BOOT,
	UMAC_SYS_ERR_CAT_UMAC,
	UMAC_SYS_ERR_CAT_UAXM,
	UMAC_SYS_ERR_CAT_LMAC,
	UMAC_SYS_ERR_CAT_MAX
};
650
/*
 * Firmware crash/error dump.  `pc` is presumably the faulting program
 * counter and blink/ilink the link-register chain, following the usual
 * Intel wireless error-dump layout -- confirm against the firmware docs;
 * the exact semantics are not visible in this header.
 */
struct iwm_fw_error_hdr {
	__le32 category;	/* UMAC_SYS_ERR_CAT_* */
	__le32 status;
	__le32 pc;
	__le32 blink1;
	__le32 blink2;
	__le32 ilink1;
	__le32 ilink2;
	__le32 data1;
	__le32 data2;
	__le32 line_num;
	__le32 umac_status;
	__le32 lmac_status;
	__le32 sdio_status;
	__le32 dbm_sample_ctrl;
	__le32 dbm_buf_base;
	__le32 dbm_buf_end;
	__le32 dbm_buf_write_ptr;
	__le32 dbm_buf_cycle_cnt;
} __packed;

/* Error notification wrapping the dump above. */
struct iwm_umac_notif_error {
	struct iwm_umac_wifi_in_hdr hdr;
	struct iwm_fw_error_hdr err;
} __packed;
676
/*
 * Page-deallocation notification bit fields (POS = bit position,
 * SEED = unshifted mask): `changes` packs a change count and a change
 * mask; each grp_info word packs a page count and a group number.
 */
#define UMAC_DEALLOC_NTFY_CHANGES_CNT_POS 0
#define UMAC_DEALLOC_NTFY_CHANGES_CNT_SEED 0xff
#define UMAC_DEALLOC_NTFY_CHANGES_MSK_POS 8
#define UMAC_DEALLOC_NTFY_CHANGES_MSK_SEED 0xffffff
#define UMAC_DEALLOC_NTFY_PAGE_CNT_POS 0
#define UMAC_DEALLOC_NTFY_PAGE_CNT_SEED 0xffffff
#define UMAC_DEALLOC_NTFY_GROUP_NUM_POS 24
#define UMAC_DEALLOC_NTFY_GROUP_NUM_SEED 0xf

struct iwm_umac_notif_page_dealloc {
	struct iwm_umac_wifi_in_hdr hdr;
	__le32 changes;		/* UMAC_DEALLOC_NTFY_CHANGES_* fields */
	__le32 grp_info[IWM_MACS_OUT_GROUPS];
} __packed;
691
/* WiFi core status notification (UMAC_NTFY_WIFI_CORE_STATUS_* bits). */
struct iwm_umac_notif_wifi_status {
	struct iwm_umac_wifi_in_hdr hdr;
	__le16 status;
	__le16 reserved;
} __packed;

/*
 * Batch of Rx tickets.  NOTE(review): `tickets` is a one-element trailing
 * array holding num_tickets entries -- sizeof() includes one ticket;
 * confirm callers' arithmetic before converting to a flexible array.
 */
struct iwm_umac_notif_rx_ticket {
	struct iwm_umac_wifi_in_hdr hdr;
	u8 num_tickets;
	u8 reserved[3];
	struct iwm_rx_ticket tickets[1];
} __packed;
704
/* Tx/Rx rates window (number of max of last update window per second) */
#define UMAC_NTF_RATE_SAMPLE_NR 4

/* Max numbers of bits required to go through all antennae in bitmasks */
#define UMAC_PHY_NUM_CHAINS 3

#define IWM_UMAC_MGMT_TID 8
#define IWM_UMAC_TID_NR 9 /* 8 TIDs + MGMT */

/*
 * Link/traffic statistics notification.  Field order is wire format --
 * do not reorder.  Counter semantics beyond the names are not visible in
 * this header; see the statistics handler for how they are consumed.
 */
struct iwm_umac_notif_stats {
	struct iwm_umac_wifi_in_hdr hdr;
	__le32 flags;
	__le32 timestamp;
	__le16 tid_load[IWM_UMAC_TID_NR + 1]; /* 1 non-QoS + 1 dword align */
	__le16 tx_rate[UMAC_NTF_RATE_SAMPLE_NR];
	__le16 rx_rate[UMAC_NTF_RATE_SAMPLE_NR];
	__le32 chain_energy[UMAC_PHY_NUM_CHAINS];
	s32 rssi_dbm;
	s32 noise_dbm;
	__le32 supp_rates;
	__le32 supp_ht_rates;
	__le32 missed_beacons;
	__le32 rx_beacons;
	__le32 rx_dir_pkts;
	__le32 rx_nondir_pkts;
	__le32 rx_multicast;
	__le32 rx_errors;
	__le32 rx_drop_other_bssid;
	__le32 rx_drop_decode;
	__le32 rx_drop_reassembly;
	__le32 rx_drop_bad_len;
	__le32 rx_drop_overflow;
	__le32 rx_drop_crc;
	__le32 rx_drop_missed;
	__le32 tx_dir_pkts;
	__le32 tx_nondir_pkts;
	__le32 tx_failure;
	__le32 tx_errors;
	__le32 tx_drop_max_retry;
	__le32 tx_err_abort;
	__le32 tx_err_carrier;
	__le32 rx_bytes;
	__le32 tx_bytes;
	__le32 tx_power;
	__le32 tx_max_power;
	__le32 roam_threshold;
	__le32 ap_assoc_nr;
	__le32 scan_full;
	__le32 scan_abort;
	__le32 ap_nr;
	__le32 roam_nr;
	__le32 roam_missed_beacons;
	__le32 roam_rssi;
	__le32 roam_unassoc;
	__le32 roam_deauth;
	__le32 roam_ap_loadblance;
} __packed;
762
/* Flags for iwm_umac_notif_stop_resume_tx.flags. */
#define UMAC_STOP_TX_FLAG 0x1
#define UMAC_RESUME_TX_FLAG 0x2

#define LAST_SEQ_NUM_INVALID 0xFFFF

/* Per-station Tx flow-control notification. */
struct iwm_umac_notif_stop_resume_tx {
	struct iwm_umac_wifi_in_hdr hdr;
	u8 flags; /* UMAC_*_TX_FLAG_* */
	u8 sta_id;
	__le16 stop_resume_tid_msk; /* tid bitmask */
} __packed;
774
#define UMAC_MAX_NUM_PMKIDS 4

/* WiFi interface wrapper header */
struct iwm_umac_wifi_if {
	u8 oid;
	u8 flags;
	__le16 buf_size;	/* presumably payload byte count -- confirm */
} __packed;

/*
 * Sequence-number originator field: the top two bits of a 16-bit sequence
 * number encode the source; IWM_SEQ_NUM_MSK (0xC000) extracts them.
 */
#define IWM_SEQ_NUM_HOST_MSK 0x0000
#define IWM_SEQ_NUM_UMAC_MSK 0x4000
#define IWM_SEQ_NUM_LMAC_MSK 0x8000
#define IWM_SEQ_NUM_MSK 0xC000
788
789#endif
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 2fa879b015b6..f4a203049fb4 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -435,24 +435,40 @@ static int lbs_add_wpa_tlv(u8 *tlv, const u8 *ie, u8 ie_len)
435 * Set Channel 435 * Set Channel
436 */ 436 */
437 437
438static int lbs_cfg_set_channel(struct wiphy *wiphy, 438static int lbs_cfg_set_monitor_channel(struct wiphy *wiphy,
439 struct net_device *netdev, 439 struct ieee80211_channel *channel,
440 struct ieee80211_channel *channel, 440 enum nl80211_channel_type channel_type)
441 enum nl80211_channel_type channel_type)
442{ 441{
443 struct lbs_private *priv = wiphy_priv(wiphy); 442 struct lbs_private *priv = wiphy_priv(wiphy);
444 int ret = -ENOTSUPP; 443 int ret = -ENOTSUPP;
445 444
446 lbs_deb_enter_args(LBS_DEB_CFG80211, "iface %s freq %d, type %d", 445 lbs_deb_enter_args(LBS_DEB_CFG80211, "freq %d, type %d",
447 netdev_name(netdev), channel->center_freq, channel_type); 446 channel->center_freq, channel_type);
448 447
449 if (channel_type != NL80211_CHAN_NO_HT) 448 if (channel_type != NL80211_CHAN_NO_HT)
450 goto out; 449 goto out;
451 450
452 if (netdev == priv->mesh_dev) 451 ret = lbs_set_channel(priv, channel->hw_value);
453 ret = lbs_mesh_set_channel(priv, channel->hw_value); 452
454 else 453 out:
455 ret = lbs_set_channel(priv, channel->hw_value); 454 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
455 return ret;
456}
457
458static int lbs_cfg_set_mesh_channel(struct wiphy *wiphy,
459 struct net_device *netdev,
460 struct ieee80211_channel *channel)
461{
462 struct lbs_private *priv = wiphy_priv(wiphy);
463 int ret = -ENOTSUPP;
464
465 lbs_deb_enter_args(LBS_DEB_CFG80211, "iface %s freq %d",
466 netdev_name(netdev), channel->center_freq);
467
468 if (netdev != priv->mesh_dev)
469 goto out;
470
471 ret = lbs_mesh_set_channel(priv, channel->hw_value);
456 472
457 out: 473 out:
458 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); 474 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
@@ -2029,7 +2045,8 @@ static int lbs_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
2029 */ 2045 */
2030 2046
2031static struct cfg80211_ops lbs_cfg80211_ops = { 2047static struct cfg80211_ops lbs_cfg80211_ops = {
2032 .set_channel = lbs_cfg_set_channel, 2048 .set_monitor_channel = lbs_cfg_set_monitor_channel,
2049 .libertas_set_mesh_channel = lbs_cfg_set_mesh_channel,
2033 .scan = lbs_cfg_scan, 2050 .scan = lbs_cfg_scan,
2034 .connect = lbs_cfg_connect, 2051 .connect = lbs_cfg_connect,
2035 .disconnect = lbs_cfg_disconnect, 2052 .disconnect = lbs_cfg_disconnect,
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index a06cc283e23d..668dd27616a0 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -483,7 +483,7 @@ static ssize_t lbs_rdmac_write(struct file *file,
483 res = -EFAULT; 483 res = -EFAULT;
484 goto out_unlock; 484 goto out_unlock;
485 } 485 }
486 priv->mac_offset = simple_strtoul((char *)buf, NULL, 16); 486 priv->mac_offset = simple_strtoul(buf, NULL, 16);
487 res = count; 487 res = count;
488out_unlock: 488out_unlock:
489 free_page(addr); 489 free_page(addr);
@@ -565,7 +565,7 @@ static ssize_t lbs_rdbbp_write(struct file *file,
565 res = -EFAULT; 565 res = -EFAULT;
566 goto out_unlock; 566 goto out_unlock;
567 } 567 }
568 priv->bbp_offset = simple_strtoul((char *)buf, NULL, 16); 568 priv->bbp_offset = simple_strtoul(buf, NULL, 16);
569 res = count; 569 res = count;
570out_unlock: 570out_unlock:
571 free_page(addr); 571 free_page(addr);
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index 672005430aca..60996ce89f77 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -58,6 +58,7 @@ struct lbs_private {
58 uint16_t mesh_tlv; 58 uint16_t mesh_tlv;
59 u8 mesh_ssid[IEEE80211_MAX_SSID_LEN + 1]; 59 u8 mesh_ssid[IEEE80211_MAX_SSID_LEN + 1];
60 u8 mesh_ssid_len; 60 u8 mesh_ssid_len;
61 u8 mesh_channel;
61#endif 62#endif
62 63
63 /* Debugfs */ 64 /* Debugfs */
diff --git a/drivers/net/wireless/libertas/host.h b/drivers/net/wireless/libertas/host.h
index 2e2dbfa2ee50..96726f79a1dd 100644
--- a/drivers/net/wireless/libertas/host.h
+++ b/drivers/net/wireless/libertas/host.h
@@ -68,7 +68,6 @@
68#define CMD_802_11_BEACON_STOP 0x0049 68#define CMD_802_11_BEACON_STOP 0x0049
69#define CMD_802_11_MAC_ADDRESS 0x004d 69#define CMD_802_11_MAC_ADDRESS 0x004d
70#define CMD_802_11_LED_GPIO_CTRL 0x004e 70#define CMD_802_11_LED_GPIO_CTRL 0x004e
71#define CMD_802_11_EEPROM_ACCESS 0x0059
72#define CMD_802_11_BAND_CONFIG 0x0058 71#define CMD_802_11_BAND_CONFIG 0x0058
73#define CMD_GSPI_BUS_CONFIG 0x005a 72#define CMD_GSPI_BUS_CONFIG 0x005a
74#define CMD_802_11D_DOMAIN_INFO 0x005b 73#define CMD_802_11D_DOMAIN_INFO 0x005b
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index cd3b0d400618..64b7dc5de126 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -302,7 +302,7 @@ error:
302static void if_usb_disconnect(struct usb_interface *intf) 302static void if_usb_disconnect(struct usb_interface *intf)
303{ 303{
304 struct if_usb_card *cardp = usb_get_intfdata(intf); 304 struct if_usb_card *cardp = usb_get_intfdata(intf);
305 struct lbs_private *priv = (struct lbs_private *) cardp->priv; 305 struct lbs_private *priv = cardp->priv;
306 306
307 lbs_deb_enter(LBS_DEB_MAIN); 307 lbs_deb_enter(LBS_DEB_MAIN);
308 308
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index e87c031b298f..97807751ebcf 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -131,16 +131,13 @@ static int lbs_mesh_config(struct lbs_private *priv, uint16_t action,
131 131
132int lbs_mesh_set_channel(struct lbs_private *priv, u8 channel) 132int lbs_mesh_set_channel(struct lbs_private *priv, u8 channel)
133{ 133{
134 priv->mesh_channel = channel;
134 return lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, channel); 135 return lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, channel);
135} 136}
136 137
137static uint16_t lbs_mesh_get_channel(struct lbs_private *priv) 138static uint16_t lbs_mesh_get_channel(struct lbs_private *priv)
138{ 139{
139 struct wireless_dev *mesh_wdev = priv->mesh_dev->ieee80211_ptr; 140 return priv->mesh_channel ?: 1;
140 if (mesh_wdev->channel)
141 return mesh_wdev->channel->hw_value;
142 else
143 return 1;
144} 141}
145 142
146/*************************************************************************** 143/***************************************************************************
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index 19a5a92dd779..d576dd6665d3 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -253,7 +253,7 @@ lbtf_deb_leave(LBTF_DEB_MAIN);
253static void if_usb_disconnect(struct usb_interface *intf) 253static void if_usb_disconnect(struct usb_interface *intf)
254{ 254{
255 struct if_usb_card *cardp = usb_get_intfdata(intf); 255 struct if_usb_card *cardp = usb_get_intfdata(intf);
256 struct lbtf_private *priv = (struct lbtf_private *) cardp->priv; 256 struct lbtf_private *priv = cardp->priv;
257 257
258 lbtf_deb_enter(LBTF_DEB_MAIN); 258 lbtf_deb_enter(LBTF_DEB_MAIN);
259 259
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index a0b7cfd34685..200bcc0ead98 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -292,7 +292,7 @@ struct mac80211_hwsim_data {
292 struct list_head list; 292 struct list_head list;
293 struct ieee80211_hw *hw; 293 struct ieee80211_hw *hw;
294 struct device *dev; 294 struct device *dev;
295 struct ieee80211_supported_band bands[2]; 295 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
296 struct ieee80211_channel channels_2ghz[ARRAY_SIZE(hwsim_channels_2ghz)]; 296 struct ieee80211_channel channels_2ghz[ARRAY_SIZE(hwsim_channels_2ghz)];
297 struct ieee80211_channel channels_5ghz[ARRAY_SIZE(hwsim_channels_5ghz)]; 297 struct ieee80211_channel channels_5ghz[ARRAY_SIZE(hwsim_channels_5ghz)];
298 struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)]; 298 struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)];
@@ -571,7 +571,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
571 skb_dequeue(&data->pending); 571 skb_dequeue(&data->pending);
572 } 572 }
573 573
574 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); 574 skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
575 if (skb == NULL) 575 if (skb == NULL)
576 goto nla_put_failure; 576 goto nla_put_failure;
577 577
@@ -678,8 +678,7 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
678 continue; 678 continue;
679 679
680 if (data2->idle || !data2->started || 680 if (data2->idle || !data2->started ||
681 !hwsim_ps_rx_ok(data2, skb) || 681 !hwsim_ps_rx_ok(data2, skb) || !data2->channel ||
682 !data->channel || !data2->channel ||
683 data->channel->center_freq != data2->channel->center_freq || 682 data->channel->center_freq != data2->channel->center_freq ||
684 !(data->group & data2->group)) 683 !(data->group & data2->group))
685 continue; 684 continue;
@@ -1083,6 +1082,8 @@ enum hwsim_testmode_attr {
1083enum hwsim_testmode_cmd { 1082enum hwsim_testmode_cmd {
1084 HWSIM_TM_CMD_SET_PS = 0, 1083 HWSIM_TM_CMD_SET_PS = 0,
1085 HWSIM_TM_CMD_GET_PS = 1, 1084 HWSIM_TM_CMD_GET_PS = 1,
1085 HWSIM_TM_CMD_STOP_QUEUES = 2,
1086 HWSIM_TM_CMD_WAKE_QUEUES = 3,
1086}; 1087};
1087 1088
1088static const struct nla_policy hwsim_testmode_policy[HWSIM_TM_ATTR_MAX + 1] = { 1089static const struct nla_policy hwsim_testmode_policy[HWSIM_TM_ATTR_MAX + 1] = {
@@ -1122,6 +1123,12 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
1122 if (nla_put_u32(skb, HWSIM_TM_ATTR_PS, hwsim->ps)) 1123 if (nla_put_u32(skb, HWSIM_TM_ATTR_PS, hwsim->ps))
1123 goto nla_put_failure; 1124 goto nla_put_failure;
1124 return cfg80211_testmode_reply(skb); 1125 return cfg80211_testmode_reply(skb);
1126 case HWSIM_TM_CMD_STOP_QUEUES:
1127 ieee80211_stop_queues(hw);
1128 return 0;
1129 case HWSIM_TM_CMD_WAKE_QUEUES:
1130 ieee80211_wake_queues(hw);
1131 return 0;
1125 default: 1132 default:
1126 return -EOPNOTSUPP; 1133 return -EOPNOTSUPP;
1127 } 1134 }
@@ -1486,7 +1493,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
1486 struct mac80211_hwsim_data *data2; 1493 struct mac80211_hwsim_data *data2;
1487 struct ieee80211_tx_info *txi; 1494 struct ieee80211_tx_info *txi;
1488 struct hwsim_tx_rate *tx_attempts; 1495 struct hwsim_tx_rate *tx_attempts;
1489 struct sk_buff __user *ret_skb; 1496 unsigned long ret_skb_ptr;
1490 struct sk_buff *skb, *tmp; 1497 struct sk_buff *skb, *tmp;
1491 struct mac_address *src; 1498 struct mac_address *src;
1492 unsigned int hwsim_flags; 1499 unsigned int hwsim_flags;
@@ -1504,8 +1511,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
1504 info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER]); 1511 info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER]);
1505 hwsim_flags = nla_get_u32(info->attrs[HWSIM_ATTR_FLAGS]); 1512 hwsim_flags = nla_get_u32(info->attrs[HWSIM_ATTR_FLAGS]);
1506 1513
1507 ret_skb = (struct sk_buff __user *) 1514 ret_skb_ptr = nla_get_u64(info->attrs[HWSIM_ATTR_COOKIE]);
1508 (unsigned long) nla_get_u64(info->attrs[HWSIM_ATTR_COOKIE]);
1509 1515
1510 data2 = get_hwsim_data_ref_from_addr(src); 1516 data2 = get_hwsim_data_ref_from_addr(src);
1511 1517
@@ -1514,7 +1520,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
1514 1520
1515 /* look for the skb matching the cookie passed back from user */ 1521 /* look for the skb matching the cookie passed back from user */
1516 skb_queue_walk_safe(&data2->pending, skb, tmp) { 1522 skb_queue_walk_safe(&data2->pending, skb, tmp) {
1517 if (skb == ret_skb) { 1523 if ((unsigned long)skb == ret_skb_ptr) {
1518 skb_unlink(skb, &data2->pending); 1524 skb_unlink(skb, &data2->pending);
1519 found = true; 1525 found = true;
1520 break; 1526 break;
@@ -1857,7 +1863,7 @@ static int __init init_mac80211_hwsim(void)
1857 sband->n_bitrates = ARRAY_SIZE(hwsim_rates) - 4; 1863 sband->n_bitrates = ARRAY_SIZE(hwsim_rates) - 4;
1858 break; 1864 break;
1859 default: 1865 default:
1860 break; 1866 continue;
1861 } 1867 }
1862 1868
1863 sband->ht_cap.ht_supported = true; 1869 sband->ht_cap.ht_supported = true;
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index fe8ebfebcc0e..e535c937628b 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -101,8 +101,7 @@ int mwifiex_ret_11n_delba(struct mwifiex_private *priv,
101{ 101{
102 int tid; 102 int tid;
103 struct mwifiex_tx_ba_stream_tbl *tx_ba_tbl; 103 struct mwifiex_tx_ba_stream_tbl *tx_ba_tbl;
104 struct host_cmd_ds_11n_delba *del_ba = 104 struct host_cmd_ds_11n_delba *del_ba = &resp->params.del_ba;
105 (struct host_cmd_ds_11n_delba *) &resp->params.del_ba;
106 uint16_t del_ba_param_set = le16_to_cpu(del_ba->del_ba_param_set); 105 uint16_t del_ba_param_set = le16_to_cpu(del_ba->del_ba_param_set);
107 106
108 tid = del_ba_param_set >> DELBA_TID_POS; 107 tid = del_ba_param_set >> DELBA_TID_POS;
@@ -147,8 +146,7 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
147 struct host_cmd_ds_command *resp) 146 struct host_cmd_ds_command *resp)
148{ 147{
149 int tid; 148 int tid;
150 struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = 149 struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &resp->params.add_ba_rsp;
151 (struct host_cmd_ds_11n_addba_rsp *) &resp->params.add_ba_rsp;
152 struct mwifiex_tx_ba_stream_tbl *tx_ba_tbl; 150 struct mwifiex_tx_ba_stream_tbl *tx_ba_tbl;
153 151
154 add_ba_rsp->ssn = cpu_to_le16((le16_to_cpu(add_ba_rsp->ssn)) 152 add_ba_rsp->ssn = cpu_to_le16((le16_to_cpu(add_ba_rsp->ssn))
@@ -412,7 +410,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
412 410
413 memcpy((u8 *) bss_co_2040 + 411 memcpy((u8 *) bss_co_2040 +
414 sizeof(struct mwifiex_ie_types_header), 412 sizeof(struct mwifiex_ie_types_header),
415 (u8 *) bss_desc->bcn_bss_co_2040 + 413 bss_desc->bcn_bss_co_2040 +
416 sizeof(struct ieee_types_header), 414 sizeof(struct ieee_types_header),
417 le16_to_cpu(bss_co_2040->header.len)); 415 le16_to_cpu(bss_co_2040->header.len));
418 416
@@ -426,10 +424,8 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
426 ext_cap->header.type = cpu_to_le16(WLAN_EID_EXT_CAPABILITY); 424 ext_cap->header.type = cpu_to_le16(WLAN_EID_EXT_CAPABILITY);
427 ext_cap->header.len = cpu_to_le16(sizeof(ext_cap->ext_cap)); 425 ext_cap->header.len = cpu_to_le16(sizeof(ext_cap->ext_cap));
428 426
429 memcpy((u8 *) ext_cap + 427 memcpy((u8 *)ext_cap + sizeof(struct mwifiex_ie_types_header),
430 sizeof(struct mwifiex_ie_types_header), 428 bss_desc->bcn_ext_cap + sizeof(struct ieee_types_header),
431 (u8 *) bss_desc->bcn_ext_cap +
432 sizeof(struct ieee_types_header),
433 le16_to_cpu(ext_cap->header.len)); 429 le16_to_cpu(ext_cap->header.len));
434 430
435 *buffer += sizeof(struct mwifiex_ie_types_extcap); 431 *buffer += sizeof(struct mwifiex_ie_types_extcap);
diff --git a/drivers/net/wireless/mwifiex/11n.h b/drivers/net/wireless/mwifiex/11n.h
index 77646d777dce..28366e9211fb 100644
--- a/drivers/net/wireless/mwifiex/11n.h
+++ b/drivers/net/wireless/mwifiex/11n.h
@@ -105,8 +105,7 @@ static inline u8 mwifiex_space_avail_for_new_ba_stream(
105 priv = adapter->priv[i]; 105 priv = adapter->priv[i];
106 if (priv) 106 if (priv)
107 ba_stream_num += mwifiex_wmm_list_len( 107 ba_stream_num += mwifiex_wmm_list_len(
108 (struct list_head *) 108 &priv->tx_ba_stream_tbl_ptr);
109 &priv->tx_ba_stream_tbl_ptr);
110 } 109 }
111 110
112 return ((ba_stream_num < 111 return ((ba_stream_num <
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index 900ee129e825..591ccd33f83c 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -297,9 +297,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
297 */ 297 */
298int mwifiex_cmd_11n_addba_req(struct host_cmd_ds_command *cmd, void *data_buf) 298int mwifiex_cmd_11n_addba_req(struct host_cmd_ds_command *cmd, void *data_buf)
299{ 299{
300 struct host_cmd_ds_11n_addba_req *add_ba_req = 300 struct host_cmd_ds_11n_addba_req *add_ba_req = &cmd->params.add_ba_req;
301 (struct host_cmd_ds_11n_addba_req *)
302 &cmd->params.add_ba_req;
303 301
304 cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_REQ); 302 cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_REQ);
305 cmd->size = cpu_to_le16(sizeof(*add_ba_req) + S_DS_GEN); 303 cmd->size = cpu_to_le16(sizeof(*add_ba_req) + S_DS_GEN);
@@ -321,9 +319,7 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
321 struct host_cmd_ds_11n_addba_req 319 struct host_cmd_ds_11n_addba_req
322 *cmd_addba_req) 320 *cmd_addba_req)
323{ 321{
324 struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = 322 struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &cmd->params.add_ba_rsp;
325 (struct host_cmd_ds_11n_addba_rsp *)
326 &cmd->params.add_ba_rsp;
327 u8 tid; 323 u8 tid;
328 int win_size; 324 int win_size;
329 uint16_t block_ack_param_set; 325 uint16_t block_ack_param_set;
@@ -368,8 +364,7 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
368 */ 364 */
369int mwifiex_cmd_11n_delba(struct host_cmd_ds_command *cmd, void *data_buf) 365int mwifiex_cmd_11n_delba(struct host_cmd_ds_command *cmd, void *data_buf)
370{ 366{
371 struct host_cmd_ds_11n_delba *del_ba = (struct host_cmd_ds_11n_delba *) 367 struct host_cmd_ds_11n_delba *del_ba = &cmd->params.del_ba;
372 &cmd->params.del_ba;
373 368
374 cmd->command = cpu_to_le16(HostCmd_CMD_11N_DELBA); 369 cmd->command = cpu_to_le16(HostCmd_CMD_11N_DELBA);
375 cmd->size = cpu_to_le16(sizeof(*del_ba) + S_DS_GEN); 370 cmd->size = cpu_to_le16(sizeof(*del_ba) + S_DS_GEN);
@@ -399,8 +394,7 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
399 int start_win, end_win, win_size; 394 int start_win, end_win, win_size;
400 u16 pkt_index; 395 u16 pkt_index;
401 396
402 tbl = mwifiex_11n_get_rx_reorder_tbl((struct mwifiex_private *) priv, 397 tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
403 tid, ta);
404 if (!tbl) { 398 if (!tbl) {
405 if (pkt_type != PKT_TYPE_BAR) 399 if (pkt_type != PKT_TYPE_BAR)
406 mwifiex_process_rx_packet(priv->adapter, payload); 400 mwifiex_process_rx_packet(priv->adapter, payload);
@@ -521,9 +515,7 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
521int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv, 515int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
522 struct host_cmd_ds_command *resp) 516 struct host_cmd_ds_command *resp)
523{ 517{
524 struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = 518 struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &resp->params.add_ba_rsp;
525 (struct host_cmd_ds_11n_addba_rsp *)
526 &resp->params.add_ba_rsp;
527 int tid, win_size; 519 int tid, win_size;
528 struct mwifiex_rx_reorder_tbl *tbl; 520 struct mwifiex_rx_reorder_tbl *tbl;
529 uint16_t block_ack_param_set; 521 uint16_t block_ack_param_set;
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 5c7fd185373c..c7a177c62625 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -170,7 +170,9 @@ mwifiex_cfg80211_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
170 if (!priv->sec_info.wep_enabled) 170 if (!priv->sec_info.wep_enabled)
171 return 0; 171 return 0;
172 172
173 if (mwifiex_set_encode(priv, NULL, 0, key_index, NULL, 0)) { 173 if (priv->bss_type == MWIFIEX_BSS_TYPE_UAP) {
174 priv->wep_key_curr_index = key_index;
175 } else if (mwifiex_set_encode(priv, NULL, 0, key_index, NULL, 0)) {
174 wiphy_err(wiphy, "set default Tx key index\n"); 176 wiphy_err(wiphy, "set default Tx key index\n");
175 return -EFAULT; 177 return -EFAULT;
176 } 178 }
@@ -187,9 +189,25 @@ mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev,
187 struct key_params *params) 189 struct key_params *params)
188{ 190{
189 struct mwifiex_private *priv = mwifiex_netdev_get_priv(netdev); 191 struct mwifiex_private *priv = mwifiex_netdev_get_priv(netdev);
192 struct mwifiex_wep_key *wep_key;
190 const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 193 const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
191 const u8 *peer_mac = pairwise ? mac_addr : bc_mac; 194 const u8 *peer_mac = pairwise ? mac_addr : bc_mac;
192 195
196 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP &&
197 (params->cipher == WLAN_CIPHER_SUITE_WEP40 ||
198 params->cipher == WLAN_CIPHER_SUITE_WEP104)) {
199 if (params->key && params->key_len) {
200 wep_key = &priv->wep_key[key_index];
201 memset(wep_key, 0, sizeof(struct mwifiex_wep_key));
202 memcpy(wep_key->key_material, params->key,
203 params->key_len);
204 wep_key->key_index = key_index;
205 wep_key->key_length = params->key_len;
206 priv->sec_info.wep_enabled = 1;
207 }
208 return 0;
209 }
210
193 if (mwifiex_set_encode(priv, params->key, params->key_len, 211 if (mwifiex_set_encode(priv, params->key, params->key_len,
194 key_index, peer_mac, 0)) { 212 key_index, peer_mac, 0)) {
195 wiphy_err(wiphy, "crypto keys added\n"); 213 wiphy_err(wiphy, "crypto keys added\n");
@@ -242,13 +260,13 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
242 flag = 1; 260 flag = 1;
243 first_chan = (u32) ch->hw_value; 261 first_chan = (u32) ch->hw_value;
244 next_chan = first_chan; 262 next_chan = first_chan;
245 max_pwr = ch->max_power; 263 max_pwr = ch->max_reg_power;
246 no_of_parsed_chan = 1; 264 no_of_parsed_chan = 1;
247 continue; 265 continue;
248 } 266 }
249 267
250 if (ch->hw_value == next_chan + 1 && 268 if (ch->hw_value == next_chan + 1 &&
251 ch->max_power == max_pwr) { 269 ch->max_reg_power == max_pwr) {
252 next_chan++; 270 next_chan++;
253 no_of_parsed_chan++; 271 no_of_parsed_chan++;
254 } else { 272 } else {
@@ -259,7 +277,7 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
259 no_of_triplet++; 277 no_of_triplet++;
260 first_chan = (u32) ch->hw_value; 278 first_chan = (u32) ch->hw_value;
261 next_chan = first_chan; 279 next_chan = first_chan;
262 max_pwr = ch->max_power; 280 max_pwr = ch->max_reg_power;
263 no_of_parsed_chan = 1; 281 no_of_parsed_chan = 1;
264 } 282 }
265 } 283 }
@@ -384,13 +402,13 @@ mwifiex_set_rf_channel(struct mwifiex_private *priv,
384 cfp.freq = chan->center_freq; 402 cfp.freq = chan->center_freq;
385 cfp.channel = ieee80211_frequency_to_channel(chan->center_freq); 403 cfp.channel = ieee80211_frequency_to_channel(chan->center_freq);
386 404
387 if (mwifiex_bss_set_channel(priv, &cfp)) 405 if (priv->bss_type == MWIFIEX_BSS_TYPE_STA) {
388 return -EFAULT; 406 if (mwifiex_bss_set_channel(priv, &cfp))
389 407 return -EFAULT;
390 if (priv->bss_type == MWIFIEX_BSS_TYPE_STA)
391 return mwifiex_drv_change_adhoc_chan(priv, cfp.channel); 408 return mwifiex_drv_change_adhoc_chan(priv, cfp.channel);
392 else 409 }
393 return mwifiex_uap_set_channel(priv, cfp.channel); 410
411 return 0;
394} 412}
395 413
396/* 414/*
@@ -896,6 +914,69 @@ static int mwifiex_cfg80211_set_cqm_rssi_config(struct wiphy *wiphy,
896 return 0; 914 return 0;
897} 915}
898 916
917/* cfg80211 operation handler for change_beacon.
918 * Function retrieves and sets modified management IEs to FW.
919 */
920static int mwifiex_cfg80211_change_beacon(struct wiphy *wiphy,
921 struct net_device *dev,
922 struct cfg80211_beacon_data *data)
923{
924 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
925
926 if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
927 wiphy_err(wiphy, "%s: bss_type mismatched\n", __func__);
928 return -EINVAL;
929 }
930
931 if (!priv->bss_started) {
932 wiphy_err(wiphy, "%s: bss not started\n", __func__);
933 return -EINVAL;
934 }
935
936 if (mwifiex_set_mgmt_ies(priv, data)) {
937 wiphy_err(wiphy, "%s: setting mgmt ies failed\n", __func__);
938 return -EFAULT;
939 }
940
941 return 0;
942}
943
944static int
945mwifiex_cfg80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant)
946{
947 struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
948 struct mwifiex_private *priv = mwifiex_get_priv(adapter,
949 MWIFIEX_BSS_ROLE_ANY);
950 struct mwifiex_ds_ant_cfg ant_cfg;
951
952 if (!tx_ant || !rx_ant)
953 return -EOPNOTSUPP;
954
955 if (adapter->hw_dev_mcs_support != HT_STREAM_2X2) {
956 /* Not a MIMO chip. User should provide specific antenna number
957 * for Tx/Rx path or enable all antennas for diversity
958 */
959 if (tx_ant != rx_ant)
960 return -EOPNOTSUPP;
961
962 if ((tx_ant & (tx_ant - 1)) &&
963 (tx_ant != BIT(adapter->number_of_antenna) - 1))
964 return -EOPNOTSUPP;
965
966 if ((tx_ant == BIT(adapter->number_of_antenna) - 1) &&
967 (priv->adapter->number_of_antenna > 1)) {
968 tx_ant = RF_ANTENNA_AUTO;
969 rx_ant = RF_ANTENNA_AUTO;
970 }
971 }
972
973 ant_cfg.tx_ant = tx_ant;
974 ant_cfg.rx_ant = rx_ant;
975
976 return mwifiex_send_cmd_sync(priv, HostCmd_CMD_RF_ANTENNA,
977 HostCmd_ACT_GEN_SET, 0, &ant_cfg);
978}
979
899/* cfg80211 operation handler for stop ap. 980/* cfg80211 operation handler for stop ap.
900 * Function stops BSS running at uAP interface. 981 * Function stops BSS running at uAP interface.
901 */ 982 */
@@ -929,7 +1010,7 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
929 1010
930 if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) 1011 if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP)
931 return -1; 1012 return -1;
932 if (mwifiex_set_mgmt_ies(priv, params)) 1013 if (mwifiex_set_mgmt_ies(priv, &params->beacon))
933 return -1; 1014 return -1;
934 1015
935 bss_cfg = kzalloc(sizeof(struct mwifiex_uap_bss_param), GFP_KERNEL); 1016 bss_cfg = kzalloc(sizeof(struct mwifiex_uap_bss_param), GFP_KERNEL);
@@ -962,12 +1043,25 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
962 return -EINVAL; 1043 return -EINVAL;
963 } 1044 }
964 1045
1046 bss_cfg->channel =
1047 (u8)ieee80211_frequency_to_channel(params->channel->center_freq);
1048 bss_cfg->band_cfg = BAND_CONFIG_MANUAL;
1049
1050 if (mwifiex_set_rf_channel(priv, params->channel,
1051 params->channel_type)) {
1052 kfree(bss_cfg);
1053 wiphy_err(wiphy, "Failed to set band config information!\n");
1054 return -1;
1055 }
1056
965 if (mwifiex_set_secure_params(priv, bss_cfg, params)) { 1057 if (mwifiex_set_secure_params(priv, bss_cfg, params)) {
966 kfree(bss_cfg); 1058 kfree(bss_cfg);
967 wiphy_err(wiphy, "Failed to parse secuirty parameters!\n"); 1059 wiphy_err(wiphy, "Failed to parse secuirty parameters!\n");
968 return -1; 1060 return -1;
969 } 1061 }
970 1062
1063 mwifiex_set_ht_params(priv, bss_cfg, params);
1064
971 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP, 1065 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP,
972 HostCmd_ACT_GEN_SET, 0, NULL)) { 1066 HostCmd_ACT_GEN_SET, 0, NULL)) {
973 wiphy_err(wiphy, "Failed to stop the BSS\n"); 1067 wiphy_err(wiphy, "Failed to stop the BSS\n");
@@ -991,6 +1085,16 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
991 return -1; 1085 return -1;
992 } 1086 }
993 1087
1088 if (priv->sec_info.wep_enabled)
1089 priv->curr_pkt_filter |= HostCmd_ACT_MAC_WEP_ENABLE;
1090 else
1091 priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_WEP_ENABLE;
1092
1093 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_MAC_CONTROL,
1094 HostCmd_ACT_GEN_SET, 0,
1095 &priv->curr_pkt_filter))
1096 return -1;
1097
994 return 0; 1098 return 0;
995} 1099}
996 1100
@@ -1382,7 +1486,7 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, struct net_device *dev,
1382 1486
1383 priv->user_scan_cfg->chan_list[i].scan_time = 0; 1487 priv->user_scan_cfg->chan_list[i].scan_time = 0;
1384 } 1488 }
1385 if (mwifiex_set_user_scan_ioctl(priv, priv->user_scan_cfg)) 1489 if (mwifiex_scan_networks(priv, priv->user_scan_cfg))
1386 return -EFAULT; 1490 return -EFAULT;
1387 1491
1388 if (request->ie && request->ie_len) { 1492 if (request->ie && request->ie_len) {
@@ -1656,7 +1760,9 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
1656 .set_bitrate_mask = mwifiex_cfg80211_set_bitrate_mask, 1760 .set_bitrate_mask = mwifiex_cfg80211_set_bitrate_mask,
1657 .start_ap = mwifiex_cfg80211_start_ap, 1761 .start_ap = mwifiex_cfg80211_start_ap,
1658 .stop_ap = mwifiex_cfg80211_stop_ap, 1762 .stop_ap = mwifiex_cfg80211_stop_ap,
1763 .change_beacon = mwifiex_cfg80211_change_beacon,
1659 .set_cqm_rssi_config = mwifiex_cfg80211_set_cqm_rssi_config, 1764 .set_cqm_rssi_config = mwifiex_cfg80211_set_cqm_rssi_config,
1765 .set_antenna = mwifiex_cfg80211_set_antenna,
1660}; 1766};
1661 1767
1662/* 1768/*
@@ -1703,7 +1809,14 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
1703 1809
1704 memcpy(wiphy->perm_addr, priv->curr_addr, ETH_ALEN); 1810 memcpy(wiphy->perm_addr, priv->curr_addr, ETH_ALEN);
1705 wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; 1811 wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
1706 wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME | WIPHY_FLAG_CUSTOM_REGULATORY; 1812 wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
1813 WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
1814
1815 wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
1816 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2;
1817
1818 wiphy->available_antennas_tx = BIT(adapter->number_of_antenna) - 1;
1819 wiphy->available_antennas_rx = BIT(adapter->number_of_antenna) - 1;
1707 1820
1708 /* Reserve space for mwifiex specific private data for BSS */ 1821 /* Reserve space for mwifiex specific private data for BSS */
1709 wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv); 1822 wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv);
@@ -1714,7 +1827,7 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
1714 wdev_priv = wiphy_priv(wiphy); 1827 wdev_priv = wiphy_priv(wiphy);
1715 *(unsigned long *)wdev_priv = (unsigned long)adapter; 1828 *(unsigned long *)wdev_priv = (unsigned long)adapter;
1716 1829
1717 set_wiphy_dev(wiphy, (struct device *)priv->adapter->dev); 1830 set_wiphy_dev(wiphy, priv->adapter->dev);
1718 1831
1719 ret = wiphy_register(wiphy); 1832 ret = wiphy_register(wiphy);
1720 if (ret < 0) { 1833 if (ret < 0) {
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 51e023ec1de4..c68adec3cc8b 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -578,6 +578,7 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
578 } else { 578 } else {
579 adapter->cmd_queued = cmd_node; 579 adapter->cmd_queued = cmd_node;
580 mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true); 580 mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
581 queue_work(adapter->workqueue, &adapter->main_work);
581 } 582 }
582 583
583 return ret; 584 return ret;
@@ -1102,7 +1103,8 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
1102 &resp->params.opt_hs_cfg; 1103 &resp->params.opt_hs_cfg;
1103 uint32_t conditions = le32_to_cpu(phs_cfg->params.hs_config.conditions); 1104 uint32_t conditions = le32_to_cpu(phs_cfg->params.hs_config.conditions);
1104 1105
1105 if (phs_cfg->action == cpu_to_le16(HS_ACTIVATE)) { 1106 if (phs_cfg->action == cpu_to_le16(HS_ACTIVATE) &&
1107 adapter->iface_type == MWIFIEX_SDIO) {
1106 mwifiex_hs_activated_event(priv, true); 1108 mwifiex_hs_activated_event(priv, true);
1107 return 0; 1109 return 0;
1108 } else { 1110 } else {
@@ -1114,6 +1116,9 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
1114 } 1116 }
1115 if (conditions != HOST_SLEEP_CFG_CANCEL) { 1117 if (conditions != HOST_SLEEP_CFG_CANCEL) {
1116 adapter->is_hs_configured = true; 1118 adapter->is_hs_configured = true;
1119 if (adapter->iface_type == MWIFIEX_USB ||
1120 adapter->iface_type == MWIFIEX_PCIE)
1121 mwifiex_hs_activated_event(priv, true);
1117 } else { 1122 } else {
1118 adapter->is_hs_configured = false; 1123 adapter->is_hs_configured = false;
1119 if (adapter->hs_activated) 1124 if (adapter->hs_activated)
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 561452a5c818..14e985d01dee 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -124,6 +124,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
124#define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45) 124#define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45)
125#define TLV_TYPE_UAP_BCAST_SSID (PROPRIETARY_TLV_BASE_ID + 48) 125#define TLV_TYPE_UAP_BCAST_SSID (PROPRIETARY_TLV_BASE_ID + 48)
126#define TLV_TYPE_UAP_RTS_THRESHOLD (PROPRIETARY_TLV_BASE_ID + 51) 126#define TLV_TYPE_UAP_RTS_THRESHOLD (PROPRIETARY_TLV_BASE_ID + 51)
127#define TLV_TYPE_UAP_WEP_KEY (PROPRIETARY_TLV_BASE_ID + 59)
127#define TLV_TYPE_UAP_WPA_PASSPHRASE (PROPRIETARY_TLV_BASE_ID + 60) 128#define TLV_TYPE_UAP_WPA_PASSPHRASE (PROPRIETARY_TLV_BASE_ID + 60)
128#define TLV_TYPE_UAP_ENCRY_PROTOCOL (PROPRIETARY_TLV_BASE_ID + 64) 129#define TLV_TYPE_UAP_ENCRY_PROTOCOL (PROPRIETARY_TLV_BASE_ID + 64)
129#define TLV_TYPE_UAP_AKMP (PROPRIETARY_TLV_BASE_ID + 65) 130#define TLV_TYPE_UAP_AKMP (PROPRIETARY_TLV_BASE_ID + 65)
@@ -162,6 +163,12 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
162 163
163#define ISSUPP_11NENABLED(FwCapInfo) (FwCapInfo & BIT(11)) 164#define ISSUPP_11NENABLED(FwCapInfo) (FwCapInfo & BIT(11))
164 165
166#define MWIFIEX_DEF_HT_CAP (IEEE80211_HT_CAP_DSSSCCK40 | \
167 (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) | \
168 IEEE80211_HT_CAP_SM_PS)
169
170#define MWIFIEX_DEF_AMPDU IEEE80211_HT_AMPDU_PARM_FACTOR
171
165/* dev_cap bitmap 172/* dev_cap bitmap
166 * BIT 173 * BIT
167 * 0-16 reserved 174 * 0-16 reserved
@@ -219,6 +226,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
219#define HostCmd_CMD_RF_REG_ACCESS 0x001b 226#define HostCmd_CMD_RF_REG_ACCESS 0x001b
220#define HostCmd_CMD_PMIC_REG_ACCESS 0x00ad 227#define HostCmd_CMD_PMIC_REG_ACCESS 0x00ad
221#define HostCmd_CMD_802_11_RF_CHANNEL 0x001d 228#define HostCmd_CMD_802_11_RF_CHANNEL 0x001d
229#define HostCmd_CMD_RF_TX_PWR 0x001e
230#define HostCmd_CMD_RF_ANTENNA 0x0020
222#define HostCmd_CMD_802_11_DEAUTHENTICATE 0x0024 231#define HostCmd_CMD_802_11_DEAUTHENTICATE 0x0024
223#define HostCmd_CMD_MAC_CONTROL 0x0028 232#define HostCmd_CMD_MAC_CONTROL 0x0028
224#define HostCmd_CMD_802_11_AD_HOC_START 0x002b 233#define HostCmd_CMD_802_11_AD_HOC_START 0x002b
@@ -314,6 +323,12 @@ enum ENH_PS_MODES {
314 323
315#define HostCmd_BSS_TYPE_MASK 0xf000 324#define HostCmd_BSS_TYPE_MASK 0xf000
316 325
326#define HostCmd_ACT_SET_RX 0x0001
327#define HostCmd_ACT_SET_TX 0x0002
328#define HostCmd_ACT_SET_BOTH 0x0003
329
330#define RF_ANTENNA_AUTO 0xFFFF
331
317#define HostCmd_SET_SEQ_NO_BSS_INFO(seq, num, type) { \ 332#define HostCmd_SET_SEQ_NO_BSS_INFO(seq, num, type) { \
318 (((seq) & 0x00ff) | \ 333 (((seq) & 0x00ff) | \
319 (((num) & 0x000f) << 8)) | \ 334 (((num) & 0x000f) << 8)) | \
@@ -869,6 +884,25 @@ struct host_cmd_ds_txpwr_cfg {
869 __le32 mode; 884 __le32 mode;
870} __packed; 885} __packed;
871 886
887struct host_cmd_ds_rf_tx_pwr {
888 __le16 action;
889 __le16 cur_level;
890 u8 max_power;
891 u8 min_power;
892} __packed;
893
894struct host_cmd_ds_rf_ant_mimo {
895 __le16 action_tx;
896 __le16 tx_ant_mode;
897 __le16 action_rx;
898 __le16 rx_ant_mode;
899};
900
901struct host_cmd_ds_rf_ant_siso {
902 __le16 action;
903 __le16 ant_mode;
904};
905
872struct mwifiex_bcn_param { 906struct mwifiex_bcn_param {
873 u8 bssid[ETH_ALEN]; 907 u8 bssid[ETH_ALEN];
874 u8 rssi; 908 u8 rssi;
@@ -1195,6 +1229,13 @@ struct host_cmd_tlv_passphrase {
1195 u8 passphrase[0]; 1229 u8 passphrase[0];
1196} __packed; 1230} __packed;
1197 1231
1232struct host_cmd_tlv_wep_key {
1233 struct host_cmd_tlv tlv;
1234 u8 key_index;
1235 u8 is_default;
1236 u8 key[1];
1237};
1238
1198struct host_cmd_tlv_auth_type { 1239struct host_cmd_tlv_auth_type {
1199 struct host_cmd_tlv tlv; 1240 struct host_cmd_tlv tlv;
1200 u8 auth_type; 1241 u8 auth_type;
@@ -1347,6 +1388,9 @@ struct host_cmd_ds_command {
1347 struct host_cmd_ds_tx_rate_query tx_rate; 1388 struct host_cmd_ds_tx_rate_query tx_rate;
1348 struct host_cmd_ds_tx_rate_cfg tx_rate_cfg; 1389 struct host_cmd_ds_tx_rate_cfg tx_rate_cfg;
1349 struct host_cmd_ds_txpwr_cfg txp_cfg; 1390 struct host_cmd_ds_txpwr_cfg txp_cfg;
1391 struct host_cmd_ds_rf_tx_pwr txp;
1392 struct host_cmd_ds_rf_ant_mimo ant_mimo;
1393 struct host_cmd_ds_rf_ant_siso ant_siso;
1350 struct host_cmd_ds_802_11_ps_mode_enh psmode_enh; 1394 struct host_cmd_ds_802_11_ps_mode_enh psmode_enh;
1351 struct host_cmd_ds_802_11_hs_cfg_enh opt_hs_cfg; 1395 struct host_cmd_ds_802_11_hs_cfg_enh opt_hs_cfg;
1352 struct host_cmd_ds_802_11_scan scan; 1396 struct host_cmd_ds_802_11_scan scan;
diff --git a/drivers/net/wireless/mwifiex/ie.c b/drivers/net/wireless/mwifiex/ie.c
index 383820a52beb..1d8dd003e396 100644
--- a/drivers/net/wireless/mwifiex/ie.c
+++ b/drivers/net/wireless/mwifiex/ie.c
@@ -51,8 +51,7 @@ mwifiex_ie_get_autoidx(struct mwifiex_private *priv, u16 subtype_mask,
51 51
52 for (i = 0; i < priv->adapter->max_mgmt_ie_index; i++) { 52 for (i = 0; i < priv->adapter->max_mgmt_ie_index; i++) {
53 mask = le16_to_cpu(priv->mgmt_ie[i].mgmt_subtype_mask); 53 mask = le16_to_cpu(priv->mgmt_ie[i].mgmt_subtype_mask);
54 len = le16_to_cpu(priv->mgmt_ie[i].ie_length) + 54 len = le16_to_cpu(ie->ie_length);
55 le16_to_cpu(ie->ie_length);
56 55
57 if (mask == MWIFIEX_AUTO_IDX_MASK) 56 if (mask == MWIFIEX_AUTO_IDX_MASK)
58 continue; 57 continue;
@@ -108,10 +107,8 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv,
108 return -1; 107 return -1;
109 108
110 tmp = (u8 *)&priv->mgmt_ie[index].ie_buffer; 109 tmp = (u8 *)&priv->mgmt_ie[index].ie_buffer;
111 tmp += le16_to_cpu(priv->mgmt_ie[index].ie_length);
112 memcpy(tmp, &ie->ie_buffer, le16_to_cpu(ie->ie_length)); 110 memcpy(tmp, &ie->ie_buffer, le16_to_cpu(ie->ie_length));
113 le16_add_cpu(&priv->mgmt_ie[index].ie_length, 111 priv->mgmt_ie[index].ie_length = ie->ie_length;
114 le16_to_cpu(ie->ie_length));
115 priv->mgmt_ie[index].ie_index = cpu_to_le16(index); 112 priv->mgmt_ie[index].ie_index = cpu_to_le16(index);
116 priv->mgmt_ie[index].mgmt_subtype_mask = 113 priv->mgmt_ie[index].mgmt_subtype_mask =
117 cpu_to_le16(mask); 114 cpu_to_le16(mask);
@@ -217,92 +214,63 @@ mwifiex_update_uap_custom_ie(struct mwifiex_private *priv,
217 return ret; 214 return ret;
218} 215}
219 216
220/* This function parses different IEs- Tail IEs, beacon IEs, probe response IEs, 217/* This function checks if WPS IE is present in passed buffer and copies it to
221 * association response IEs from cfg80211_ap_settings function and sets these IE 218 * mwifiex_ie structure.
222 * to FW. 219 * Function takes pointer to struct mwifiex_ie pointer as argument.
220 * If WPS IE is present memory is allocated for mwifiex_ie pointer and filled
221 * in with WPS IE. Caller should take care of freeing this memory.
223 */ 222 */
224int mwifiex_set_mgmt_ies(struct mwifiex_private *priv, 223static int mwifiex_update_wps_ie(const u8 *ies, int ies_len,
225 struct cfg80211_ap_settings *params) 224 struct mwifiex_ie **ie_ptr, u16 mask)
226{ 225{
227 struct mwifiex_ie *beacon_ie = NULL, *pr_ie = NULL; 226 struct ieee_types_header *wps_ie;
228 struct mwifiex_ie *ar_ie = NULL, *rsn_ie = NULL; 227 struct mwifiex_ie *ie = NULL;
229 struct ieee_types_header *ie = NULL; 228 const u8 *vendor_ie;
230 u16 beacon_idx = MWIFIEX_AUTO_IDX_MASK, pr_idx = MWIFIEX_AUTO_IDX_MASK; 229
231 u16 ar_idx = MWIFIEX_AUTO_IDX_MASK, rsn_idx = MWIFIEX_AUTO_IDX_MASK; 230 vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
232 u16 mask; 231 WLAN_OUI_TYPE_MICROSOFT_WPS,
233 int ret = 0; 232 ies, ies_len);
234 233 if (vendor_ie) {
235 if (params->beacon.tail && params->beacon.tail_len) { 234 ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
236 ie = (void *)cfg80211_find_ie(WLAN_EID_RSN, params->beacon.tail, 235 if (!ie)
237 params->beacon.tail_len); 236 return -ENOMEM;
238 if (ie) {
239 rsn_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
240 if (!rsn_ie)
241 return -ENOMEM;
242
243 rsn_ie->ie_index = cpu_to_le16(rsn_idx);
244 mask = MGMT_MASK_BEACON | MGMT_MASK_PROBE_RESP |
245 MGMT_MASK_ASSOC_RESP;
246 rsn_ie->mgmt_subtype_mask = cpu_to_le16(mask);
247 rsn_ie->ie_length = cpu_to_le16(ie->len + 2);
248 memcpy(rsn_ie->ie_buffer, ie, ie->len + 2);
249
250 if (mwifiex_update_uap_custom_ie(priv, rsn_ie, &rsn_idx,
251 NULL, NULL,
252 NULL, NULL)) {
253 ret = -1;
254 goto done;
255 }
256 237
257 priv->rsn_idx = rsn_idx; 238 wps_ie = (struct ieee_types_header *)vendor_ie;
258 } 239 memcpy(ie->ie_buffer, wps_ie, wps_ie->len + 2);
240 ie->ie_length = cpu_to_le16(wps_ie->len + 2);
241 ie->mgmt_subtype_mask = cpu_to_le16(mask);
242 ie->ie_index = cpu_to_le16(MWIFIEX_AUTO_IDX_MASK);
259 } 243 }
260 244
261 if (params->beacon.beacon_ies && params->beacon.beacon_ies_len) { 245 *ie_ptr = ie;
262 beacon_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL); 246 return 0;
263 if (!beacon_ie) { 247}
264 ret = -ENOMEM;
265 goto done;
266 }
267
268 beacon_ie->ie_index = cpu_to_le16(beacon_idx);
269 beacon_ie->mgmt_subtype_mask = cpu_to_le16(MGMT_MASK_BEACON);
270 beacon_ie->ie_length =
271 cpu_to_le16(params->beacon.beacon_ies_len);
272 memcpy(beacon_ie->ie_buffer, params->beacon.beacon_ies,
273 params->beacon.beacon_ies_len);
274 }
275 248
276 if (params->beacon.proberesp_ies && params->beacon.proberesp_ies_len) { 249/* This function parses beacon IEs, probe response IEs, association response IEs
277 pr_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL); 250 * from cfg80211_ap_settings->beacon and sets these IE to FW.
278 if (!pr_ie) { 251 */
279 ret = -ENOMEM; 252static int mwifiex_set_mgmt_beacon_data_ies(struct mwifiex_private *priv,
280 goto done; 253 struct cfg80211_beacon_data *data)
281 } 254{
255 struct mwifiex_ie *beacon_ie = NULL, *pr_ie = NULL, *ar_ie = NULL;
256 u16 beacon_idx = MWIFIEX_AUTO_IDX_MASK, pr_idx = MWIFIEX_AUTO_IDX_MASK;
257 u16 ar_idx = MWIFIEX_AUTO_IDX_MASK;
258 int ret = 0;
282 259
283 pr_ie->ie_index = cpu_to_le16(pr_idx); 260 if (data->beacon_ies && data->beacon_ies_len)
284 pr_ie->mgmt_subtype_mask = cpu_to_le16(MGMT_MASK_PROBE_RESP); 261 mwifiex_update_wps_ie(data->beacon_ies, data->beacon_ies_len,
285 pr_ie->ie_length = 262 &beacon_ie, MGMT_MASK_BEACON);
286 cpu_to_le16(params->beacon.proberesp_ies_len);
287 memcpy(pr_ie->ie_buffer, params->beacon.proberesp_ies,
288 params->beacon.proberesp_ies_len);
289 }
290 263
291 if (params->beacon.assocresp_ies && params->beacon.assocresp_ies_len) { 264 if (data->proberesp_ies && data->proberesp_ies_len)
292 ar_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL); 265 mwifiex_update_wps_ie(data->proberesp_ies,
293 if (!ar_ie) { 266 data->proberesp_ies_len, &pr_ie,
294 ret = -ENOMEM; 267 MGMT_MASK_PROBE_RESP);
295 goto done;
296 }
297 268
298 ar_ie->ie_index = cpu_to_le16(ar_idx); 269 if (data->assocresp_ies && data->assocresp_ies_len)
299 mask = MGMT_MASK_ASSOC_RESP | MGMT_MASK_REASSOC_RESP; 270 mwifiex_update_wps_ie(data->assocresp_ies,
300 ar_ie->mgmt_subtype_mask = cpu_to_le16(mask); 271 data->assocresp_ies_len, &ar_ie,
301 ar_ie->ie_length = 272 MGMT_MASK_ASSOC_RESP |
302 cpu_to_le16(params->beacon.assocresp_ies_len); 273 MGMT_MASK_REASSOC_RESP);
303 memcpy(ar_ie->ie_buffer, params->beacon.assocresp_ies,
304 params->beacon.assocresp_ies_len);
305 }
306 274
307 if (beacon_ie || pr_ie || ar_ie) { 275 if (beacon_ie || pr_ie || ar_ie) {
308 ret = mwifiex_update_uap_custom_ie(priv, beacon_ie, 276 ret = mwifiex_update_uap_custom_ie(priv, beacon_ie,
@@ -320,11 +288,67 @@ done:
320 kfree(beacon_ie); 288 kfree(beacon_ie);
321 kfree(pr_ie); 289 kfree(pr_ie);
322 kfree(ar_ie); 290 kfree(ar_ie);
323 kfree(rsn_ie);
324 291
325 return ret; 292 return ret;
326} 293}
327 294
295/* This function parses different IEs-tail IEs, beacon IEs, probe response IEs,
296 * association response IEs from cfg80211_ap_settings function and sets these IE
297 * to FW.
298 */
299int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
300 struct cfg80211_beacon_data *info)
301{
302 struct mwifiex_ie *gen_ie;
303 struct ieee_types_header *rsn_ie, *wpa_ie = NULL;
304 u16 rsn_idx = MWIFIEX_AUTO_IDX_MASK, ie_len = 0;
305 const u8 *vendor_ie;
306
307 if (info->tail && info->tail_len) {
308 gen_ie = kzalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
309 if (!gen_ie)
310 return -ENOMEM;
311 gen_ie->ie_index = cpu_to_le16(rsn_idx);
312 gen_ie->mgmt_subtype_mask = cpu_to_le16(MGMT_MASK_BEACON |
313 MGMT_MASK_PROBE_RESP |
314 MGMT_MASK_ASSOC_RESP);
315
316 rsn_ie = (void *)cfg80211_find_ie(WLAN_EID_RSN,
317 info->tail, info->tail_len);
318 if (rsn_ie) {
319 memcpy(gen_ie->ie_buffer, rsn_ie, rsn_ie->len + 2);
320 ie_len = rsn_ie->len + 2;
321 gen_ie->ie_length = cpu_to_le16(ie_len);
322 }
323
324 vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
325 WLAN_OUI_TYPE_MICROSOFT_WPA,
326 info->tail,
327 info->tail_len);
328 if (vendor_ie) {
329 wpa_ie = (struct ieee_types_header *)vendor_ie;
330 memcpy(gen_ie->ie_buffer + ie_len,
331 wpa_ie, wpa_ie->len + 2);
332 ie_len += wpa_ie->len + 2;
333 gen_ie->ie_length = cpu_to_le16(ie_len);
334 }
335
336 if (rsn_ie || wpa_ie) {
337 if (mwifiex_update_uap_custom_ie(priv, gen_ie, &rsn_idx,
338 NULL, NULL,
339 NULL, NULL)) {
340 kfree(gen_ie);
341 return -1;
342 }
343 priv->rsn_idx = rsn_idx;
344 }
345
346 kfree(gen_ie);
347 }
348
349 return mwifiex_set_mgmt_beacon_data_ies(priv, info);
350}
351
328/* This function removes management IE set */ 352/* This function removes management IE set */
329int mwifiex_del_mgmt_ies(struct mwifiex_private *priv) 353int mwifiex_del_mgmt_ies(struct mwifiex_private *priv)
330{ 354{
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index c1cb004db913..b543a4d82ff3 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -57,6 +57,69 @@ static int mwifiex_add_bss_prio_tbl(struct mwifiex_private *priv)
57 return 0; 57 return 0;
58} 58}
59 59
60static void scan_delay_timer_fn(unsigned long data)
61{
62 struct mwifiex_private *priv = (struct mwifiex_private *)data;
63 struct mwifiex_adapter *adapter = priv->adapter;
64 struct cmd_ctrl_node *cmd_node, *tmp_node;
65 unsigned long flags;
66
67 if (!mwifiex_wmm_lists_empty(adapter)) {
68 if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) {
69 /*
70 * Abort scan operation by cancelling all pending scan
71 * command
72 */
73 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
74 list_for_each_entry_safe(cmd_node, tmp_node,
75 &adapter->scan_pending_q,
76 list) {
77 list_del(&cmd_node->list);
78 cmd_node->wait_q_enabled = false;
79 mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
80 }
81 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
82 flags);
83
84 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
85 adapter->scan_processing = false;
86 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock,
87 flags);
88
89 if (priv->user_scan_cfg) {
90 dev_dbg(priv->adapter->dev,
91 "info: %s: scan aborted\n", __func__);
92 cfg80211_scan_done(priv->scan_request, 1);
93 priv->scan_request = NULL;
94 kfree(priv->user_scan_cfg);
95 priv->user_scan_cfg = NULL;
96 }
97 } else {
98 /*
99 * Tx data queue is still not empty, delay scan
100 * operation further by 20msec.
101 */
102 mod_timer(&priv->scan_delay_timer, jiffies +
103 msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
104 adapter->scan_delay_cnt++;
105 }
106 queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
107 } else {
108 /*
109 * Tx data queue is empty. Get scan command from scan_pending_q
110 * and put to cmd_pending_q to resume scan operation
111 */
112 adapter->scan_delay_cnt = 0;
113 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
114 cmd_node = list_first_entry(&adapter->scan_pending_q,
115 struct cmd_ctrl_node, list);
116 list_del(&cmd_node->list);
117 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
118
119 mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
120 }
121}
122
60/* 123/*
61 * This function initializes the private structure and sets default 124 * This function initializes the private structure and sets default
62 * values to the members. 125 * values to the members.
@@ -136,6 +199,9 @@ static int mwifiex_init_priv(struct mwifiex_private *priv)
136 199
137 priv->scan_block = false; 200 priv->scan_block = false;
138 201
202 setup_timer(&priv->scan_delay_timer, scan_delay_timer_fn,
203 (unsigned long)priv);
204
139 return mwifiex_add_bss_prio_tbl(priv); 205 return mwifiex_add_bss_prio_tbl(priv);
140} 206}
141 207
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index e6be6ee75951..e121294cc1ac 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -21,6 +21,7 @@
21#define _MWIFIEX_IOCTL_H_ 21#define _MWIFIEX_IOCTL_H_
22 22
23#include <net/mac80211.h> 23#include <net/mac80211.h>
24#include <net/lib80211.h>
24 25
25enum { 26enum {
26 MWIFIEX_SCAN_TYPE_UNCHANGED = 0, 27 MWIFIEX_SCAN_TYPE_UNCHANGED = 0,
@@ -71,6 +72,13 @@ struct wpa_param {
71 u8 passphrase[MWIFIEX_WPA_PASSHPHRASE_LEN]; 72 u8 passphrase[MWIFIEX_WPA_PASSHPHRASE_LEN];
72}; 73};
73 74
75struct wep_key {
76 u8 key_index;
77 u8 is_default;
78 u16 length;
79 u8 key[WLAN_KEY_LEN_WEP104];
80};
81
74#define KEY_MGMT_ON_HOST 0x03 82#define KEY_MGMT_ON_HOST 0x03
75#define MWIFIEX_AUTH_MODE_AUTO 0xFF 83#define MWIFIEX_AUTH_MODE_AUTO 0xFF
76#define BAND_CONFIG_MANUAL 0x00 84#define BAND_CONFIG_MANUAL 0x00
@@ -90,6 +98,8 @@ struct mwifiex_uap_bss_param {
90 u16 key_mgmt; 98 u16 key_mgmt;
91 u16 key_mgmt_operation; 99 u16 key_mgmt_operation;
92 struct wpa_param wpa_cfg; 100 struct wpa_param wpa_cfg;
101 struct wep_key wep_cfg[NUM_WEP_KEYS];
102 struct ieee80211_ht_cap ht_cap;
93}; 103};
94 104
95enum { 105enum {
@@ -267,6 +277,11 @@ struct mwifiex_ds_11n_amsdu_aggr_ctrl {
267 u16 curr_buf_size; 277 u16 curr_buf_size;
268}; 278};
269 279
280struct mwifiex_ds_ant_cfg {
281 u32 tx_ant;
282 u32 rx_ant;
283};
284
270#define MWIFIEX_NUM_OF_CMD_BUFFER 20 285#define MWIFIEX_NUM_OF_CMD_BUFFER 20
271#define MWIFIEX_SIZE_OF_CMD_BUFFER 2048 286#define MWIFIEX_SIZE_OF_CMD_BUFFER 2048
272 287
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index d6b4fb04011f..82e63cee1e97 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -1349,22 +1349,16 @@ static int mwifiex_deauthenticate_infra(struct mwifiex_private *priv, u8 *mac)
1349{ 1349{
1350 u8 mac_address[ETH_ALEN]; 1350 u8 mac_address[ETH_ALEN];
1351 int ret; 1351 int ret;
1352 u8 zero_mac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
1353 1352
1354 if (mac) { 1353 if (!mac || is_zero_ether_addr(mac))
1355 if (!memcmp(mac, zero_mac, sizeof(zero_mac))) 1354 memcpy(mac_address,
1356 memcpy((u8 *) &mac_address, 1355 priv->curr_bss_params.bss_descriptor.mac_address,
1357 (u8 *) &priv->curr_bss_params.bss_descriptor. 1356 ETH_ALEN);
1358 mac_address, ETH_ALEN); 1357 else
1359 else 1358 memcpy(mac_address, mac, ETH_ALEN);
1360 memcpy((u8 *) &mac_address, (u8 *) mac, ETH_ALEN);
1361 } else {
1362 memcpy((u8 *) &mac_address, (u8 *) &priv->curr_bss_params.
1363 bss_descriptor.mac_address, ETH_ALEN);
1364 }
1365 1359
1366 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_DEAUTHENTICATE, 1360 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_DEAUTHENTICATE,
1367 HostCmd_ACT_GEN_SET, 0, &mac_address); 1361 HostCmd_ACT_GEN_SET, 0, mac_address);
1368 1362
1369 return ret; 1363 return ret;
1370} 1364}
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 3192855c31c0..f0219efc8953 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -190,7 +190,8 @@ process_start:
190 adapter->tx_lock_flag) 190 adapter->tx_lock_flag)
191 break; 191 break;
192 192
193 if (adapter->scan_processing || adapter->data_sent || 193 if ((adapter->scan_processing &&
194 !adapter->scan_delay_cnt) || adapter->data_sent ||
194 mwifiex_wmm_lists_empty(adapter)) { 195 mwifiex_wmm_lists_empty(adapter)) {
195 if (adapter->cmd_sent || adapter->curr_cmd || 196 if (adapter->cmd_sent || adapter->curr_cmd ||
196 (!is_command_pending(adapter))) 197 (!is_command_pending(adapter)))
@@ -244,8 +245,8 @@ process_start:
244 } 245 }
245 } 246 }
246 247
247 if (!adapter->scan_processing && !adapter->data_sent && 248 if ((!adapter->scan_processing || adapter->scan_delay_cnt) &&
248 !mwifiex_wmm_lists_empty(adapter)) { 249 !adapter->data_sent && !mwifiex_wmm_lists_empty(adapter)) {
249 mwifiex_wmm_process_tx(adapter); 250 mwifiex_wmm_process_tx(adapter);
250 if (adapter->hs_activated) { 251 if (adapter->hs_activated) {
251 adapter->is_hs_configured = false; 252 adapter->is_hs_configured = false;
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index bd3b0bf94b9e..9e636535cbf6 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -79,14 +79,17 @@ enum {
79 79
80#define SCAN_BEACON_ENTRY_PAD 6 80#define SCAN_BEACON_ENTRY_PAD 6
81 81
82#define MWIFIEX_PASSIVE_SCAN_CHAN_TIME 200 82#define MWIFIEX_PASSIVE_SCAN_CHAN_TIME 110
83#define MWIFIEX_ACTIVE_SCAN_CHAN_TIME 200 83#define MWIFIEX_ACTIVE_SCAN_CHAN_TIME 30
84#define MWIFIEX_SPECIFIC_SCAN_CHAN_TIME 110 84#define MWIFIEX_SPECIFIC_SCAN_CHAN_TIME 30
85 85
86#define SCAN_RSSI(RSSI) (0x100 - ((u8)(RSSI))) 86#define SCAN_RSSI(RSSI) (0x100 - ((u8)(RSSI)))
87 87
88#define MWIFIEX_MAX_TOTAL_SCAN_TIME (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S) 88#define MWIFIEX_MAX_TOTAL_SCAN_TIME (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S)
89 89
90#define MWIFIEX_MAX_SCAN_DELAY_CNT 50
91#define MWIFIEX_SCAN_DELAY_MSEC 20
92
90#define RSN_GTK_OUI_OFFSET 2 93#define RSN_GTK_OUI_OFFSET 2
91 94
92#define MWIFIEX_OUI_NOT_PRESENT 0 95#define MWIFIEX_OUI_NOT_PRESENT 0
@@ -482,6 +485,7 @@ struct mwifiex_private {
482 u16 proberesp_idx; 485 u16 proberesp_idx;
483 u16 assocresp_idx; 486 u16 assocresp_idx;
484 u16 rsn_idx; 487 u16 rsn_idx;
488 struct timer_list scan_delay_timer;
485}; 489};
486 490
487enum mwifiex_ba_status { 491enum mwifiex_ba_status {
@@ -686,6 +690,7 @@ struct mwifiex_adapter {
686 struct completion fw_load; 690 struct completion fw_load;
687 u8 country_code[IEEE80211_COUNTRY_STRING_LEN]; 691 u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
688 u16 max_mgmt_ie_index; 692 u16 max_mgmt_ie_index;
693 u8 scan_delay_cnt;
689}; 694};
690 695
691int mwifiex_init_lock_list(struct mwifiex_adapter *adapter); 696int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
@@ -835,6 +840,9 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv,
835int mwifiex_set_secure_params(struct mwifiex_private *priv, 840int mwifiex_set_secure_params(struct mwifiex_private *priv,
836 struct mwifiex_uap_bss_param *bss_config, 841 struct mwifiex_uap_bss_param *bss_config,
837 struct cfg80211_ap_settings *params); 842 struct cfg80211_ap_settings *params);
843void mwifiex_set_ht_params(struct mwifiex_private *priv,
844 struct mwifiex_uap_bss_param *bss_cfg,
845 struct cfg80211_ap_settings *params);
838 846
839/* 847/*
840 * This function checks if the queuing is RA based or not. 848 * This function checks if the queuing is RA based or not.
@@ -941,8 +949,8 @@ int mwifiex_drv_get_data_rate(struct mwifiex_private *priv,
941 struct mwifiex_rate_cfg *rate); 949 struct mwifiex_rate_cfg *rate);
942int mwifiex_request_scan(struct mwifiex_private *priv, 950int mwifiex_request_scan(struct mwifiex_private *priv,
943 struct cfg80211_ssid *req_ssid); 951 struct cfg80211_ssid *req_ssid);
944int mwifiex_set_user_scan_ioctl(struct mwifiex_private *priv, 952int mwifiex_scan_networks(struct mwifiex_private *priv,
945 struct mwifiex_user_scan_cfg *scan_req); 953 const struct mwifiex_user_scan_cfg *user_scan_in);
946int mwifiex_set_radio(struct mwifiex_private *priv, u8 option); 954int mwifiex_set_radio(struct mwifiex_private *priv, u8 option);
947 955
948int mwifiex_drv_change_adhoc_chan(struct mwifiex_private *priv, u16 channel); 956int mwifiex_drv_change_adhoc_chan(struct mwifiex_private *priv, u16 channel);
@@ -985,7 +993,6 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
985 993
986int mwifiex_main_process(struct mwifiex_adapter *); 994int mwifiex_main_process(struct mwifiex_adapter *);
987 995
988int mwifiex_uap_set_channel(struct mwifiex_private *priv, int channel);
989int mwifiex_bss_set_channel(struct mwifiex_private *, 996int mwifiex_bss_set_channel(struct mwifiex_private *,
990 struct mwifiex_chan_freq_power *cfp); 997 struct mwifiex_chan_freq_power *cfp);
991int mwifiex_get_bss_info(struct mwifiex_private *, 998int mwifiex_get_bss_info(struct mwifiex_private *,
@@ -1006,7 +1013,7 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct net_device *dev);
1006void mwifiex_set_sys_config_invalid_data(struct mwifiex_uap_bss_param *config); 1013void mwifiex_set_sys_config_invalid_data(struct mwifiex_uap_bss_param *config);
1007 1014
1008int mwifiex_set_mgmt_ies(struct mwifiex_private *priv, 1015int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
1009 struct cfg80211_ap_settings *params); 1016 struct cfg80211_beacon_data *data);
1010int mwifiex_del_mgmt_ies(struct mwifiex_private *priv); 1017int mwifiex_del_mgmt_ies(struct mwifiex_private *priv);
1011u8 *mwifiex_11d_code_2_region(u8 code); 1018u8 *mwifiex_11d_code_2_region(u8 code);
1012 1019
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 74f045715723..04dc7ca4ac22 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -28,7 +28,10 @@
28/* The maximum number of channels the firmware can scan per command */ 28/* The maximum number of channels the firmware can scan per command */
29#define MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN 14 29#define MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN 14
30 30
31#define MWIFIEX_CHANNELS_PER_SCAN_CMD 4 31#define MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD 4
32#define MWIFIEX_LIMIT_1_CHANNEL_PER_SCAN_CMD 15
33#define MWIFIEX_LIMIT_2_CHANNELS_PER_SCAN_CMD 27
34#define MWIFIEX_LIMIT_3_CHANNELS_PER_SCAN_CMD 35
32 35
33/* Memory needed to store a max sized Channel List TLV for a firmware scan */ 36/* Memory needed to store a max sized Channel List TLV for a firmware scan */
34#define CHAN_TLV_MAX_SIZE (sizeof(struct mwifiex_ie_types_header) \ 37#define CHAN_TLV_MAX_SIZE (sizeof(struct mwifiex_ie_types_header) \
@@ -471,7 +474,7 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv,
471 * This routine is used for any scan that is not provided with a 474 * This routine is used for any scan that is not provided with a
472 * specific channel list to scan. 475 * specific channel list to scan.
473 */ 476 */
474static void 477static int
475mwifiex_scan_create_channel_list(struct mwifiex_private *priv, 478mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
476 const struct mwifiex_user_scan_cfg 479 const struct mwifiex_user_scan_cfg
477 *user_scan_in, 480 *user_scan_in,
@@ -528,6 +531,7 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
528 } 531 }
529 532
530 } 533 }
534 return chan_idx;
531} 535}
532 536
533/* 537/*
@@ -727,6 +731,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
727 u32 num_probes; 731 u32 num_probes;
728 u32 ssid_len; 732 u32 ssid_len;
729 u32 chan_idx; 733 u32 chan_idx;
734 u32 chan_num;
730 u32 scan_type; 735 u32 scan_type;
731 u16 scan_dur; 736 u16 scan_dur;
732 u8 channel; 737 u8 channel;
@@ -850,7 +855,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
850 if (*filtered_scan) 855 if (*filtered_scan)
851 *max_chan_per_scan = MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN; 856 *max_chan_per_scan = MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN;
852 else 857 else
853 *max_chan_per_scan = MWIFIEX_CHANNELS_PER_SCAN_CMD; 858 *max_chan_per_scan = MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD;
854 859
855 /* If the input config or adapter has the number of Probes set, 860 /* If the input config or adapter has the number of Probes set,
856 add tlv */ 861 add tlv */
@@ -962,13 +967,28 @@ mwifiex_config_scan(struct mwifiex_private *priv,
962 dev_dbg(adapter->dev, 967 dev_dbg(adapter->dev,
963 "info: Scan: Scanning current channel only\n"); 968 "info: Scan: Scanning current channel only\n");
964 } 969 }
965 970 chan_num = chan_idx;
966 } else { 971 } else {
967 dev_dbg(adapter->dev, 972 dev_dbg(adapter->dev,
968 "info: Scan: Creating full region channel list\n"); 973 "info: Scan: Creating full region channel list\n");
969 mwifiex_scan_create_channel_list(priv, user_scan_in, 974 chan_num = mwifiex_scan_create_channel_list(priv, user_scan_in,
970 scan_chan_list, 975 scan_chan_list,
971 *filtered_scan); 976 *filtered_scan);
977 }
978
979 /*
980 * In associated state we will reduce the number of channels scanned per
981 * scan command to avoid any traffic delay/loss. This number is decided
982 * based on total number of channels to be scanned due to constraints
983 * of command buffers.
984 */
985 if (priv->media_connected) {
986 if (chan_num < MWIFIEX_LIMIT_1_CHANNEL_PER_SCAN_CMD)
987 *max_chan_per_scan = 1;
988 else if (chan_num < MWIFIEX_LIMIT_2_CHANNELS_PER_SCAN_CMD)
989 *max_chan_per_scan = 2;
990 else if (chan_num < MWIFIEX_LIMIT_3_CHANNELS_PER_SCAN_CMD)
991 *max_chan_per_scan = 3;
972 } 992 }
973} 993}
974 994
@@ -1014,14 +1034,12 @@ mwifiex_ret_802_11_scan_get_tlv_ptrs(struct mwifiex_adapter *adapter,
1014 case TLV_TYPE_TSFTIMESTAMP: 1034 case TLV_TYPE_TSFTIMESTAMP:
1015 dev_dbg(adapter->dev, "info: SCAN_RESP: TSF " 1035 dev_dbg(adapter->dev, "info: SCAN_RESP: TSF "
1016 "timestamp TLV, len = %d\n", tlv_len); 1036 "timestamp TLV, len = %d\n", tlv_len);
1017 *tlv_data = (struct mwifiex_ie_types_data *) 1037 *tlv_data = current_tlv;
1018 current_tlv;
1019 break; 1038 break;
1020 case TLV_TYPE_CHANNELBANDLIST: 1039 case TLV_TYPE_CHANNELBANDLIST:
1021 dev_dbg(adapter->dev, "info: SCAN_RESP: channel" 1040 dev_dbg(adapter->dev, "info: SCAN_RESP: channel"
1022 " band list TLV, len = %d\n", tlv_len); 1041 " band list TLV, len = %d\n", tlv_len);
1023 *tlv_data = (struct mwifiex_ie_types_data *) 1042 *tlv_data = current_tlv;
1024 current_tlv;
1025 break; 1043 break;
1026 default: 1044 default:
1027 dev_err(adapter->dev, 1045 dev_err(adapter->dev,
@@ -1226,15 +1244,15 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
1226 bss_entry->beacon_buf); 1244 bss_entry->beacon_buf);
1227 break; 1245 break;
1228 case WLAN_EID_BSS_COEX_2040: 1246 case WLAN_EID_BSS_COEX_2040:
1229 bss_entry->bcn_bss_co_2040 = (u8 *) (current_ptr + 1247 bss_entry->bcn_bss_co_2040 = current_ptr +
1230 sizeof(struct ieee_types_header)); 1248 sizeof(struct ieee_types_header);
1231 bss_entry->bss_co_2040_offset = (u16) (current_ptr + 1249 bss_entry->bss_co_2040_offset = (u16) (current_ptr +
1232 sizeof(struct ieee_types_header) - 1250 sizeof(struct ieee_types_header) -
1233 bss_entry->beacon_buf); 1251 bss_entry->beacon_buf);
1234 break; 1252 break;
1235 case WLAN_EID_EXT_CAPABILITY: 1253 case WLAN_EID_EXT_CAPABILITY:
1236 bss_entry->bcn_ext_cap = (u8 *) (current_ptr + 1254 bss_entry->bcn_ext_cap = current_ptr +
1237 sizeof(struct ieee_types_header)); 1255 sizeof(struct ieee_types_header);
1238 bss_entry->ext_cap_offset = (u16) (current_ptr + 1256 bss_entry->ext_cap_offset = (u16) (current_ptr +
1239 sizeof(struct ieee_types_header) - 1257 sizeof(struct ieee_types_header) -
1240 bss_entry->beacon_buf); 1258 bss_entry->beacon_buf);
@@ -1276,8 +1294,8 @@ mwifiex_radio_type_to_band(u8 radio_type)
1276 * order to send the appropriate scan commands to firmware to populate or 1294 * order to send the appropriate scan commands to firmware to populate or
1277 * update the internal driver scan table. 1295 * update the internal driver scan table.
1278 */ 1296 */
1279static int mwifiex_scan_networks(struct mwifiex_private *priv, 1297int mwifiex_scan_networks(struct mwifiex_private *priv,
1280 const struct mwifiex_user_scan_cfg *user_scan_in) 1298 const struct mwifiex_user_scan_cfg *user_scan_in)
1281{ 1299{
1282 int ret = 0; 1300 int ret = 0;
1283 struct mwifiex_adapter *adapter = priv->adapter; 1301 struct mwifiex_adapter *adapter = priv->adapter;
@@ -1342,6 +1360,7 @@ static int mwifiex_scan_networks(struct mwifiex_private *priv,
1342 adapter->cmd_queued = cmd_node; 1360 adapter->cmd_queued = cmd_node;
1343 mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, 1361 mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
1344 true); 1362 true);
1363 queue_work(adapter->workqueue, &adapter->main_work);
1345 } else { 1364 } else {
1346 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, 1365 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
1347 flags); 1366 flags);
@@ -1358,26 +1377,6 @@ static int mwifiex_scan_networks(struct mwifiex_private *priv,
1358} 1377}
1359 1378
1360/* 1379/*
1361 * Sends IOCTL request to start a scan with user configurations.
1362 *
1363 * This function allocates the IOCTL request buffer, fills it
1364 * with requisite parameters and calls the IOCTL handler.
1365 *
1366 * Upon completion, it also generates a wireless event to notify
1367 * applications.
1368 */
1369int mwifiex_set_user_scan_ioctl(struct mwifiex_private *priv,
1370 struct mwifiex_user_scan_cfg *scan_req)
1371{
1372 int status;
1373
1374 status = mwifiex_scan_networks(priv, scan_req);
1375 queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
1376
1377 return status;
1378}
1379
1380/*
1381 * This function prepares a scan command to be sent to the firmware. 1380 * This function prepares a scan command to be sent to the firmware.
1382 * 1381 *
1383 * This uses the scan command configuration sent to the command processing 1382 * This uses the scan command configuration sent to the command processing
@@ -1683,8 +1682,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
1683 goto done; 1682 goto done;
1684 } 1683 }
1685 if (element_id == WLAN_EID_DS_PARAMS) { 1684 if (element_id == WLAN_EID_DS_PARAMS) {
1686 channel = *(u8 *) (current_ptr + 1685 channel = *(current_ptr + sizeof(struct ieee_types_header));
1687 sizeof(struct ieee_types_header));
1688 break; 1686 break;
1689 } 1687 }
1690 1688
@@ -1772,14 +1770,23 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
1772 priv->user_scan_cfg = NULL; 1770 priv->user_scan_cfg = NULL;
1773 } 1771 }
1774 } else { 1772 } else {
1775 /* Get scan command from scan_pending_q and put to 1773 if (!mwifiex_wmm_lists_empty(adapter)) {
1776 cmd_pending_q */ 1774 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
1777 cmd_node = list_first_entry(&adapter->scan_pending_q, 1775 flags);
1778 struct cmd_ctrl_node, list); 1776 adapter->scan_delay_cnt = 1;
1779 list_del(&cmd_node->list); 1777 mod_timer(&priv->scan_delay_timer, jiffies +
1780 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); 1778 msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
1781 1779 } else {
1782 mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true); 1780 /* Get scan command from scan_pending_q and put to
1781 cmd_pending_q */
1782 cmd_node = list_first_entry(&adapter->scan_pending_q,
1783 struct cmd_ctrl_node, list);
1784 list_del(&cmd_node->list);
1785 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
1786 flags);
1787 mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
1788 true);
1789 }
1783 } 1790 }
1784 1791
1785done: 1792done:
@@ -2010,12 +2017,11 @@ mwifiex_save_curr_bcn(struct mwifiex_private *priv)
2010 2017
2011 if (curr_bss->bcn_bss_co_2040) 2018 if (curr_bss->bcn_bss_co_2040)
2012 curr_bss->bcn_bss_co_2040 = 2019 curr_bss->bcn_bss_co_2040 =
2013 (u8 *) (curr_bss->beacon_buf + 2020 (curr_bss->beacon_buf + curr_bss->bss_co_2040_offset);
2014 curr_bss->bss_co_2040_offset);
2015 2021
2016 if (curr_bss->bcn_ext_cap) 2022 if (curr_bss->bcn_ext_cap)
2017 curr_bss->bcn_ext_cap = (u8 *) (curr_bss->beacon_buf + 2023 curr_bss->bcn_ext_cap = curr_bss->beacon_buf +
2018 curr_bss->ext_cap_offset); 2024 curr_bss->ext_cap_offset;
2019} 2025}
2020 2026
2021/* 2027/*
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 40e025da6bc2..225d4c776177 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -260,6 +260,56 @@ static int mwifiex_cmd_tx_power_cfg(struct host_cmd_ds_command *cmd,
260} 260}
261 261
262/* 262/*
263 * This function prepares command to get RF Tx power.
264 */
265static int mwifiex_cmd_rf_tx_power(struct mwifiex_private *priv,
266 struct host_cmd_ds_command *cmd,
267 u16 cmd_action, void *data_buf)
268{
269 struct host_cmd_ds_rf_tx_pwr *txp = &cmd->params.txp;
270
271 cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_rf_tx_pwr)
272 + S_DS_GEN);
273 cmd->command = cpu_to_le16(HostCmd_CMD_RF_TX_PWR);
274 txp->action = cpu_to_le16(cmd_action);
275
276 return 0;
277}
278
279/*
280 * This function prepares command to set rf antenna.
281 */
282static int mwifiex_cmd_rf_antenna(struct mwifiex_private *priv,
283 struct host_cmd_ds_command *cmd,
284 u16 cmd_action,
285 struct mwifiex_ds_ant_cfg *ant_cfg)
286{
287 struct host_cmd_ds_rf_ant_mimo *ant_mimo = &cmd->params.ant_mimo;
288 struct host_cmd_ds_rf_ant_siso *ant_siso = &cmd->params.ant_siso;
289
290 cmd->command = cpu_to_le16(HostCmd_CMD_RF_ANTENNA);
291
292 if (cmd_action != HostCmd_ACT_GEN_SET)
293 return 0;
294
295 if (priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2) {
296 cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_rf_ant_mimo) +
297 S_DS_GEN);
298 ant_mimo->action_tx = cpu_to_le16(HostCmd_ACT_SET_TX);
299 ant_mimo->tx_ant_mode = cpu_to_le16((u16)ant_cfg->tx_ant);
300 ant_mimo->action_rx = cpu_to_le16(HostCmd_ACT_SET_RX);
301 ant_mimo->rx_ant_mode = cpu_to_le16((u16)ant_cfg->rx_ant);
302 } else {
303 cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_rf_ant_siso) +
304 S_DS_GEN);
305 ant_siso->action = cpu_to_le16(HostCmd_ACT_SET_BOTH);
306 ant_siso->ant_mode = cpu_to_le16((u16)ant_cfg->tx_ant);
307 }
308
309 return 0;
310}
311
312/*
263 * This function prepares command to set Host Sleep configuration. 313 * This function prepares command to set Host Sleep configuration.
264 * 314 *
265 * Preparation includes - 315 * Preparation includes -
@@ -793,8 +843,7 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
793 struct host_cmd_ds_mac_reg_access *mac_reg; 843 struct host_cmd_ds_mac_reg_access *mac_reg;
794 844
795 cmd->size = cpu_to_le16(sizeof(*mac_reg) + S_DS_GEN); 845 cmd->size = cpu_to_le16(sizeof(*mac_reg) + S_DS_GEN);
796 mac_reg = (struct host_cmd_ds_mac_reg_access *) &cmd-> 846 mac_reg = &cmd->params.mac_reg;
797 params.mac_reg;
798 mac_reg->action = cpu_to_le16(cmd_action); 847 mac_reg->action = cpu_to_le16(cmd_action);
799 mac_reg->offset = 848 mac_reg->offset =
800 cpu_to_le16((u16) le32_to_cpu(reg_rw->offset)); 849 cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
@@ -806,8 +855,7 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
806 struct host_cmd_ds_bbp_reg_access *bbp_reg; 855 struct host_cmd_ds_bbp_reg_access *bbp_reg;
807 856
808 cmd->size = cpu_to_le16(sizeof(*bbp_reg) + S_DS_GEN); 857 cmd->size = cpu_to_le16(sizeof(*bbp_reg) + S_DS_GEN);
809 bbp_reg = (struct host_cmd_ds_bbp_reg_access *) 858 bbp_reg = &cmd->params.bbp_reg;
810 &cmd->params.bbp_reg;
811 bbp_reg->action = cpu_to_le16(cmd_action); 859 bbp_reg->action = cpu_to_le16(cmd_action);
812 bbp_reg->offset = 860 bbp_reg->offset =
813 cpu_to_le16((u16) le32_to_cpu(reg_rw->offset)); 861 cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
@@ -819,8 +867,7 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
819 struct host_cmd_ds_rf_reg_access *rf_reg; 867 struct host_cmd_ds_rf_reg_access *rf_reg;
820 868
821 cmd->size = cpu_to_le16(sizeof(*rf_reg) + S_DS_GEN); 869 cmd->size = cpu_to_le16(sizeof(*rf_reg) + S_DS_GEN);
822 rf_reg = (struct host_cmd_ds_rf_reg_access *) 870 rf_reg = &cmd->params.rf_reg;
823 &cmd->params.rf_reg;
824 rf_reg->action = cpu_to_le16(cmd_action); 871 rf_reg->action = cpu_to_le16(cmd_action);
825 rf_reg->offset = cpu_to_le16((u16) le32_to_cpu(reg_rw->offset)); 872 rf_reg->offset = cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
826 rf_reg->value = (u8) le32_to_cpu(reg_rw->value); 873 rf_reg->value = (u8) le32_to_cpu(reg_rw->value);
@@ -831,8 +878,7 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
831 struct host_cmd_ds_pmic_reg_access *pmic_reg; 878 struct host_cmd_ds_pmic_reg_access *pmic_reg;
832 879
833 cmd->size = cpu_to_le16(sizeof(*pmic_reg) + S_DS_GEN); 880 cmd->size = cpu_to_le16(sizeof(*pmic_reg) + S_DS_GEN);
834 pmic_reg = (struct host_cmd_ds_pmic_reg_access *) &cmd-> 881 pmic_reg = &cmd->params.pmic_reg;
835 params.pmic_reg;
836 pmic_reg->action = cpu_to_le16(cmd_action); 882 pmic_reg->action = cpu_to_le16(cmd_action);
837 pmic_reg->offset = 883 pmic_reg->offset =
838 cpu_to_le16((u16) le32_to_cpu(reg_rw->offset)); 884 cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
@@ -844,8 +890,7 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
844 struct host_cmd_ds_rf_reg_access *cau_reg; 890 struct host_cmd_ds_rf_reg_access *cau_reg;
845 891
846 cmd->size = cpu_to_le16(sizeof(*cau_reg) + S_DS_GEN); 892 cmd->size = cpu_to_le16(sizeof(*cau_reg) + S_DS_GEN);
847 cau_reg = (struct host_cmd_ds_rf_reg_access *) 893 cau_reg = &cmd->params.rf_reg;
848 &cmd->params.rf_reg;
849 cau_reg->action = cpu_to_le16(cmd_action); 894 cau_reg->action = cpu_to_le16(cmd_action);
850 cau_reg->offset = 895 cau_reg->offset =
851 cpu_to_le16((u16) le32_to_cpu(reg_rw->offset)); 896 cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
@@ -856,7 +901,6 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
856 { 901 {
857 struct mwifiex_ds_read_eeprom *rd_eeprom = data_buf; 902 struct mwifiex_ds_read_eeprom *rd_eeprom = data_buf;
858 struct host_cmd_ds_802_11_eeprom_access *cmd_eeprom = 903 struct host_cmd_ds_802_11_eeprom_access *cmd_eeprom =
859 (struct host_cmd_ds_802_11_eeprom_access *)
860 &cmd->params.eeprom; 904 &cmd->params.eeprom;
861 905
862 cmd->size = cpu_to_le16(sizeof(*cmd_eeprom) + S_DS_GEN); 906 cmd->size = cpu_to_le16(sizeof(*cmd_eeprom) + S_DS_GEN);
@@ -1055,6 +1099,14 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
1055 ret = mwifiex_cmd_tx_power_cfg(cmd_ptr, cmd_action, 1099 ret = mwifiex_cmd_tx_power_cfg(cmd_ptr, cmd_action,
1056 data_buf); 1100 data_buf);
1057 break; 1101 break;
1102 case HostCmd_CMD_RF_TX_PWR:
1103 ret = mwifiex_cmd_rf_tx_power(priv, cmd_ptr, cmd_action,
1104 data_buf);
1105 break;
1106 case HostCmd_CMD_RF_ANTENNA:
1107 ret = mwifiex_cmd_rf_antenna(priv, cmd_ptr, cmd_action,
1108 data_buf);
1109 break;
1058 case HostCmd_CMD_802_11_PS_MODE_ENH: 1110 case HostCmd_CMD_802_11_PS_MODE_ENH:
1059 ret = mwifiex_cmd_enh_power_mode(priv, cmd_ptr, cmd_action, 1111 ret = mwifiex_cmd_enh_power_mode(priv, cmd_ptr, cmd_action,
1060 (uint16_t)cmd_oid, data_buf); 1112 (uint16_t)cmd_oid, data_buf);
@@ -1283,7 +1335,7 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
1283 priv->data_rate = 0; 1335 priv->data_rate = 0;
1284 1336
1285 /* get tx power */ 1337 /* get tx power */
1286 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_TXPWR_CFG, 1338 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_RF_TX_PWR,
1287 HostCmd_ACT_GEN_GET, 0, NULL); 1339 HostCmd_ACT_GEN_GET, 0, NULL);
1288 if (ret) 1340 if (ret)
1289 return -1; 1341 return -1;
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index a79ed9bd9695..97715dfbdf58 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -227,7 +227,7 @@ static int mwifiex_ret_get_log(struct mwifiex_private *priv,
227 struct mwifiex_ds_get_stats *stats) 227 struct mwifiex_ds_get_stats *stats)
228{ 228{
229 struct host_cmd_ds_802_11_get_log *get_log = 229 struct host_cmd_ds_802_11_get_log *get_log =
230 (struct host_cmd_ds_802_11_get_log *) &resp->params.get_log; 230 &resp->params.get_log;
231 231
232 if (stats) { 232 if (stats) {
233 stats->mcast_tx_frame = le32_to_cpu(get_log->mcast_tx_frame); 233 stats->mcast_tx_frame = le32_to_cpu(get_log->mcast_tx_frame);
@@ -282,7 +282,7 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv,
282 u32 i; 282 u32 i;
283 int ret = 0; 283 int ret = 0;
284 284
285 tlv_buf = (u8 *) ((u8 *) rate_cfg) + 285 tlv_buf = ((u8 *)rate_cfg) +
286 sizeof(struct host_cmd_ds_tx_rate_cfg); 286 sizeof(struct host_cmd_ds_tx_rate_cfg);
287 tlv_buf_len = *(u16 *) (tlv_buf + sizeof(u16)); 287 tlv_buf_len = *(u16 *) (tlv_buf + sizeof(u16));
288 288
@@ -451,6 +451,57 @@ static int mwifiex_ret_tx_power_cfg(struct mwifiex_private *priv,
451} 451}
452 452
453/* 453/*
454 * This function handles the command response of get RF Tx power.
455 */
456static int mwifiex_ret_rf_tx_power(struct mwifiex_private *priv,
457 struct host_cmd_ds_command *resp)
458{
459 struct host_cmd_ds_rf_tx_pwr *txp = &resp->params.txp;
460 u16 action = le16_to_cpu(txp->action);
461
462 priv->tx_power_level = le16_to_cpu(txp->cur_level);
463
464 if (action == HostCmd_ACT_GEN_GET) {
465 priv->max_tx_power_level = txp->max_power;
466 priv->min_tx_power_level = txp->min_power;
467 }
468
469 dev_dbg(priv->adapter->dev,
470 "Current TxPower Level=%d, Max Power=%d, Min Power=%d\n",
471 priv->tx_power_level, priv->max_tx_power_level,
472 priv->min_tx_power_level);
473
474 return 0;
475}
476
477/*
478 * This function handles the command response of set rf antenna
479 */
480static int mwifiex_ret_rf_antenna(struct mwifiex_private *priv,
481 struct host_cmd_ds_command *resp)
482{
483 struct host_cmd_ds_rf_ant_mimo *ant_mimo = &resp->params.ant_mimo;
484 struct host_cmd_ds_rf_ant_siso *ant_siso = &resp->params.ant_siso;
485 struct mwifiex_adapter *adapter = priv->adapter;
486
487 if (adapter->hw_dev_mcs_support == HT_STREAM_2X2)
488 dev_dbg(adapter->dev,
489 "RF_ANT_RESP: Tx action = 0x%x, Tx Mode = 0x%04x"
490 " Rx action = 0x%x, Rx Mode = 0x%04x\n",
491 le16_to_cpu(ant_mimo->action_tx),
492 le16_to_cpu(ant_mimo->tx_ant_mode),
493 le16_to_cpu(ant_mimo->action_rx),
494 le16_to_cpu(ant_mimo->rx_ant_mode));
495 else
496 dev_dbg(adapter->dev,
497 "RF_ANT_RESP: action = 0x%x, Mode = 0x%04x\n",
498 le16_to_cpu(ant_siso->action),
499 le16_to_cpu(ant_siso->ant_mode));
500
501 return 0;
502}
503
504/*
454 * This function handles the command response of set/get MAC address. 505 * This function handles the command response of set/get MAC address.
455 * 506 *
456 * Handling includes saving the MAC address in driver. 507 * Handling includes saving the MAC address in driver.
@@ -679,39 +730,33 @@ static int mwifiex_ret_reg_access(u16 type, struct host_cmd_ds_command *resp,
679 eeprom = data_buf; 730 eeprom = data_buf;
680 switch (type) { 731 switch (type) {
681 case HostCmd_CMD_MAC_REG_ACCESS: 732 case HostCmd_CMD_MAC_REG_ACCESS:
682 r.mac = (struct host_cmd_ds_mac_reg_access *) 733 r.mac = &resp->params.mac_reg;
683 &resp->params.mac_reg;
684 reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.mac->offset)); 734 reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.mac->offset));
685 reg_rw->value = r.mac->value; 735 reg_rw->value = r.mac->value;
686 break; 736 break;
687 case HostCmd_CMD_BBP_REG_ACCESS: 737 case HostCmd_CMD_BBP_REG_ACCESS:
688 r.bbp = (struct host_cmd_ds_bbp_reg_access *) 738 r.bbp = &resp->params.bbp_reg;
689 &resp->params.bbp_reg;
690 reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.bbp->offset)); 739 reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.bbp->offset));
691 reg_rw->value = cpu_to_le32((u32) r.bbp->value); 740 reg_rw->value = cpu_to_le32((u32) r.bbp->value);
692 break; 741 break;
693 742
694 case HostCmd_CMD_RF_REG_ACCESS: 743 case HostCmd_CMD_RF_REG_ACCESS:
695 r.rf = (struct host_cmd_ds_rf_reg_access *) 744 r.rf = &resp->params.rf_reg;
696 &resp->params.rf_reg;
697 reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.rf->offset)); 745 reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.rf->offset));
698 reg_rw->value = cpu_to_le32((u32) r.bbp->value); 746 reg_rw->value = cpu_to_le32((u32) r.bbp->value);
699 break; 747 break;
700 case HostCmd_CMD_PMIC_REG_ACCESS: 748 case HostCmd_CMD_PMIC_REG_ACCESS:
701 r.pmic = (struct host_cmd_ds_pmic_reg_access *) 749 r.pmic = &resp->params.pmic_reg;
702 &resp->params.pmic_reg;
703 reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.pmic->offset)); 750 reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.pmic->offset));
704 reg_rw->value = cpu_to_le32((u32) r.pmic->value); 751 reg_rw->value = cpu_to_le32((u32) r.pmic->value);
705 break; 752 break;
706 case HostCmd_CMD_CAU_REG_ACCESS: 753 case HostCmd_CMD_CAU_REG_ACCESS:
707 r.rf = (struct host_cmd_ds_rf_reg_access *) 754 r.rf = &resp->params.rf_reg;
708 &resp->params.rf_reg;
709 reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.rf->offset)); 755 reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.rf->offset));
710 reg_rw->value = cpu_to_le32((u32) r.rf->value); 756 reg_rw->value = cpu_to_le32((u32) r.rf->value);
711 break; 757 break;
712 case HostCmd_CMD_802_11_EEPROM_ACCESS: 758 case HostCmd_CMD_802_11_EEPROM_ACCESS:
713 r.eeprom = (struct host_cmd_ds_802_11_eeprom_access *) 759 r.eeprom = &resp->params.eeprom;
714 &resp->params.eeprom;
715 pr_debug("info: EEPROM read len=%x\n", r.eeprom->byte_count); 760 pr_debug("info: EEPROM read len=%x\n", r.eeprom->byte_count);
716 if (le16_to_cpu(eeprom->byte_count) < 761 if (le16_to_cpu(eeprom->byte_count) <
717 le16_to_cpu(r.eeprom->byte_count)) { 762 le16_to_cpu(r.eeprom->byte_count)) {
@@ -787,7 +832,7 @@ static int mwifiex_ret_subsc_evt(struct mwifiex_private *priv,
787 struct mwifiex_ds_misc_subsc_evt *sub_event) 832 struct mwifiex_ds_misc_subsc_evt *sub_event)
788{ 833{
789 struct host_cmd_ds_802_11_subsc_evt *cmd_sub_event = 834 struct host_cmd_ds_802_11_subsc_evt *cmd_sub_event =
790 (struct host_cmd_ds_802_11_subsc_evt *)&resp->params.subsc_evt; 835 &resp->params.subsc_evt;
791 836
792 /* For every subscribe event command (Get/Set/Clear), FW reports the 837 /* For every subscribe event command (Get/Set/Clear), FW reports the
793 * current set of subscribed events*/ 838 * current set of subscribed events*/
@@ -847,6 +892,12 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
847 case HostCmd_CMD_TXPWR_CFG: 892 case HostCmd_CMD_TXPWR_CFG:
848 ret = mwifiex_ret_tx_power_cfg(priv, resp); 893 ret = mwifiex_ret_tx_power_cfg(priv, resp);
849 break; 894 break;
895 case HostCmd_CMD_RF_TX_PWR:
896 ret = mwifiex_ret_rf_tx_power(priv, resp);
897 break;
898 case HostCmd_CMD_RF_ANTENNA:
899 ret = mwifiex_ret_rf_antenna(priv, resp);
900 break;
850 case HostCmd_CMD_802_11_PS_MODE_ENH: 901 case HostCmd_CMD_802_11_PS_MODE_ENH:
851 ret = mwifiex_ret_enh_power_mode(priv, resp, data_buf); 902 ret = mwifiex_ret_enh_power_mode(priv, resp, data_buf);
852 break; 903 break;
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index 11e731f3581c..b8614a825460 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -422,7 +422,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
422 422
423 if (len != -1) { 423 if (len != -1) {
424 sinfo.filled = STATION_INFO_ASSOC_REQ_IES; 424 sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
425 sinfo.assoc_req_ies = (u8 *)&event->data[len]; 425 sinfo.assoc_req_ies = &event->data[len];
426 len = (u8 *)sinfo.assoc_req_ies - 426 len = (u8 *)sinfo.assoc_req_ies -
427 (u8 *)&event->frame_control; 427 (u8 *)&event->frame_control;
428 sinfo.assoc_req_ies_len = 428 sinfo.assoc_req_ies_len =
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 106c449477b2..f2fd2423214f 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -66,9 +66,6 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
66 dev_dbg(adapter->dev, "cmd pending\n"); 66 dev_dbg(adapter->dev, "cmd pending\n");
67 atomic_inc(&adapter->cmd_pending); 67 atomic_inc(&adapter->cmd_pending);
68 68
69 /* Status pending, wake up main process */
70 queue_work(adapter->workqueue, &adapter->main_work);
71
72 /* Wait for completion */ 69 /* Wait for completion */
73 wait_event_interruptible(adapter->cmd_wait_q.wait, 70 wait_event_interruptible(adapter->cmd_wait_q.wait,
74 *(cmd_queued->condition)); 71 *(cmd_queued->condition));
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index 89f9a2a45de3..f40e93fe894a 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -26,6 +26,7 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
26 struct mwifiex_uap_bss_param *bss_config, 26 struct mwifiex_uap_bss_param *bss_config,
27 struct cfg80211_ap_settings *params) { 27 struct cfg80211_ap_settings *params) {
28 int i; 28 int i;
29 struct mwifiex_wep_key wep_key;
29 30
30 if (!params->privacy) { 31 if (!params->privacy) {
31 bss_config->protocol = PROTOCOL_NO_SECURITY; 32 bss_config->protocol = PROTOCOL_NO_SECURITY;
@@ -65,7 +66,7 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
65 } 66 }
66 if (params->crypto.wpa_versions & 67 if (params->crypto.wpa_versions &
67 NL80211_WPA_VERSION_2) { 68 NL80211_WPA_VERSION_2) {
68 bss_config->protocol = PROTOCOL_WPA2; 69 bss_config->protocol |= PROTOCOL_WPA2;
69 bss_config->key_mgmt = KEY_MGMT_EAP; 70 bss_config->key_mgmt = KEY_MGMT_EAP;
70 } 71 }
71 break; 72 break;
@@ -77,7 +78,7 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
77 } 78 }
78 if (params->crypto.wpa_versions & 79 if (params->crypto.wpa_versions &
79 NL80211_WPA_VERSION_2) { 80 NL80211_WPA_VERSION_2) {
80 bss_config->protocol = PROTOCOL_WPA2; 81 bss_config->protocol |= PROTOCOL_WPA2;
81 bss_config->key_mgmt = KEY_MGMT_PSK; 82 bss_config->key_mgmt = KEY_MGMT_PSK;
82 } 83 }
83 break; 84 break;
@@ -91,10 +92,19 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
91 case WLAN_CIPHER_SUITE_WEP104: 92 case WLAN_CIPHER_SUITE_WEP104:
92 break; 93 break;
93 case WLAN_CIPHER_SUITE_TKIP: 94 case WLAN_CIPHER_SUITE_TKIP:
94 bss_config->wpa_cfg.pairwise_cipher_wpa = CIPHER_TKIP; 95 if (params->crypto.wpa_versions & NL80211_WPA_VERSION_1)
96 bss_config->wpa_cfg.pairwise_cipher_wpa |=
97 CIPHER_TKIP;
98 if (params->crypto.wpa_versions & NL80211_WPA_VERSION_2)
99 bss_config->wpa_cfg.pairwise_cipher_wpa2 |=
100 CIPHER_TKIP;
95 break; 101 break;
96 case WLAN_CIPHER_SUITE_CCMP: 102 case WLAN_CIPHER_SUITE_CCMP:
97 bss_config->wpa_cfg.pairwise_cipher_wpa2 = 103 if (params->crypto.wpa_versions & NL80211_WPA_VERSION_1)
104 bss_config->wpa_cfg.pairwise_cipher_wpa |=
105 CIPHER_AES_CCMP;
106 if (params->crypto.wpa_versions & NL80211_WPA_VERSION_2)
107 bss_config->wpa_cfg.pairwise_cipher_wpa2 |=
98 CIPHER_AES_CCMP; 108 CIPHER_AES_CCMP;
99 default: 109 default:
100 break; 110 break;
@@ -104,6 +114,27 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
104 switch (params->crypto.cipher_group) { 114 switch (params->crypto.cipher_group) {
105 case WLAN_CIPHER_SUITE_WEP40: 115 case WLAN_CIPHER_SUITE_WEP40:
106 case WLAN_CIPHER_SUITE_WEP104: 116 case WLAN_CIPHER_SUITE_WEP104:
117 if (priv->sec_info.wep_enabled) {
118 bss_config->protocol = PROTOCOL_STATIC_WEP;
119 bss_config->key_mgmt = KEY_MGMT_NONE;
120 bss_config->wpa_cfg.length = 0;
121
122 for (i = 0; i < NUM_WEP_KEYS; i++) {
123 wep_key = priv->wep_key[i];
124 bss_config->wep_cfg[i].key_index = i;
125
126 if (priv->wep_key_curr_index == i)
127 bss_config->wep_cfg[i].is_default = 1;
128 else
129 bss_config->wep_cfg[i].is_default = 0;
130
131 bss_config->wep_cfg[i].length =
132 wep_key.key_length;
133 memcpy(&bss_config->wep_cfg[i].key,
134 &wep_key.key_material,
135 wep_key.key_length);
136 }
137 }
107 break; 138 break;
108 case WLAN_CIPHER_SUITE_TKIP: 139 case WLAN_CIPHER_SUITE_TKIP:
109 bss_config->wpa_cfg.group_cipher = CIPHER_TKIP; 140 bss_config->wpa_cfg.group_cipher = CIPHER_TKIP;
@@ -118,6 +149,33 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
118 return 0; 149 return 0;
119} 150}
120 151
152/* This function updates 11n related parameters from IE and sets them into
153 * bss_config structure.
154 */
155void
156mwifiex_set_ht_params(struct mwifiex_private *priv,
157 struct mwifiex_uap_bss_param *bss_cfg,
158 struct cfg80211_ap_settings *params)
159{
160 const u8 *ht_ie;
161
162 if (!ISSUPP_11NENABLED(priv->adapter->fw_cap_info))
163 return;
164
165 ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, params->beacon.tail,
166 params->beacon.tail_len);
167 if (ht_ie) {
168 memcpy(&bss_cfg->ht_cap, ht_ie + 2,
169 sizeof(struct ieee80211_ht_cap));
170 } else {
171 memset(&bss_cfg->ht_cap , 0, sizeof(struct ieee80211_ht_cap));
172 bss_cfg->ht_cap.cap_info = cpu_to_le16(MWIFIEX_DEF_HT_CAP);
173 bss_cfg->ht_cap.ampdu_params_info = MWIFIEX_DEF_AMPDU;
174 }
175
176 return;
177}
178
121/* This function initializes some of mwifiex_uap_bss_param variables. 179/* This function initializes some of mwifiex_uap_bss_param variables.
122 * This helps FW in ignoring invalid values. These values may or may not 180 * This helps FW in ignoring invalid values. These values may or may not
123 * be get updated to valid ones at later stage. 181 * be get updated to valid ones at later stage.
@@ -135,6 +193,120 @@ void mwifiex_set_sys_config_invalid_data(struct mwifiex_uap_bss_param *config)
135} 193}
136 194
137/* This function parses BSS related parameters from structure 195/* This function parses BSS related parameters from structure
196 * and prepares TLVs specific to WPA/WPA2 security.
197 * These TLVs are appended to command buffer.
198 */
199static void
200mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
201{
202 struct host_cmd_tlv_pwk_cipher *pwk_cipher;
203 struct host_cmd_tlv_gwk_cipher *gwk_cipher;
204 struct host_cmd_tlv_passphrase *passphrase;
205 struct host_cmd_tlv_akmp *tlv_akmp;
206 struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
207 u16 cmd_size = *param_size;
208 u8 *tlv = *tlv_buf;
209
210 tlv_akmp = (struct host_cmd_tlv_akmp *)tlv;
211 tlv_akmp->tlv.type = cpu_to_le16(TLV_TYPE_UAP_AKMP);
212 tlv_akmp->tlv.len = cpu_to_le16(sizeof(struct host_cmd_tlv_akmp) -
213 sizeof(struct host_cmd_tlv));
214 tlv_akmp->key_mgmt_operation = cpu_to_le16(bss_cfg->key_mgmt_operation);
215 tlv_akmp->key_mgmt = cpu_to_le16(bss_cfg->key_mgmt);
216 cmd_size += sizeof(struct host_cmd_tlv_akmp);
217 tlv += sizeof(struct host_cmd_tlv_akmp);
218
219 if (bss_cfg->wpa_cfg.pairwise_cipher_wpa & VALID_CIPHER_BITMAP) {
220 pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
221 pwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
222 pwk_cipher->tlv.len =
223 cpu_to_le16(sizeof(struct host_cmd_tlv_pwk_cipher) -
224 sizeof(struct host_cmd_tlv));
225 pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA);
226 pwk_cipher->cipher = bss_cfg->wpa_cfg.pairwise_cipher_wpa;
227 cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
228 tlv += sizeof(struct host_cmd_tlv_pwk_cipher);
229 }
230
231 if (bss_cfg->wpa_cfg.pairwise_cipher_wpa2 & VALID_CIPHER_BITMAP) {
232 pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
233 pwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
234 pwk_cipher->tlv.len =
235 cpu_to_le16(sizeof(struct host_cmd_tlv_pwk_cipher) -
236 sizeof(struct host_cmd_tlv));
237 pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA2);
238 pwk_cipher->cipher = bss_cfg->wpa_cfg.pairwise_cipher_wpa2;
239 cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
240 tlv += sizeof(struct host_cmd_tlv_pwk_cipher);
241 }
242
243 if (bss_cfg->wpa_cfg.group_cipher & VALID_CIPHER_BITMAP) {
244 gwk_cipher = (struct host_cmd_tlv_gwk_cipher *)tlv;
245 gwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_GWK_CIPHER);
246 gwk_cipher->tlv.len =
247 cpu_to_le16(sizeof(struct host_cmd_tlv_gwk_cipher) -
248 sizeof(struct host_cmd_tlv));
249 gwk_cipher->cipher = bss_cfg->wpa_cfg.group_cipher;
250 cmd_size += sizeof(struct host_cmd_tlv_gwk_cipher);
251 tlv += sizeof(struct host_cmd_tlv_gwk_cipher);
252 }
253
254 if (bss_cfg->wpa_cfg.length) {
255 passphrase = (struct host_cmd_tlv_passphrase *)tlv;
256 passphrase->tlv.type = cpu_to_le16(TLV_TYPE_UAP_WPA_PASSPHRASE);
257 passphrase->tlv.len = cpu_to_le16(bss_cfg->wpa_cfg.length);
258 memcpy(passphrase->passphrase, bss_cfg->wpa_cfg.passphrase,
259 bss_cfg->wpa_cfg.length);
260 cmd_size += sizeof(struct host_cmd_tlv) +
261 bss_cfg->wpa_cfg.length;
262 tlv += sizeof(struct host_cmd_tlv) + bss_cfg->wpa_cfg.length;
263 }
264
265 *param_size = cmd_size;
266 *tlv_buf = tlv;
267
268 return;
269}
270
271/* This function parses BSS related parameters from structure
272 * and prepares TLVs specific to WEP encryption.
273 * These TLVs are appended to command buffer.
274 */
275static void
276mwifiex_uap_bss_wep(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
277{
278 struct host_cmd_tlv_wep_key *wep_key;
279 u16 cmd_size = *param_size;
280 int i;
281 u8 *tlv = *tlv_buf;
282 struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
283
284 for (i = 0; i < NUM_WEP_KEYS; i++) {
285 if (bss_cfg->wep_cfg[i].length &&
286 (bss_cfg->wep_cfg[i].length == WLAN_KEY_LEN_WEP40 ||
287 bss_cfg->wep_cfg[i].length == WLAN_KEY_LEN_WEP104)) {
288 wep_key = (struct host_cmd_tlv_wep_key *)tlv;
289 wep_key->tlv.type = cpu_to_le16(TLV_TYPE_UAP_WEP_KEY);
290 wep_key->tlv.len =
291 cpu_to_le16(bss_cfg->wep_cfg[i].length + 2);
292 wep_key->key_index = bss_cfg->wep_cfg[i].key_index;
293 wep_key->is_default = bss_cfg->wep_cfg[i].is_default;
294 memcpy(wep_key->key, bss_cfg->wep_cfg[i].key,
295 bss_cfg->wep_cfg[i].length);
296 cmd_size += sizeof(struct host_cmd_tlv) + 2 +
297 bss_cfg->wep_cfg[i].length;
298 tlv += sizeof(struct host_cmd_tlv) + 2 +
299 bss_cfg->wep_cfg[i].length;
300 }
301 }
302
303 *param_size = cmd_size;
304 *tlv_buf = tlv;
305
306 return;
307}
308
309/* This function parses BSS related parameters from structure
138 * and prepares TLVs. These TLVs are appended to command buffer. 310 * and prepares TLVs. These TLVs are appended to command buffer.
139*/ 311*/
140static int 312static int
@@ -148,12 +320,9 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
148 struct host_cmd_tlv_frag_threshold *frag_threshold; 320 struct host_cmd_tlv_frag_threshold *frag_threshold;
149 struct host_cmd_tlv_rts_threshold *rts_threshold; 321 struct host_cmd_tlv_rts_threshold *rts_threshold;
150 struct host_cmd_tlv_retry_limit *retry_limit; 322 struct host_cmd_tlv_retry_limit *retry_limit;
151 struct host_cmd_tlv_pwk_cipher *pwk_cipher;
152 struct host_cmd_tlv_gwk_cipher *gwk_cipher;
153 struct host_cmd_tlv_encrypt_protocol *encrypt_protocol; 323 struct host_cmd_tlv_encrypt_protocol *encrypt_protocol;
154 struct host_cmd_tlv_auth_type *auth_type; 324 struct host_cmd_tlv_auth_type *auth_type;
155 struct host_cmd_tlv_passphrase *passphrase; 325 struct mwifiex_ie_types_htcap *htcap;
156 struct host_cmd_tlv_akmp *tlv_akmp;
157 struct mwifiex_uap_bss_param *bss_cfg = cmd_buf; 326 struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
158 u16 cmd_size = *param_size; 327 u16 cmd_size = *param_size;
159 328
@@ -243,70 +412,11 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
243 } 412 }
244 if ((bss_cfg->protocol & PROTOCOL_WPA) || 413 if ((bss_cfg->protocol & PROTOCOL_WPA) ||
245 (bss_cfg->protocol & PROTOCOL_WPA2) || 414 (bss_cfg->protocol & PROTOCOL_WPA2) ||
246 (bss_cfg->protocol & PROTOCOL_EAP)) { 415 (bss_cfg->protocol & PROTOCOL_EAP))
247 tlv_akmp = (struct host_cmd_tlv_akmp *)tlv; 416 mwifiex_uap_bss_wpa(&tlv, cmd_buf, &cmd_size);
248 tlv_akmp->tlv.type = cpu_to_le16(TLV_TYPE_UAP_AKMP); 417 else
249 tlv_akmp->tlv.len = 418 mwifiex_uap_bss_wep(&tlv, cmd_buf, &cmd_size);
250 cpu_to_le16(sizeof(struct host_cmd_tlv_akmp) - 419
251 sizeof(struct host_cmd_tlv));
252 tlv_akmp->key_mgmt_operation =
253 cpu_to_le16(bss_cfg->key_mgmt_operation);
254 tlv_akmp->key_mgmt = cpu_to_le16(bss_cfg->key_mgmt);
255 cmd_size += sizeof(struct host_cmd_tlv_akmp);
256 tlv += sizeof(struct host_cmd_tlv_akmp);
257
258 if (bss_cfg->wpa_cfg.pairwise_cipher_wpa &
259 VALID_CIPHER_BITMAP) {
260 pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
261 pwk_cipher->tlv.type =
262 cpu_to_le16(TLV_TYPE_PWK_CIPHER);
263 pwk_cipher->tlv.len = cpu_to_le16(
264 sizeof(struct host_cmd_tlv_pwk_cipher) -
265 sizeof(struct host_cmd_tlv));
266 pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA);
267 pwk_cipher->cipher =
268 bss_cfg->wpa_cfg.pairwise_cipher_wpa;
269 cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
270 tlv += sizeof(struct host_cmd_tlv_pwk_cipher);
271 }
272 if (bss_cfg->wpa_cfg.pairwise_cipher_wpa2 &
273 VALID_CIPHER_BITMAP) {
274 pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
275 pwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
276 pwk_cipher->tlv.len = cpu_to_le16(
277 sizeof(struct host_cmd_tlv_pwk_cipher) -
278 sizeof(struct host_cmd_tlv));
279 pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA2);
280 pwk_cipher->cipher =
281 bss_cfg->wpa_cfg.pairwise_cipher_wpa2;
282 cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
283 tlv += sizeof(struct host_cmd_tlv_pwk_cipher);
284 }
285 if (bss_cfg->wpa_cfg.group_cipher & VALID_CIPHER_BITMAP) {
286 gwk_cipher = (struct host_cmd_tlv_gwk_cipher *)tlv;
287 gwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_GWK_CIPHER);
288 gwk_cipher->tlv.len = cpu_to_le16(
289 sizeof(struct host_cmd_tlv_gwk_cipher) -
290 sizeof(struct host_cmd_tlv));
291 gwk_cipher->cipher = bss_cfg->wpa_cfg.group_cipher;
292 cmd_size += sizeof(struct host_cmd_tlv_gwk_cipher);
293 tlv += sizeof(struct host_cmd_tlv_gwk_cipher);
294 }
295 if (bss_cfg->wpa_cfg.length) {
296 passphrase = (struct host_cmd_tlv_passphrase *)tlv;
297 passphrase->tlv.type =
298 cpu_to_le16(TLV_TYPE_UAP_WPA_PASSPHRASE);
299 passphrase->tlv.len =
300 cpu_to_le16(bss_cfg->wpa_cfg.length);
301 memcpy(passphrase->passphrase,
302 bss_cfg->wpa_cfg.passphrase,
303 bss_cfg->wpa_cfg.length);
304 cmd_size += sizeof(struct host_cmd_tlv) +
305 bss_cfg->wpa_cfg.length;
306 tlv += sizeof(struct host_cmd_tlv) +
307 bss_cfg->wpa_cfg.length;
308 }
309 }
310 if ((bss_cfg->auth_mode <= WLAN_AUTH_SHARED_KEY) || 420 if ((bss_cfg->auth_mode <= WLAN_AUTH_SHARED_KEY) ||
311 (bss_cfg->auth_mode == MWIFIEX_AUTH_MODE_AUTO)) { 421 (bss_cfg->auth_mode == MWIFIEX_AUTH_MODE_AUTO)) {
312 auth_type = (struct host_cmd_tlv_auth_type *)tlv; 422 auth_type = (struct host_cmd_tlv_auth_type *)tlv;
@@ -330,6 +440,25 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
330 tlv += sizeof(struct host_cmd_tlv_encrypt_protocol); 440 tlv += sizeof(struct host_cmd_tlv_encrypt_protocol);
331 } 441 }
332 442
443 if (bss_cfg->ht_cap.cap_info) {
444 htcap = (struct mwifiex_ie_types_htcap *)tlv;
445 htcap->header.type = cpu_to_le16(WLAN_EID_HT_CAPABILITY);
446 htcap->header.len =
447 cpu_to_le16(sizeof(struct ieee80211_ht_cap));
448 htcap->ht_cap.cap_info = bss_cfg->ht_cap.cap_info;
449 htcap->ht_cap.ampdu_params_info =
450 bss_cfg->ht_cap.ampdu_params_info;
451 memcpy(&htcap->ht_cap.mcs, &bss_cfg->ht_cap.mcs,
452 sizeof(struct ieee80211_mcs_info));
453 htcap->ht_cap.extended_ht_cap_info =
454 bss_cfg->ht_cap.extended_ht_cap_info;
455 htcap->ht_cap.tx_BF_cap_info = bss_cfg->ht_cap.tx_BF_cap_info;
456 htcap->ht_cap.antenna_selection_info =
457 bss_cfg->ht_cap.antenna_selection_info;
458 cmd_size += sizeof(struct mwifiex_ie_types_htcap);
459 tlv += sizeof(struct mwifiex_ie_types_htcap);
460 }
461
333 *param_size = cmd_size; 462 *param_size = cmd_size;
334 463
335 return 0; 464 return 0;
@@ -421,33 +550,3 @@ int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, u16 cmd_no,
421 550
422 return 0; 551 return 0;
423} 552}
424
425/* This function sets the RF channel for AP.
426 *
427 * This function populates channel information in AP config structure
428 * and sends command to configure channel information in AP.
429 */
430int mwifiex_uap_set_channel(struct mwifiex_private *priv, int channel)
431{
432 struct mwifiex_uap_bss_param *bss_cfg;
433 struct wiphy *wiphy = priv->wdev->wiphy;
434
435 bss_cfg = kzalloc(sizeof(struct mwifiex_uap_bss_param), GFP_KERNEL);
436 if (!bss_cfg)
437 return -ENOMEM;
438
439 mwifiex_set_sys_config_invalid_data(bss_cfg);
440 bss_cfg->band_cfg = BAND_CONFIG_MANUAL;
441 bss_cfg->channel = channel;
442
443 if (mwifiex_send_cmd_async(priv, HostCmd_CMD_UAP_SYS_CONFIG,
444 HostCmd_ACT_GEN_SET,
445 UAP_BSS_PARAMS_I, bss_cfg)) {
446 wiphy_err(wiphy, "Failed to set the uAP channel\n");
447 kfree(bss_cfg);
448 return -1;
449 }
450
451 kfree(bss_cfg);
452 return 0;
453}
diff --git a/drivers/net/wireless/orinoco/cfg.c b/drivers/net/wireless/orinoco/cfg.c
index f7b15b8934fa..e15675585fb1 100644
--- a/drivers/net/wireless/orinoco/cfg.c
+++ b/drivers/net/wireless/orinoco/cfg.c
@@ -160,10 +160,9 @@ static int orinoco_scan(struct wiphy *wiphy, struct net_device *dev,
160 return err; 160 return err;
161} 161}
162 162
163static int orinoco_set_channel(struct wiphy *wiphy, 163static int orinoco_set_monitor_channel(struct wiphy *wiphy,
164 struct net_device *netdev, 164 struct ieee80211_channel *chan,
165 struct ieee80211_channel *chan, 165 enum nl80211_channel_type channel_type)
166 enum nl80211_channel_type channel_type)
167{ 166{
168 struct orinoco_private *priv = wiphy_priv(wiphy); 167 struct orinoco_private *priv = wiphy_priv(wiphy);
169 int err = 0; 168 int err = 0;
@@ -286,7 +285,7 @@ static int orinoco_set_wiphy_params(struct wiphy *wiphy, u32 changed)
286 285
287const struct cfg80211_ops orinoco_cfg_ops = { 286const struct cfg80211_ops orinoco_cfg_ops = {
288 .change_virtual_intf = orinoco_change_vif, 287 .change_virtual_intf = orinoco_change_vif,
289 .set_channel = orinoco_set_channel, 288 .set_monitor_channel = orinoco_set_monitor_channel,
290 .scan = orinoco_scan, 289 .scan = orinoco_scan,
291 .set_wiphy_params = orinoco_set_wiphy_params, 290 .set_wiphy_params = orinoco_set_wiphy_params,
292}; 291};
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index fa8ce5104781..14037092ba89 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -857,7 +857,7 @@ good_eeprom:
857 857
858 wiphy_warn(dev->wiphy, 858 wiphy_warn(dev->wiphy,
859 "Invalid hwaddr! Using randomly generated MAC addr\n"); 859 "Invalid hwaddr! Using randomly generated MAC addr\n");
860 random_ether_addr(perm_addr); 860 eth_random_addr(perm_addr);
861 SET_IEEE80211_PERM_ADDR(dev, perm_addr); 861 SET_IEEE80211_PERM_ADDR(dev, perm_addr);
862 } 862 }
863 863
@@ -905,7 +905,7 @@ int p54_read_eeprom(struct ieee80211_hw *dev)
905 905
906 while (eeprom_size) { 906 while (eeprom_size) {
907 blocksize = min(eeprom_size, maxblocksize); 907 blocksize = min(eeprom_size, maxblocksize);
908 ret = p54_download_eeprom(priv, (void *) (eeprom + offset), 908 ret = p54_download_eeprom(priv, eeprom + offset,
909 offset, blocksize); 909 offset, blocksize);
910 if (unlikely(ret)) 910 if (unlikely(ret))
911 goto free; 911 goto free;
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index 18e82b31afa6..9ba85106eec0 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -478,7 +478,7 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)
478 478
479 if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) { 479 if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) {
480 memcpy(&body->longbow.curve_data, 480 memcpy(&body->longbow.curve_data,
481 (void *) entry + sizeof(__le16), 481 entry + sizeof(__le16),
482 priv->curve_data->entry_size); 482 priv->curve_data->entry_size);
483 } else { 483 } else {
484 struct p54_scan_body *chan = &body->normal; 484 struct p54_scan_body *chan = &body->normal;
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 82a1cac920bd..f38786e02623 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -422,11 +422,11 @@ static void p54_rx_frame_sent(struct p54_common *priv, struct sk_buff *skb)
422 * Clear manually, ieee80211_tx_info_clear_status would 422 * Clear manually, ieee80211_tx_info_clear_status would
423 * clear the counts too and we need them. 423 * clear the counts too and we need them.
424 */ 424 */
425 memset(&info->status.ampdu_ack_len, 0, 425 memset(&info->status.ack_signal, 0,
426 sizeof(struct ieee80211_tx_info) - 426 sizeof(struct ieee80211_tx_info) -
427 offsetof(struct ieee80211_tx_info, status.ampdu_ack_len)); 427 offsetof(struct ieee80211_tx_info, status.ack_signal));
428 BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, 428 BUILD_BUG_ON(offsetof(struct ieee80211_tx_info,
429 status.ampdu_ack_len) != 23); 429 status.ack_signal) != 20);
430 430
431 if (entry_hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN)) 431 if (entry_hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN))
432 pad = entry_data->align[0]; 432 pad = entry_data->align[0];
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index 266d45bf86f5..799e148d0370 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -455,7 +455,7 @@ islpci_eth_receive(islpci_private *priv)
455 "Error mapping DMA address\n"); 455 "Error mapping DMA address\n");
456 456
457 /* free the skbuf structure before aborting */ 457 /* free the skbuf structure before aborting */
458 dev_kfree_skb_irq((struct sk_buff *) skb); 458 dev_kfree_skb_irq(skb);
459 skb = NULL; 459 skb = NULL;
460 break; 460 break;
461 } 461 }
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 86a738bf591c..598ca1cafb95 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -1849,7 +1849,7 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
1849 pr_debug("ray_cs: interrupt for *dev=%p\n", dev); 1849 pr_debug("ray_cs: interrupt for *dev=%p\n", dev);
1850 1850
1851 local = netdev_priv(dev); 1851 local = netdev_priv(dev);
1852 link = (struct pcmcia_device *)local->finder; 1852 link = local->finder;
1853 if (!pcmcia_dev_present(link)) { 1853 if (!pcmcia_dev_present(link)) {
1854 pr_debug( 1854 pr_debug(
1855 "ray_cs interrupt from device not present or suspended.\n"); 1855 "ray_cs interrupt from device not present or suspended.\n");
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 299c3879582d..c7548da6573d 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -99,6 +99,14 @@ config RT2800PCI_RT53XX
99 rt2800pci driver. 99 rt2800pci driver.
100 Supported chips: RT5390 100 Supported chips: RT5390
101 101
102config RT2800PCI_RT3290
103 bool "rt2800pci - Include support for rt3290 devices (EXPERIMENTAL)"
104 depends on EXPERIMENTAL
105 default y
106 ---help---
107 This adds support for rt3290 wireless chipset family to the
108 rt2800pci driver.
109 Supported chips: RT3290
102endif 110endif
103 111
104config RT2500USB 112config RT2500USB
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 5e6b50143165..8b9dbd76a252 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -1455,7 +1455,7 @@ static int rt2400pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1455 */ 1455 */
1456 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); 1456 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
1457 if (!is_valid_ether_addr(mac)) { 1457 if (!is_valid_ether_addr(mac)) {
1458 random_ether_addr(mac); 1458 eth_random_addr(mac);
1459 EEPROM(rt2x00dev, "MAC: %pM\n", mac); 1459 EEPROM(rt2x00dev, "MAC: %pM\n", mac);
1460 } 1460 }
1461 1461
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 136b849f11b5..d2cf8a4bc8b5 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1585,7 +1585,7 @@ static int rt2500pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1585 */ 1585 */
1586 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); 1586 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
1587 if (!is_valid_ether_addr(mac)) { 1587 if (!is_valid_ether_addr(mac)) {
1588 random_ether_addr(mac); 1588 eth_random_addr(mac);
1589 EEPROM(rt2x00dev, "MAC: %pM\n", mac); 1589 EEPROM(rt2x00dev, "MAC: %pM\n", mac);
1590 } 1590 }
1591 1591
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 669aecdb411d..3aae36bb0a9e 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -1352,7 +1352,7 @@ static int rt2500usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1352 */ 1352 */
1353 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); 1353 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
1354 if (!is_valid_ether_addr(mac)) { 1354 if (!is_valid_ether_addr(mac)) {
1355 random_ether_addr(mac); 1355 eth_random_addr(mac);
1356 EEPROM(rt2x00dev, "MAC: %pM\n", mac); 1356 EEPROM(rt2x00dev, "MAC: %pM\n", mac);
1357 } 1357 }
1358 1358
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 9348521e0832..e252e9bafd0e 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -51,6 +51,7 @@
51 * RF3320 2.4G 1T1R(RT3350/RT3370/RT3390) 51 * RF3320 2.4G 1T1R(RT3350/RT3370/RT3390)
52 * RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392) 52 * RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392)
53 * RF3053 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662) 53 * RF3053 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
54 * RF5360 2.4G 1T1R
54 * RF5370 2.4G 1T1R 55 * RF5370 2.4G 1T1R
55 * RF5390 2.4G 1T1R 56 * RF5390 2.4G 1T1R
56 */ 57 */
@@ -67,9 +68,12 @@
67#define RF3320 0x000b 68#define RF3320 0x000b
68#define RF3322 0x000c 69#define RF3322 0x000c
69#define RF3053 0x000d 70#define RF3053 0x000d
71#define RF3290 0x3290
72#define RF5360 0x5360
70#define RF5370 0x5370 73#define RF5370 0x5370
71#define RF5372 0x5372 74#define RF5372 0x5372
72#define RF5390 0x5390 75#define RF5390 0x5390
76#define RF5392 0x5392
73 77
74/* 78/*
75 * Chipset revisions. 79 * Chipset revisions.
@@ -114,6 +118,12 @@
114 * Registers. 118 * Registers.
115 */ 119 */
116 120
121
122/*
123 * MAC_CSR0_3290: MAC_CSR0 for RT3290 to identity MAC version number.
124 */
125#define MAC_CSR0_3290 0x0000
126
117/* 127/*
118 * E2PROM_CSR: PCI EEPROM control register. 128 * E2PROM_CSR: PCI EEPROM control register.
119 * RELOAD: Write 1 to reload eeprom content. 129 * RELOAD: Write 1 to reload eeprom content.
@@ -130,6 +140,150 @@
130#define E2PROM_CSR_RELOAD FIELD32(0x00000080) 140#define E2PROM_CSR_RELOAD FIELD32(0x00000080)
131 141
132/* 142/*
143 * CMB_CTRL_CFG
144 */
145#define CMB_CTRL 0x0020
146#define AUX_OPT_BIT0 FIELD32(0x00000001)
147#define AUX_OPT_BIT1 FIELD32(0x00000002)
148#define AUX_OPT_BIT2 FIELD32(0x00000004)
149#define AUX_OPT_BIT3 FIELD32(0x00000008)
150#define AUX_OPT_BIT4 FIELD32(0x00000010)
151#define AUX_OPT_BIT5 FIELD32(0x00000020)
152#define AUX_OPT_BIT6 FIELD32(0x00000040)
153#define AUX_OPT_BIT7 FIELD32(0x00000080)
154#define AUX_OPT_BIT8 FIELD32(0x00000100)
155#define AUX_OPT_BIT9 FIELD32(0x00000200)
156#define AUX_OPT_BIT10 FIELD32(0x00000400)
157#define AUX_OPT_BIT11 FIELD32(0x00000800)
158#define AUX_OPT_BIT12 FIELD32(0x00001000)
159#define AUX_OPT_BIT13 FIELD32(0x00002000)
160#define AUX_OPT_BIT14 FIELD32(0x00004000)
161#define AUX_OPT_BIT15 FIELD32(0x00008000)
162#define LDO25_LEVEL FIELD32(0x00030000)
163#define LDO25_LARGEA FIELD32(0x00040000)
164#define LDO25_FRC_ON FIELD32(0x00080000)
165#define CMB_RSV FIELD32(0x00300000)
166#define XTAL_RDY FIELD32(0x00400000)
167#define PLL_LD FIELD32(0x00800000)
168#define LDO_CORE_LEVEL FIELD32(0x0F000000)
169#define LDO_BGSEL FIELD32(0x30000000)
170#define LDO3_EN FIELD32(0x40000000)
171#define LDO0_EN FIELD32(0x80000000)
172
173/*
174 * EFUSE_CSR_3290: RT3290 EEPROM
175 */
176#define EFUSE_CTRL_3290 0x0024
177
178/*
179 * EFUSE_DATA3 of 3290
180 */
181#define EFUSE_DATA3_3290 0x0028
182
183/*
184 * EFUSE_DATA2 of 3290
185 */
186#define EFUSE_DATA2_3290 0x002c
187
188/*
189 * EFUSE_DATA1 of 3290
190 */
191#define EFUSE_DATA1_3290 0x0030
192
193/*
194 * EFUSE_DATA0 of 3290
195 */
196#define EFUSE_DATA0_3290 0x0034
197
198/*
199 * OSC_CTRL_CFG
200 * Ring oscillator configuration
201 */
202#define OSC_CTRL 0x0038
203#define OSC_REF_CYCLE FIELD32(0x00001fff)
204#define OSC_RSV FIELD32(0x0000e000)
205#define OSC_CAL_CNT FIELD32(0x0fff0000)
206#define OSC_CAL_ACK FIELD32(0x10000000)
207#define OSC_CLK_32K_VLD FIELD32(0x20000000)
208#define OSC_CAL_REQ FIELD32(0x40000000)
209#define OSC_ROSC_EN FIELD32(0x80000000)
210
211/*
212 * COEX_CFG_0
213 */
214#define COEX_CFG0 0x0040
215#define COEX_CFG_ANT FIELD32(0xff000000)
216/*
217 * COEX_CFG_1
218 */
219#define COEX_CFG1 0x0044
220
221/*
222 * COEX_CFG_2
223 */
224#define COEX_CFG2 0x0048
225#define BT_COEX_CFG1 FIELD32(0xff000000)
226#define BT_COEX_CFG0 FIELD32(0x00ff0000)
227#define WL_COEX_CFG1 FIELD32(0x0000ff00)
228#define WL_COEX_CFG0 FIELD32(0x000000ff)
229/*
230 * PLL_CTRL_CFG
231 * PLL configuration register
232 */
233#define PLL_CTRL 0x0050
234#define PLL_RESERVED_INPUT1 FIELD32(0x000000ff)
235#define PLL_RESERVED_INPUT2 FIELD32(0x0000ff00)
236#define PLL_CONTROL FIELD32(0x00070000)
237#define PLL_LPF_R1 FIELD32(0x00080000)
238#define PLL_LPF_C1_CTRL FIELD32(0x00300000)
239#define PLL_LPF_C2_CTRL FIELD32(0x00c00000)
240#define PLL_CP_CURRENT_CTRL FIELD32(0x03000000)
241#define PLL_PFD_DELAY_CTRL FIELD32(0x0c000000)
242#define PLL_LOCK_CTRL FIELD32(0x70000000)
243#define PLL_VBGBK_EN FIELD32(0x80000000)
244
245
246/*
247 * WLAN_CTRL_CFG
248 * RT3290 wlan configuration
249 */
250#define WLAN_FUN_CTRL 0x0080
251#define WLAN_EN FIELD32(0x00000001)
252#define WLAN_CLK_EN FIELD32(0x00000002)
253#define WLAN_RSV1 FIELD32(0x00000004)
254#define WLAN_RESET FIELD32(0x00000008)
255#define PCIE_APP0_CLK_REQ FIELD32(0x00000010)
256#define FRC_WL_ANT_SET FIELD32(0x00000020)
257#define INV_TR_SW0 FIELD32(0x00000040)
258#define WLAN_GPIO_IN_BIT0 FIELD32(0x00000100)
259#define WLAN_GPIO_IN_BIT1 FIELD32(0x00000200)
260#define WLAN_GPIO_IN_BIT2 FIELD32(0x00000400)
261#define WLAN_GPIO_IN_BIT3 FIELD32(0x00000800)
262#define WLAN_GPIO_IN_BIT4 FIELD32(0x00001000)
263#define WLAN_GPIO_IN_BIT5 FIELD32(0x00002000)
264#define WLAN_GPIO_IN_BIT6 FIELD32(0x00004000)
265#define WLAN_GPIO_IN_BIT7 FIELD32(0x00008000)
266#define WLAN_GPIO_IN_BIT_ALL FIELD32(0x0000ff00)
267#define WLAN_GPIO_OUT_BIT0 FIELD32(0x00010000)
268#define WLAN_GPIO_OUT_BIT1 FIELD32(0x00020000)
269#define WLAN_GPIO_OUT_BIT2 FIELD32(0x00040000)
270#define WLAN_GPIO_OUT_BIT3 FIELD32(0x00050000)
271#define WLAN_GPIO_OUT_BIT4 FIELD32(0x00100000)
272#define WLAN_GPIO_OUT_BIT5 FIELD32(0x00200000)
273#define WLAN_GPIO_OUT_BIT6 FIELD32(0x00400000)
274#define WLAN_GPIO_OUT_BIT7 FIELD32(0x00800000)
275#define WLAN_GPIO_OUT_BIT_ALL FIELD32(0x00ff0000)
276#define WLAN_GPIO_OUT_OE_BIT0 FIELD32(0x01000000)
277#define WLAN_GPIO_OUT_OE_BIT1 FIELD32(0x02000000)
278#define WLAN_GPIO_OUT_OE_BIT2 FIELD32(0x04000000)
279#define WLAN_GPIO_OUT_OE_BIT3 FIELD32(0x08000000)
280#define WLAN_GPIO_OUT_OE_BIT4 FIELD32(0x10000000)
281#define WLAN_GPIO_OUT_OE_BIT5 FIELD32(0x20000000)
282#define WLAN_GPIO_OUT_OE_BIT6 FIELD32(0x40000000)
283#define WLAN_GPIO_OUT_OE_BIT7 FIELD32(0x80000000)
284#define WLAN_GPIO_OUT_OE_BIT_ALL FIELD32(0xff000000)
285
286/*
133 * AUX_CTRL: Aux/PCI-E related configuration 287 * AUX_CTRL: Aux/PCI-E related configuration
134 */ 288 */
135#define AUX_CTRL 0x10c 289#define AUX_CTRL 0x10c
@@ -1760,9 +1914,11 @@ struct mac_iveiv_entry {
1760/* 1914/*
1761 * BBP 3: RX Antenna 1915 * BBP 3: RX Antenna
1762 */ 1916 */
1763#define BBP3_RX_ADC FIELD8(0x03) 1917#define BBP3_RX_ADC FIELD8(0x03)
1764#define BBP3_RX_ANTENNA FIELD8(0x18) 1918#define BBP3_RX_ANTENNA FIELD8(0x18)
1765#define BBP3_HT40_MINUS FIELD8(0x20) 1919#define BBP3_HT40_MINUS FIELD8(0x20)
1920#define BBP3_ADC_MODE_SWITCH FIELD8(0x40)
1921#define BBP3_ADC_INIT_MODE FIELD8(0x80)
1766 1922
1767/* 1923/*
1768 * BBP 4: Bandwidth 1924 * BBP 4: Bandwidth
@@ -1772,6 +1928,14 @@ struct mac_iveiv_entry {
1772#define BBP4_MAC_IF_CTRL FIELD8(0x40) 1928#define BBP4_MAC_IF_CTRL FIELD8(0x40)
1773 1929
1774/* 1930/*
1931 * BBP 47: Bandwidth
1932 */
1933#define BBP47_TSSI_REPORT_SEL FIELD8(0x03)
1934#define BBP47_TSSI_UPDATE_REQ FIELD8(0x04)
1935#define BBP47_TSSI_TSSI_MODE FIELD8(0x18)
1936#define BBP47_TSSI_ADC6 FIELD8(0x80)
1937
1938/*
1775 * BBP 109 1939 * BBP 109
1776 */ 1940 */
1777#define BBP109_TX0_POWER FIELD8(0x0f) 1941#define BBP109_TX0_POWER FIELD8(0x0f)
@@ -1914,6 +2078,16 @@ struct mac_iveiv_entry {
1914#define RFCSR27_R4 FIELD8(0x40) 2078#define RFCSR27_R4 FIELD8(0x40)
1915 2079
1916/* 2080/*
2081 * RFCSR 29:
2082 */
2083#define RFCSR29_ADC6_TEST FIELD8(0x01)
2084#define RFCSR29_ADC6_INT_TEST FIELD8(0x02)
2085#define RFCSR29_RSSI_RESET FIELD8(0x04)
2086#define RFCSR29_RSSI_ON FIELD8(0x08)
2087#define RFCSR29_RSSI_RIP_CTRL FIELD8(0x30)
2088#define RFCSR29_RSSI_GAIN FIELD8(0xc0)
2089
2090/*
1917 * RFCSR 30: 2091 * RFCSR 30:
1918 */ 2092 */
1919#define RFCSR30_TX_H20M FIELD8(0x02) 2093#define RFCSR30_TX_H20M FIELD8(0x02)
@@ -1944,6 +2118,11 @@ struct mac_iveiv_entry {
1944#define RFCSR49_TX FIELD8(0x3f) 2118#define RFCSR49_TX FIELD8(0x3f)
1945 2119
1946/* 2120/*
2121 * RFCSR 50:
2122 */
2123#define RFCSR50_TX FIELD8(0x3f)
2124
2125/*
1947 * RF registers 2126 * RF registers
1948 */ 2127 */
1949 2128
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index dfc90d34be6d..88455b1b9fe0 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -354,16 +354,15 @@ int rt2800_check_firmware(struct rt2x00_dev *rt2x00dev,
354 * of 4kb. Certain USB chipsets however require different firmware, 354 * of 4kb. Certain USB chipsets however require different firmware,
355 * which Ralink only provides attached to the original firmware 355 * which Ralink only provides attached to the original firmware
356 * file. Thus for USB devices, firmware files have a length 356 * file. Thus for USB devices, firmware files have a length
357 * which is a multiple of 4kb. 357 * which is a multiple of 4kb. The firmware for rt3290 chip also
358 * have a length which is a multiple of 4kb.
358 */ 359 */
359 if (rt2x00_is_usb(rt2x00dev)) { 360 if (rt2x00_is_usb(rt2x00dev) || rt2x00_rt(rt2x00dev, RT3290))
360 fw_len = 4096; 361 fw_len = 4096;
361 multiple = true; 362 else
362 } else {
363 fw_len = 8192; 363 fw_len = 8192;
364 multiple = true;
365 }
366 364
365 multiple = true;
367 /* 366 /*
368 * Validate the firmware length 367 * Validate the firmware length
369 */ 368 */
@@ -415,7 +414,8 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
415 return -EBUSY; 414 return -EBUSY;
416 415
417 if (rt2x00_is_pci(rt2x00dev)) { 416 if (rt2x00_is_pci(rt2x00dev)) {
418 if (rt2x00_rt(rt2x00dev, RT3572) || 417 if (rt2x00_rt(rt2x00dev, RT3290) ||
418 rt2x00_rt(rt2x00dev, RT3572) ||
419 rt2x00_rt(rt2x00dev, RT5390) || 419 rt2x00_rt(rt2x00dev, RT5390) ||
420 rt2x00_rt(rt2x00dev, RT5392)) { 420 rt2x00_rt(rt2x00dev, RT5392)) {
421 rt2800_register_read(rt2x00dev, AUX_CTRL, &reg); 421 rt2800_register_read(rt2x00dev, AUX_CTRL, &reg);
@@ -851,8 +851,13 @@ int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev)
851{ 851{
852 u32 reg; 852 u32 reg;
853 853
854 rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg); 854 if (rt2x00_rt(rt2x00dev, RT3290)) {
855 return rt2x00_get_field32(reg, GPIO_CTRL_CFG_BIT2); 855 rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
856 return rt2x00_get_field32(reg, WLAN_GPIO_IN_BIT0);
857 } else {
858 rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
859 return rt2x00_get_field32(reg, GPIO_CTRL_CFG_BIT2);
860 }
856} 861}
857EXPORT_SYMBOL_GPL(rt2800_rfkill_poll); 862EXPORT_SYMBOL_GPL(rt2800_rfkill_poll);
858 863
@@ -1935,8 +1940,50 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
1935 rt2800_rfcsr_write(rt2x00dev, 7, rfcsr); 1940 rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
1936} 1941}
1937 1942
1938#define RT5390_POWER_BOUND 0x27 1943#define POWER_BOUND 0x27
1939#define RT5390_FREQ_OFFSET_BOUND 0x5f 1944#define FREQ_OFFSET_BOUND 0x5f
1945
1946static void rt2800_config_channel_rf3290(struct rt2x00_dev *rt2x00dev,
1947 struct ieee80211_conf *conf,
1948 struct rf_channel *rf,
1949 struct channel_info *info)
1950{
1951 u8 rfcsr;
1952
1953 rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1);
1954 rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3);
1955 rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr);
1956 rt2x00_set_field8(&rfcsr, RFCSR11_R, rf->rf2);
1957 rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);
1958
1959 rt2800_rfcsr_read(rt2x00dev, 49, &rfcsr);
1960 if (info->default_power1 > POWER_BOUND)
1961 rt2x00_set_field8(&rfcsr, RFCSR49_TX, POWER_BOUND);
1962 else
1963 rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1);
1964 rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
1965
1966 rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
1967 if (rt2x00dev->freq_offset > FREQ_OFFSET_BOUND)
1968 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, FREQ_OFFSET_BOUND);
1969 else
1970 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
1971 rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
1972
1973 if (rf->channel <= 14) {
1974 if (rf->channel == 6)
1975 rt2800_bbp_write(rt2x00dev, 68, 0x0c);
1976 else
1977 rt2800_bbp_write(rt2x00dev, 68, 0x0b);
1978
1979 if (rf->channel >= 1 && rf->channel <= 6)
1980 rt2800_bbp_write(rt2x00dev, 59, 0x0f);
1981 else if (rf->channel >= 7 && rf->channel <= 11)
1982 rt2800_bbp_write(rt2x00dev, 59, 0x0e);
1983 else if (rf->channel >= 12 && rf->channel <= 14)
1984 rt2800_bbp_write(rt2x00dev, 59, 0x0d);
1985 }
1986}
1940 1987
1941static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev, 1988static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
1942 struct ieee80211_conf *conf, 1989 struct ieee80211_conf *conf,
@@ -1952,13 +1999,27 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
1952 rt2800_rfcsr_write(rt2x00dev, 11, rfcsr); 1999 rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);
1953 2000
1954 rt2800_rfcsr_read(rt2x00dev, 49, &rfcsr); 2001 rt2800_rfcsr_read(rt2x00dev, 49, &rfcsr);
1955 if (info->default_power1 > RT5390_POWER_BOUND) 2002 if (info->default_power1 > POWER_BOUND)
1956 rt2x00_set_field8(&rfcsr, RFCSR49_TX, RT5390_POWER_BOUND); 2003 rt2x00_set_field8(&rfcsr, RFCSR49_TX, POWER_BOUND);
1957 else 2004 else
1958 rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1); 2005 rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1);
1959 rt2800_rfcsr_write(rt2x00dev, 49, rfcsr); 2006 rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
1960 2007
2008 if (rt2x00_rt(rt2x00dev, RT5392)) {
2009 rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr);
2010 if (info->default_power1 > POWER_BOUND)
2011 rt2x00_set_field8(&rfcsr, RFCSR50_TX, POWER_BOUND);
2012 else
2013 rt2x00_set_field8(&rfcsr, RFCSR50_TX,
2014 info->default_power2);
2015 rt2800_rfcsr_write(rt2x00dev, 50, rfcsr);
2016 }
2017
1961 rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr); 2018 rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
2019 if (rt2x00_rt(rt2x00dev, RT5392)) {
2020 rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
2021 rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
2022 }
1962 rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1); 2023 rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
1963 rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1); 2024 rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1);
1964 rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1); 2025 rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
@@ -1966,9 +2027,8 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
1966 rt2800_rfcsr_write(rt2x00dev, 1, rfcsr); 2027 rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
1967 2028
1968 rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr); 2029 rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
1969 if (rt2x00dev->freq_offset > RT5390_FREQ_OFFSET_BOUND) 2030 if (rt2x00dev->freq_offset > FREQ_OFFSET_BOUND)
1970 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, 2031 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, FREQ_OFFSET_BOUND);
1971 RT5390_FREQ_OFFSET_BOUND);
1972 else 2032 else
1973 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset); 2033 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
1974 rt2800_rfcsr_write(rt2x00dev, 17, rfcsr); 2034 rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
@@ -2021,15 +2081,6 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
2021 } 2081 }
2022 } 2082 }
2023 } 2083 }
2024
2025 rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
2026 rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, 0);
2027 rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, 0);
2028 rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
2029
2030 rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
2031 rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
2032 rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
2033} 2084}
2034 2085
2035static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, 2086static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
@@ -2039,7 +2090,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
2039{ 2090{
2040 u32 reg; 2091 u32 reg;
2041 unsigned int tx_pin; 2092 unsigned int tx_pin;
2042 u8 bbp; 2093 u8 bbp, rfcsr;
2043 2094
2044 if (rf->channel <= 14) { 2095 if (rf->channel <= 14) {
2045 info->default_power1 = TXPOWER_G_TO_DEV(info->default_power1); 2096 info->default_power1 = TXPOWER_G_TO_DEV(info->default_power1);
@@ -2060,15 +2111,36 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
2060 case RF3052: 2111 case RF3052:
2061 rt2800_config_channel_rf3052(rt2x00dev, conf, rf, info); 2112 rt2800_config_channel_rf3052(rt2x00dev, conf, rf, info);
2062 break; 2113 break;
2114 case RF3290:
2115 rt2800_config_channel_rf3290(rt2x00dev, conf, rf, info);
2116 break;
2117 case RF5360:
2063 case RF5370: 2118 case RF5370:
2064 case RF5372: 2119 case RF5372:
2065 case RF5390: 2120 case RF5390:
2121 case RF5392:
2066 rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info); 2122 rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info);
2067 break; 2123 break;
2068 default: 2124 default:
2069 rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info); 2125 rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
2070 } 2126 }
2071 2127
2128 if (rt2x00_rf(rt2x00dev, RF3290) ||
2129 rt2x00_rf(rt2x00dev, RF5360) ||
2130 rt2x00_rf(rt2x00dev, RF5370) ||
2131 rt2x00_rf(rt2x00dev, RF5372) ||
2132 rt2x00_rf(rt2x00dev, RF5390) ||
2133 rt2x00_rf(rt2x00dev, RF5392)) {
2134 rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
2135 rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, 0);
2136 rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, 0);
2137 rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
2138
2139 rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
2140 rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
2141 rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
2142 }
2143
2072 /* 2144 /*
2073 * Change BBP settings 2145 * Change BBP settings
2074 */ 2146 */
@@ -2549,9 +2621,12 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
2549 rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1); 2621 rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
2550 rt2800_rfcsr_write(rt2x00dev, 7, rfcsr); 2622 rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
2551 break; 2623 break;
2624 case RF3290:
2625 case RF5360:
2552 case RF5370: 2626 case RF5370:
2553 case RF5372: 2627 case RF5372:
2554 case RF5390: 2628 case RF5390:
2629 case RF5392:
2555 rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr); 2630 rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
2556 rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1); 2631 rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
2557 rt2800_rfcsr_write(rt2x00dev, 3, rfcsr); 2632 rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
@@ -2682,6 +2757,7 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
2682 if (rt2x00_rt(rt2x00dev, RT3070) || 2757 if (rt2x00_rt(rt2x00dev, RT3070) ||
2683 rt2x00_rt(rt2x00dev, RT3071) || 2758 rt2x00_rt(rt2x00dev, RT3071) ||
2684 rt2x00_rt(rt2x00dev, RT3090) || 2759 rt2x00_rt(rt2x00dev, RT3090) ||
2760 rt2x00_rt(rt2x00dev, RT3290) ||
2685 rt2x00_rt(rt2x00dev, RT3390) || 2761 rt2x00_rt(rt2x00dev, RT3390) ||
2686 rt2x00_rt(rt2x00dev, RT5390) || 2762 rt2x00_rt(rt2x00dev, RT5390) ||
2687 rt2x00_rt(rt2x00dev, RT5392)) 2763 rt2x00_rt(rt2x00dev, RT5392))
@@ -2778,10 +2854,54 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
2778 rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_CC_DELAY_TIME, 2); 2854 rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_CC_DELAY_TIME, 2);
2779 rt2800_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg); 2855 rt2800_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg);
2780 2856
2857 if (rt2x00_rt(rt2x00dev, RT3290)) {
2858 rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
2859 if (rt2x00_get_field32(reg, WLAN_EN) == 1) {
2860 rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 1);
2861 rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
2862 }
2863
2864 rt2800_register_read(rt2x00dev, CMB_CTRL, &reg);
2865 if (!(rt2x00_get_field32(reg, LDO0_EN) == 1)) {
2866 rt2x00_set_field32(&reg, LDO0_EN, 1);
2867 rt2x00_set_field32(&reg, LDO_BGSEL, 3);
2868 rt2800_register_write(rt2x00dev, CMB_CTRL, reg);
2869 }
2870
2871 rt2800_register_read(rt2x00dev, OSC_CTRL, &reg);
2872 rt2x00_set_field32(&reg, OSC_ROSC_EN, 1);
2873 rt2x00_set_field32(&reg, OSC_CAL_REQ, 1);
2874 rt2x00_set_field32(&reg, OSC_REF_CYCLE, 0x27);
2875 rt2800_register_write(rt2x00dev, OSC_CTRL, reg);
2876
2877 rt2800_register_read(rt2x00dev, COEX_CFG0, &reg);
2878 rt2x00_set_field32(&reg, COEX_CFG_ANT, 0x5e);
2879 rt2800_register_write(rt2x00dev, COEX_CFG0, reg);
2880
2881 rt2800_register_read(rt2x00dev, COEX_CFG2, &reg);
2882 rt2x00_set_field32(&reg, BT_COEX_CFG1, 0x00);
2883 rt2x00_set_field32(&reg, BT_COEX_CFG0, 0x17);
2884 rt2x00_set_field32(&reg, WL_COEX_CFG1, 0x93);
2885 rt2x00_set_field32(&reg, WL_COEX_CFG0, 0x7f);
2886 rt2800_register_write(rt2x00dev, COEX_CFG2, reg);
2887
2888 rt2800_register_read(rt2x00dev, PLL_CTRL, &reg);
2889 rt2x00_set_field32(&reg, PLL_CONTROL, 1);
2890 rt2800_register_write(rt2x00dev, PLL_CTRL, reg);
2891 }
2892
2781 if (rt2x00_rt(rt2x00dev, RT3071) || 2893 if (rt2x00_rt(rt2x00dev, RT3071) ||
2782 rt2x00_rt(rt2x00dev, RT3090) || 2894 rt2x00_rt(rt2x00dev, RT3090) ||
2895 rt2x00_rt(rt2x00dev, RT3290) ||
2783 rt2x00_rt(rt2x00dev, RT3390)) { 2896 rt2x00_rt(rt2x00dev, RT3390)) {
2784 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400); 2897
2898 if (rt2x00_rt(rt2x00dev, RT3290))
2899 rt2800_register_write(rt2x00dev, TX_SW_CFG0,
2900 0x00000404);
2901 else
2902 rt2800_register_write(rt2x00dev, TX_SW_CFG0,
2903 0x00000400);
2904
2785 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000); 2905 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
2786 if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) || 2906 if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
2787 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) || 2907 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
@@ -3190,14 +3310,16 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3190 rt2800_wait_bbp_ready(rt2x00dev))) 3310 rt2800_wait_bbp_ready(rt2x00dev)))
3191 return -EACCES; 3311 return -EACCES;
3192 3312
3193 if (rt2x00_rt(rt2x00dev, RT5390) || 3313 if (rt2x00_rt(rt2x00dev, RT3290) ||
3194 rt2x00_rt(rt2x00dev, RT5392)) { 3314 rt2x00_rt(rt2x00dev, RT5390) ||
3315 rt2x00_rt(rt2x00dev, RT5392)) {
3195 rt2800_bbp_read(rt2x00dev, 4, &value); 3316 rt2800_bbp_read(rt2x00dev, 4, &value);
3196 rt2x00_set_field8(&value, BBP4_MAC_IF_CTRL, 1); 3317 rt2x00_set_field8(&value, BBP4_MAC_IF_CTRL, 1);
3197 rt2800_bbp_write(rt2x00dev, 4, value); 3318 rt2800_bbp_write(rt2x00dev, 4, value);
3198 } 3319 }
3199 3320
3200 if (rt2800_is_305x_soc(rt2x00dev) || 3321 if (rt2800_is_305x_soc(rt2x00dev) ||
3322 rt2x00_rt(rt2x00dev, RT3290) ||
3201 rt2x00_rt(rt2x00dev, RT3572) || 3323 rt2x00_rt(rt2x00dev, RT3572) ||
3202 rt2x00_rt(rt2x00dev, RT5390) || 3324 rt2x00_rt(rt2x00dev, RT5390) ||
3203 rt2x00_rt(rt2x00dev, RT5392)) 3325 rt2x00_rt(rt2x00dev, RT5392))
@@ -3206,20 +3328,26 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3206 rt2800_bbp_write(rt2x00dev, 65, 0x2c); 3328 rt2800_bbp_write(rt2x00dev, 65, 0x2c);
3207 rt2800_bbp_write(rt2x00dev, 66, 0x38); 3329 rt2800_bbp_write(rt2x00dev, 66, 0x38);
3208 3330
3209 if (rt2x00_rt(rt2x00dev, RT5390) || 3331 if (rt2x00_rt(rt2x00dev, RT3290) ||
3210 rt2x00_rt(rt2x00dev, RT5392)) 3332 rt2x00_rt(rt2x00dev, RT5390) ||
3333 rt2x00_rt(rt2x00dev, RT5392))
3211 rt2800_bbp_write(rt2x00dev, 68, 0x0b); 3334 rt2800_bbp_write(rt2x00dev, 68, 0x0b);
3212 3335
3213 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) { 3336 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) {
3214 rt2800_bbp_write(rt2x00dev, 69, 0x16); 3337 rt2800_bbp_write(rt2x00dev, 69, 0x16);
3215 rt2800_bbp_write(rt2x00dev, 73, 0x12); 3338 rt2800_bbp_write(rt2x00dev, 73, 0x12);
3216 } else if (rt2x00_rt(rt2x00dev, RT5390) || 3339 } else if (rt2x00_rt(rt2x00dev, RT3290) ||
3217 rt2x00_rt(rt2x00dev, RT5392)) { 3340 rt2x00_rt(rt2x00dev, RT5390) ||
3341 rt2x00_rt(rt2x00dev, RT5392)) {
3218 rt2800_bbp_write(rt2x00dev, 69, 0x12); 3342 rt2800_bbp_write(rt2x00dev, 69, 0x12);
3219 rt2800_bbp_write(rt2x00dev, 73, 0x13); 3343 rt2800_bbp_write(rt2x00dev, 73, 0x13);
3220 rt2800_bbp_write(rt2x00dev, 75, 0x46); 3344 rt2800_bbp_write(rt2x00dev, 75, 0x46);
3221 rt2800_bbp_write(rt2x00dev, 76, 0x28); 3345 rt2800_bbp_write(rt2x00dev, 76, 0x28);
3222 rt2800_bbp_write(rt2x00dev, 77, 0x59); 3346
3347 if (rt2x00_rt(rt2x00dev, RT3290))
3348 rt2800_bbp_write(rt2x00dev, 77, 0x58);
3349 else
3350 rt2800_bbp_write(rt2x00dev, 77, 0x59);
3223 } else { 3351 } else {
3224 rt2800_bbp_write(rt2x00dev, 69, 0x12); 3352 rt2800_bbp_write(rt2x00dev, 69, 0x12);
3225 rt2800_bbp_write(rt2x00dev, 73, 0x10); 3353 rt2800_bbp_write(rt2x00dev, 73, 0x10);
@@ -3244,23 +3372,33 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3244 rt2800_bbp_write(rt2x00dev, 81, 0x37); 3372 rt2800_bbp_write(rt2x00dev, 81, 0x37);
3245 } 3373 }
3246 3374
3375 if (rt2x00_rt(rt2x00dev, RT3290)) {
3376 rt2800_bbp_write(rt2x00dev, 74, 0x0b);
3377 rt2800_bbp_write(rt2x00dev, 79, 0x18);
3378 rt2800_bbp_write(rt2x00dev, 80, 0x09);
3379 rt2800_bbp_write(rt2x00dev, 81, 0x33);
3380 }
3381
3247 rt2800_bbp_write(rt2x00dev, 82, 0x62); 3382 rt2800_bbp_write(rt2x00dev, 82, 0x62);
3248 if (rt2x00_rt(rt2x00dev, RT5390) || 3383 if (rt2x00_rt(rt2x00dev, RT3290) ||
3249 rt2x00_rt(rt2x00dev, RT5392)) 3384 rt2x00_rt(rt2x00dev, RT5390) ||
3385 rt2x00_rt(rt2x00dev, RT5392))
3250 rt2800_bbp_write(rt2x00dev, 83, 0x7a); 3386 rt2800_bbp_write(rt2x00dev, 83, 0x7a);
3251 else 3387 else
3252 rt2800_bbp_write(rt2x00dev, 83, 0x6a); 3388 rt2800_bbp_write(rt2x00dev, 83, 0x6a);
3253 3389
3254 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D)) 3390 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D))
3255 rt2800_bbp_write(rt2x00dev, 84, 0x19); 3391 rt2800_bbp_write(rt2x00dev, 84, 0x19);
3256 else if (rt2x00_rt(rt2x00dev, RT5390) || 3392 else if (rt2x00_rt(rt2x00dev, RT3290) ||
3257 rt2x00_rt(rt2x00dev, RT5392)) 3393 rt2x00_rt(rt2x00dev, RT5390) ||
3394 rt2x00_rt(rt2x00dev, RT5392))
3258 rt2800_bbp_write(rt2x00dev, 84, 0x9a); 3395 rt2800_bbp_write(rt2x00dev, 84, 0x9a);
3259 else 3396 else
3260 rt2800_bbp_write(rt2x00dev, 84, 0x99); 3397 rt2800_bbp_write(rt2x00dev, 84, 0x99);
3261 3398
3262 if (rt2x00_rt(rt2x00dev, RT5390) || 3399 if (rt2x00_rt(rt2x00dev, RT3290) ||
3263 rt2x00_rt(rt2x00dev, RT5392)) 3400 rt2x00_rt(rt2x00dev, RT5390) ||
3401 rt2x00_rt(rt2x00dev, RT5392))
3264 rt2800_bbp_write(rt2x00dev, 86, 0x38); 3402 rt2800_bbp_write(rt2x00dev, 86, 0x38);
3265 else 3403 else
3266 rt2800_bbp_write(rt2x00dev, 86, 0x00); 3404 rt2800_bbp_write(rt2x00dev, 86, 0x00);
@@ -3270,8 +3408,9 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3270 3408
3271 rt2800_bbp_write(rt2x00dev, 91, 0x04); 3409 rt2800_bbp_write(rt2x00dev, 91, 0x04);
3272 3410
3273 if (rt2x00_rt(rt2x00dev, RT5390) || 3411 if (rt2x00_rt(rt2x00dev, RT3290) ||
3274 rt2x00_rt(rt2x00dev, RT5392)) 3412 rt2x00_rt(rt2x00dev, RT5390) ||
3413 rt2x00_rt(rt2x00dev, RT5392))
3275 rt2800_bbp_write(rt2x00dev, 92, 0x02); 3414 rt2800_bbp_write(rt2x00dev, 92, 0x02);
3276 else 3415 else
3277 rt2800_bbp_write(rt2x00dev, 92, 0x00); 3416 rt2800_bbp_write(rt2x00dev, 92, 0x00);
@@ -3285,6 +3424,7 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3285 rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) || 3424 rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) ||
3286 rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) || 3425 rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) ||
3287 rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) || 3426 rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) ||
3427 rt2x00_rt(rt2x00dev, RT3290) ||
3288 rt2x00_rt(rt2x00dev, RT3572) || 3428 rt2x00_rt(rt2x00dev, RT3572) ||
3289 rt2x00_rt(rt2x00dev, RT5390) || 3429 rt2x00_rt(rt2x00dev, RT5390) ||
3290 rt2x00_rt(rt2x00dev, RT5392) || 3430 rt2x00_rt(rt2x00dev, RT5392) ||
@@ -3293,27 +3433,32 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3293 else 3433 else
3294 rt2800_bbp_write(rt2x00dev, 103, 0x00); 3434 rt2800_bbp_write(rt2x00dev, 103, 0x00);
3295 3435
3296 if (rt2x00_rt(rt2x00dev, RT5390) || 3436 if (rt2x00_rt(rt2x00dev, RT3290) ||
3297 rt2x00_rt(rt2x00dev, RT5392)) 3437 rt2x00_rt(rt2x00dev, RT5390) ||
3438 rt2x00_rt(rt2x00dev, RT5392))
3298 rt2800_bbp_write(rt2x00dev, 104, 0x92); 3439 rt2800_bbp_write(rt2x00dev, 104, 0x92);
3299 3440
3300 if (rt2800_is_305x_soc(rt2x00dev)) 3441 if (rt2800_is_305x_soc(rt2x00dev))
3301 rt2800_bbp_write(rt2x00dev, 105, 0x01); 3442 rt2800_bbp_write(rt2x00dev, 105, 0x01);
3443 else if (rt2x00_rt(rt2x00dev, RT3290))
3444 rt2800_bbp_write(rt2x00dev, 105, 0x1c);
3302 else if (rt2x00_rt(rt2x00dev, RT5390) || 3445 else if (rt2x00_rt(rt2x00dev, RT5390) ||
3303 rt2x00_rt(rt2x00dev, RT5392)) 3446 rt2x00_rt(rt2x00dev, RT5392))
3304 rt2800_bbp_write(rt2x00dev, 105, 0x3c); 3447 rt2800_bbp_write(rt2x00dev, 105, 0x3c);
3305 else 3448 else
3306 rt2800_bbp_write(rt2x00dev, 105, 0x05); 3449 rt2800_bbp_write(rt2x00dev, 105, 0x05);
3307 3450
3308 if (rt2x00_rt(rt2x00dev, RT5390)) 3451 if (rt2x00_rt(rt2x00dev, RT3290) ||
3452 rt2x00_rt(rt2x00dev, RT5390))
3309 rt2800_bbp_write(rt2x00dev, 106, 0x03); 3453 rt2800_bbp_write(rt2x00dev, 106, 0x03);
3310 else if (rt2x00_rt(rt2x00dev, RT5392)) 3454 else if (rt2x00_rt(rt2x00dev, RT5392))
3311 rt2800_bbp_write(rt2x00dev, 106, 0x12); 3455 rt2800_bbp_write(rt2x00dev, 106, 0x12);
3312 else 3456 else
3313 rt2800_bbp_write(rt2x00dev, 106, 0x35); 3457 rt2800_bbp_write(rt2x00dev, 106, 0x35);
3314 3458
3315 if (rt2x00_rt(rt2x00dev, RT5390) || 3459 if (rt2x00_rt(rt2x00dev, RT3290) ||
3316 rt2x00_rt(rt2x00dev, RT5392)) 3460 rt2x00_rt(rt2x00dev, RT5390) ||
3461 rt2x00_rt(rt2x00dev, RT5392))
3317 rt2800_bbp_write(rt2x00dev, 128, 0x12); 3462 rt2800_bbp_write(rt2x00dev, 128, 0x12);
3318 3463
3319 if (rt2x00_rt(rt2x00dev, RT5392)) { 3464 if (rt2x00_rt(rt2x00dev, RT5392)) {
@@ -3338,6 +3483,29 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3338 rt2800_bbp_write(rt2x00dev, 138, value); 3483 rt2800_bbp_write(rt2x00dev, 138, value);
3339 } 3484 }
3340 3485
3486 if (rt2x00_rt(rt2x00dev, RT3290)) {
3487 rt2800_bbp_write(rt2x00dev, 67, 0x24);
3488 rt2800_bbp_write(rt2x00dev, 143, 0x04);
3489 rt2800_bbp_write(rt2x00dev, 142, 0x99);
3490 rt2800_bbp_write(rt2x00dev, 150, 0x30);
3491 rt2800_bbp_write(rt2x00dev, 151, 0x2e);
3492 rt2800_bbp_write(rt2x00dev, 152, 0x20);
3493 rt2800_bbp_write(rt2x00dev, 153, 0x34);
3494 rt2800_bbp_write(rt2x00dev, 154, 0x40);
3495 rt2800_bbp_write(rt2x00dev, 155, 0x3b);
3496 rt2800_bbp_write(rt2x00dev, 253, 0x04);
3497
3498 rt2800_bbp_read(rt2x00dev, 47, &value);
3499 rt2x00_set_field8(&value, BBP47_TSSI_ADC6, 1);
3500 rt2800_bbp_write(rt2x00dev, 47, value);
3501
3502 /* Use 5-bit ADC for Acquisition and 8-bit ADC for data */
3503 rt2800_bbp_read(rt2x00dev, 3, &value);
3504 rt2x00_set_field8(&value, BBP3_ADC_MODE_SWITCH, 1);
3505 rt2x00_set_field8(&value, BBP3_ADC_INIT_MODE, 1);
3506 rt2800_bbp_write(rt2x00dev, 3, value);
3507 }
3508
3341 if (rt2x00_rt(rt2x00dev, RT5390) || 3509 if (rt2x00_rt(rt2x00dev, RT5390) ||
3342 rt2x00_rt(rt2x00dev, RT5392)) { 3510 rt2x00_rt(rt2x00dev, RT5392)) {
3343 int ant, div_mode; 3511 int ant, div_mode;
@@ -3470,6 +3638,7 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
3470 if (!rt2x00_rt(rt2x00dev, RT3070) && 3638 if (!rt2x00_rt(rt2x00dev, RT3070) &&
3471 !rt2x00_rt(rt2x00dev, RT3071) && 3639 !rt2x00_rt(rt2x00dev, RT3071) &&
3472 !rt2x00_rt(rt2x00dev, RT3090) && 3640 !rt2x00_rt(rt2x00dev, RT3090) &&
3641 !rt2x00_rt(rt2x00dev, RT3290) &&
3473 !rt2x00_rt(rt2x00dev, RT3390) && 3642 !rt2x00_rt(rt2x00dev, RT3390) &&
3474 !rt2x00_rt(rt2x00dev, RT3572) && 3643 !rt2x00_rt(rt2x00dev, RT3572) &&
3475 !rt2x00_rt(rt2x00dev, RT5390) && 3644 !rt2x00_rt(rt2x00dev, RT5390) &&
@@ -3480,8 +3649,9 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
3480 /* 3649 /*
3481 * Init RF calibration. 3650 * Init RF calibration.
3482 */ 3651 */
3483 if (rt2x00_rt(rt2x00dev, RT5390) || 3652 if (rt2x00_rt(rt2x00dev, RT3290) ||
3484 rt2x00_rt(rt2x00dev, RT5392)) { 3653 rt2x00_rt(rt2x00dev, RT5390) ||
3654 rt2x00_rt(rt2x00dev, RT5392)) {
3485 rt2800_rfcsr_read(rt2x00dev, 2, &rfcsr); 3655 rt2800_rfcsr_read(rt2x00dev, 2, &rfcsr);
3486 rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1); 3656 rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1);
3487 rt2800_rfcsr_write(rt2x00dev, 2, rfcsr); 3657 rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
@@ -3519,6 +3689,53 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
3519 rt2800_rfcsr_write(rt2x00dev, 24, 0x16); 3689 rt2800_rfcsr_write(rt2x00dev, 24, 0x16);
3520 rt2800_rfcsr_write(rt2x00dev, 25, 0x01); 3690 rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
3521 rt2800_rfcsr_write(rt2x00dev, 29, 0x1f); 3691 rt2800_rfcsr_write(rt2x00dev, 29, 0x1f);
3692 } else if (rt2x00_rt(rt2x00dev, RT3290)) {
3693 rt2800_rfcsr_write(rt2x00dev, 1, 0x0f);
3694 rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
3695 rt2800_rfcsr_write(rt2x00dev, 3, 0x08);
3696 rt2800_rfcsr_write(rt2x00dev, 4, 0x00);
3697 rt2800_rfcsr_write(rt2x00dev, 6, 0xa0);
3698 rt2800_rfcsr_write(rt2x00dev, 8, 0xf3);
3699 rt2800_rfcsr_write(rt2x00dev, 9, 0x02);
3700 rt2800_rfcsr_write(rt2x00dev, 10, 0x53);
3701 rt2800_rfcsr_write(rt2x00dev, 11, 0x4a);
3702 rt2800_rfcsr_write(rt2x00dev, 12, 0x46);
3703 rt2800_rfcsr_write(rt2x00dev, 13, 0x9f);
3704 rt2800_rfcsr_write(rt2x00dev, 18, 0x02);
3705 rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
3706 rt2800_rfcsr_write(rt2x00dev, 25, 0x83);
3707 rt2800_rfcsr_write(rt2x00dev, 26, 0x82);
3708 rt2800_rfcsr_write(rt2x00dev, 27, 0x09);
3709 rt2800_rfcsr_write(rt2x00dev, 29, 0x10);
3710 rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
3711 rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
3712 rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
3713 rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
3714 rt2800_rfcsr_write(rt2x00dev, 34, 0x05);
3715 rt2800_rfcsr_write(rt2x00dev, 35, 0x12);
3716 rt2800_rfcsr_write(rt2x00dev, 36, 0x00);
3717 rt2800_rfcsr_write(rt2x00dev, 38, 0x85);
3718 rt2800_rfcsr_write(rt2x00dev, 39, 0x1b);
3719 rt2800_rfcsr_write(rt2x00dev, 40, 0x0b);
3720 rt2800_rfcsr_write(rt2x00dev, 41, 0xbb);
3721 rt2800_rfcsr_write(rt2x00dev, 42, 0xd5);
3722 rt2800_rfcsr_write(rt2x00dev, 43, 0x7b);
3723 rt2800_rfcsr_write(rt2x00dev, 44, 0x0e);
3724 rt2800_rfcsr_write(rt2x00dev, 45, 0xa2);
3725 rt2800_rfcsr_write(rt2x00dev, 46, 0x73);
3726 rt2800_rfcsr_write(rt2x00dev, 47, 0x00);
3727 rt2800_rfcsr_write(rt2x00dev, 48, 0x10);
3728 rt2800_rfcsr_write(rt2x00dev, 49, 0x98);
3729 rt2800_rfcsr_write(rt2x00dev, 52, 0x38);
3730 rt2800_rfcsr_write(rt2x00dev, 53, 0x00);
3731 rt2800_rfcsr_write(rt2x00dev, 54, 0x78);
3732 rt2800_rfcsr_write(rt2x00dev, 55, 0x43);
3733 rt2800_rfcsr_write(rt2x00dev, 56, 0x02);
3734 rt2800_rfcsr_write(rt2x00dev, 57, 0x80);
3735 rt2800_rfcsr_write(rt2x00dev, 58, 0x7f);
3736 rt2800_rfcsr_write(rt2x00dev, 59, 0x09);
3737 rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
3738 rt2800_rfcsr_write(rt2x00dev, 61, 0xc1);
3522 } else if (rt2x00_rt(rt2x00dev, RT3390)) { 3739 } else if (rt2x00_rt(rt2x00dev, RT3390)) {
3523 rt2800_rfcsr_write(rt2x00dev, 0, 0xa0); 3740 rt2800_rfcsr_write(rt2x00dev, 0, 0xa0);
3524 rt2800_rfcsr_write(rt2x00dev, 1, 0xe1); 3741 rt2800_rfcsr_write(rt2x00dev, 1, 0xe1);
@@ -3927,6 +4144,12 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
3927 rt2800_rfcsr_write(rt2x00dev, 27, rfcsr); 4144 rt2800_rfcsr_write(rt2x00dev, 27, rfcsr);
3928 } 4145 }
3929 4146
4147 if (rt2x00_rt(rt2x00dev, RT3290)) {
4148 rt2800_rfcsr_read(rt2x00dev, 29, &rfcsr);
4149 rt2x00_set_field8(&rfcsr, RFCSR29_RSSI_GAIN, 3);
4150 rt2800_rfcsr_write(rt2x00dev, 29, rfcsr);
4151 }
4152
3930 if (rt2x00_rt(rt2x00dev, RT5390) || 4153 if (rt2x00_rt(rt2x00dev, RT5390) ||
3931 rt2x00_rt(rt2x00dev, RT5392)) { 4154 rt2x00_rt(rt2x00dev, RT5392)) {
3932 rt2800_rfcsr_read(rt2x00dev, 38, &rfcsr); 4155 rt2800_rfcsr_read(rt2x00dev, 38, &rfcsr);
@@ -4033,9 +4256,14 @@ EXPORT_SYMBOL_GPL(rt2800_disable_radio);
4033int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev) 4256int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev)
4034{ 4257{
4035 u32 reg; 4258 u32 reg;
4259 u16 efuse_ctrl_reg;
4036 4260
4037 rt2800_register_read(rt2x00dev, EFUSE_CTRL, &reg); 4261 if (rt2x00_rt(rt2x00dev, RT3290))
4262 efuse_ctrl_reg = EFUSE_CTRL_3290;
4263 else
4264 efuse_ctrl_reg = EFUSE_CTRL;
4038 4265
4266 rt2800_register_read(rt2x00dev, efuse_ctrl_reg, &reg);
4039 return rt2x00_get_field32(reg, EFUSE_CTRL_PRESENT); 4267 return rt2x00_get_field32(reg, EFUSE_CTRL_PRESENT);
4040} 4268}
4041EXPORT_SYMBOL_GPL(rt2800_efuse_detect); 4269EXPORT_SYMBOL_GPL(rt2800_efuse_detect);
@@ -4043,27 +4271,44 @@ EXPORT_SYMBOL_GPL(rt2800_efuse_detect);
4043static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i) 4271static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)
4044{ 4272{
4045 u32 reg; 4273 u32 reg;
4046 4274 u16 efuse_ctrl_reg;
4275 u16 efuse_data0_reg;
4276 u16 efuse_data1_reg;
4277 u16 efuse_data2_reg;
4278 u16 efuse_data3_reg;
4279
4280 if (rt2x00_rt(rt2x00dev, RT3290)) {
4281 efuse_ctrl_reg = EFUSE_CTRL_3290;
4282 efuse_data0_reg = EFUSE_DATA0_3290;
4283 efuse_data1_reg = EFUSE_DATA1_3290;
4284 efuse_data2_reg = EFUSE_DATA2_3290;
4285 efuse_data3_reg = EFUSE_DATA3_3290;
4286 } else {
4287 efuse_ctrl_reg = EFUSE_CTRL;
4288 efuse_data0_reg = EFUSE_DATA0;
4289 efuse_data1_reg = EFUSE_DATA1;
4290 efuse_data2_reg = EFUSE_DATA2;
4291 efuse_data3_reg = EFUSE_DATA3;
4292 }
4047 mutex_lock(&rt2x00dev->csr_mutex); 4293 mutex_lock(&rt2x00dev->csr_mutex);
4048 4294
4049 rt2800_register_read_lock(rt2x00dev, EFUSE_CTRL, &reg); 4295 rt2800_register_read_lock(rt2x00dev, efuse_ctrl_reg, &reg);
4050 rt2x00_set_field32(&reg, EFUSE_CTRL_ADDRESS_IN, i); 4296 rt2x00_set_field32(&reg, EFUSE_CTRL_ADDRESS_IN, i);
4051 rt2x00_set_field32(&reg, EFUSE_CTRL_MODE, 0); 4297 rt2x00_set_field32(&reg, EFUSE_CTRL_MODE, 0);
4052 rt2x00_set_field32(&reg, EFUSE_CTRL_KICK, 1); 4298 rt2x00_set_field32(&reg, EFUSE_CTRL_KICK, 1);
4053 rt2800_register_write_lock(rt2x00dev, EFUSE_CTRL, reg); 4299 rt2800_register_write_lock(rt2x00dev, efuse_ctrl_reg, reg);
4054 4300
4055 /* Wait until the EEPROM has been loaded */ 4301 /* Wait until the EEPROM has been loaded */
4056 rt2800_regbusy_read(rt2x00dev, EFUSE_CTRL, EFUSE_CTRL_KICK, &reg); 4302 rt2800_regbusy_read(rt2x00dev, efuse_ctrl_reg, EFUSE_CTRL_KICK, &reg);
4057
4058 /* Apparently the data is read from end to start */ 4303 /* Apparently the data is read from end to start */
4059 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, &reg); 4304 rt2800_register_read_lock(rt2x00dev, efuse_data3_reg, &reg);
4060 /* The returned value is in CPU order, but eeprom is le */ 4305 /* The returned value is in CPU order, but eeprom is le */
4061 *(u32 *)&rt2x00dev->eeprom[i] = cpu_to_le32(reg); 4306 *(u32 *)&rt2x00dev->eeprom[i] = cpu_to_le32(reg);
4062 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, &reg); 4307 rt2800_register_read_lock(rt2x00dev, efuse_data2_reg, &reg);
4063 *(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg); 4308 *(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
4064 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, &reg); 4309 rt2800_register_read_lock(rt2x00dev, efuse_data1_reg, &reg);
4065 *(u32 *)&rt2x00dev->eeprom[i + 4] = cpu_to_le32(reg); 4310 *(u32 *)&rt2x00dev->eeprom[i + 4] = cpu_to_le32(reg);
4066 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0, &reg); 4311 rt2800_register_read_lock(rt2x00dev, efuse_data0_reg, &reg);
4067 *(u32 *)&rt2x00dev->eeprom[i + 6] = cpu_to_le32(reg); 4312 *(u32 *)&rt2x00dev->eeprom[i + 6] = cpu_to_le32(reg);
4068 4313
4069 mutex_unlock(&rt2x00dev->csr_mutex); 4314 mutex_unlock(&rt2x00dev->csr_mutex);
@@ -4090,7 +4335,7 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
4090 */ 4335 */
4091 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); 4336 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
4092 if (!is_valid_ether_addr(mac)) { 4337 if (!is_valid_ether_addr(mac)) {
4093 random_ether_addr(mac); 4338 eth_random_addr(mac);
4094 EEPROM(rt2x00dev, "MAC: %pM\n", mac); 4339 EEPROM(rt2x00dev, "MAC: %pM\n", mac);
4095 } 4340 }
4096 4341
@@ -4225,9 +4470,14 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
4225 * RT28xx/RT30xx: defined in "EEPROM_NIC_CONF0_RF_TYPE" field 4470 * RT28xx/RT30xx: defined in "EEPROM_NIC_CONF0_RF_TYPE" field
4226 * RT53xx: defined in "EEPROM_CHIP_ID" field 4471 * RT53xx: defined in "EEPROM_CHIP_ID" field
4227 */ 4472 */
4228 rt2800_register_read(rt2x00dev, MAC_CSR0, &reg); 4473 if (rt2x00_rt(rt2x00dev, RT3290))
4229 if (rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT5390 || 4474 rt2800_register_read(rt2x00dev, MAC_CSR0_3290, &reg);
4230 rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT5392) 4475 else
4476 rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
4477
4478 if (rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT3290 ||
4479 rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT5390 ||
4480 rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT5392)
4231 rt2x00_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &value); 4481 rt2x00_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &value);
4232 else 4482 else
4233 value = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE); 4483 value = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE);
@@ -4242,6 +4492,7 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
4242 case RT3070: 4492 case RT3070:
4243 case RT3071: 4493 case RT3071:
4244 case RT3090: 4494 case RT3090:
4495 case RT3290:
4245 case RT3390: 4496 case RT3390:
4246 case RT3572: 4497 case RT3572:
4247 case RT5390: 4498 case RT5390:
@@ -4262,10 +4513,13 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
4262 case RF3021: 4513 case RF3021:
4263 case RF3022: 4514 case RF3022:
4264 case RF3052: 4515 case RF3052:
4516 case RF3290:
4265 case RF3320: 4517 case RF3320:
4518 case RF5360:
4266 case RF5370: 4519 case RF5370:
4267 case RF5372: 4520 case RF5372:
4268 case RF5390: 4521 case RF5390:
4522 case RF5392:
4269 break; 4523 break;
4270 default: 4524 default:
4271 ERROR(rt2x00dev, "Invalid RF chipset 0x%04x detected.\n", 4525 ERROR(rt2x00dev, "Invalid RF chipset 0x%04x detected.\n",
@@ -4576,10 +4830,13 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
4576 rt2x00_rf(rt2x00dev, RF2020) || 4830 rt2x00_rf(rt2x00dev, RF2020) ||
4577 rt2x00_rf(rt2x00dev, RF3021) || 4831 rt2x00_rf(rt2x00dev, RF3021) ||
4578 rt2x00_rf(rt2x00dev, RF3022) || 4832 rt2x00_rf(rt2x00dev, RF3022) ||
4833 rt2x00_rf(rt2x00dev, RF3290) ||
4579 rt2x00_rf(rt2x00dev, RF3320) || 4834 rt2x00_rf(rt2x00dev, RF3320) ||
4835 rt2x00_rf(rt2x00dev, RF5360) ||
4580 rt2x00_rf(rt2x00dev, RF5370) || 4836 rt2x00_rf(rt2x00dev, RF5370) ||
4581 rt2x00_rf(rt2x00dev, RF5372) || 4837 rt2x00_rf(rt2x00dev, RF5372) ||
4582 rt2x00_rf(rt2x00dev, RF5390)) { 4838 rt2x00_rf(rt2x00dev, RF5390) ||
4839 rt2x00_rf(rt2x00dev, RF5392)) {
4583 spec->num_channels = 14; 4840 spec->num_channels = 14;
4584 spec->channels = rf_vals_3x; 4841 spec->channels = rf_vals_3x;
4585 } else if (rt2x00_rf(rt2x00dev, RF3052)) { 4842 } else if (rt2x00_rf(rt2x00dev, RF3052)) {
@@ -4662,9 +4919,12 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
4662 case RF3022: 4919 case RF3022:
4663 case RF3320: 4920 case RF3320:
4664 case RF3052: 4921 case RF3052:
4922 case RF3290:
4923 case RF5360:
4665 case RF5370: 4924 case RF5370:
4666 case RF5372: 4925 case RF5372:
4667 case RF5390: 4926 case RF5390:
4927 case RF5392:
4668 __set_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags); 4928 __set_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags);
4669 break; 4929 break;
4670 } 4930 }
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index cad25bfebd7a..235376e9cb04 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -280,7 +280,13 @@ static void rt2800pci_stop_queue(struct data_queue *queue)
280 */ 280 */
281static char *rt2800pci_get_firmware_name(struct rt2x00_dev *rt2x00dev) 281static char *rt2800pci_get_firmware_name(struct rt2x00_dev *rt2x00dev)
282{ 282{
283 return FIRMWARE_RT2860; 283 /*
284 * Chip rt3290 use specific 4KB firmware named rt3290.bin.
285 */
286 if (rt2x00_rt(rt2x00dev, RT3290))
287 return FIRMWARE_RT3290;
288 else
289 return FIRMWARE_RT2860;
284} 290}
285 291
286static int rt2800pci_write_firmware(struct rt2x00_dev *rt2x00dev, 292static int rt2800pci_write_firmware(struct rt2x00_dev *rt2x00dev,
@@ -974,6 +980,66 @@ static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
974 return rt2800_validate_eeprom(rt2x00dev); 980 return rt2800_validate_eeprom(rt2x00dev);
975} 981}
976 982
983static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev)
984{
985 u32 reg;
986 int i, count;
987
988 rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
989 if (rt2x00_get_field32(reg, WLAN_EN))
990 return 0;
991
992 rt2x00_set_field32(&reg, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff);
993 rt2x00_set_field32(&reg, FRC_WL_ANT_SET, 1);
994 rt2x00_set_field32(&reg, WLAN_CLK_EN, 0);
995 rt2x00_set_field32(&reg, WLAN_EN, 1);
996 rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
997
998 udelay(REGISTER_BUSY_DELAY);
999
1000 count = 0;
1001 do {
1002 /*
1003 * Check PLL_LD & XTAL_RDY.
1004 */
1005 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
1006 rt2800_register_read(rt2x00dev, CMB_CTRL, &reg);
1007 if (rt2x00_get_field32(reg, PLL_LD) &&
1008 rt2x00_get_field32(reg, XTAL_RDY))
1009 break;
1010 udelay(REGISTER_BUSY_DELAY);
1011 }
1012
1013 if (i >= REGISTER_BUSY_COUNT) {
1014
1015 if (count >= 10)
1016 return -EIO;
1017
1018 rt2800_register_write(rt2x00dev, 0x58, 0x018);
1019 udelay(REGISTER_BUSY_DELAY);
1020 rt2800_register_write(rt2x00dev, 0x58, 0x418);
1021 udelay(REGISTER_BUSY_DELAY);
1022 rt2800_register_write(rt2x00dev, 0x58, 0x618);
1023 udelay(REGISTER_BUSY_DELAY);
1024 count++;
1025 } else {
1026 count = 0;
1027 }
1028
1029 rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
1030 rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 0);
1031 rt2x00_set_field32(&reg, WLAN_CLK_EN, 1);
1032 rt2x00_set_field32(&reg, WLAN_RESET, 1);
1033 rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
1034 udelay(10);
1035 rt2x00_set_field32(&reg, WLAN_RESET, 0);
1036 rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
1037 udelay(10);
1038 rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, 0x7fffffff);
1039 } while (count != 0);
1040
1041 return 0;
1042}
977static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev) 1043static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
978{ 1044{
979 int retval; 1045 int retval;
@@ -997,6 +1063,17 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
997 return retval; 1063 return retval;
998 1064
999 /* 1065 /*
1066 * In probe phase call rt2800_enable_wlan_rt3290 to enable wlan
1067 * clk for rt3290. That avoid the MCU fail in start phase.
1068 */
1069 if (rt2x00_rt(rt2x00dev, RT3290)) {
1070 retval = rt2800_enable_wlan_rt3290(rt2x00dev);
1071
1072 if (retval)
1073 return retval;
1074 }
1075
1076 /*
1000 * This device has multiple filters for control frames 1077 * This device has multiple filters for control frames
1001 * and has a separate filter for PS Poll frames. 1078 * and has a separate filter for PS Poll frames.
1002 */ 1079 */
@@ -1175,6 +1252,9 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
1175 { PCI_DEVICE(0x1432, 0x7768) }, 1252 { PCI_DEVICE(0x1432, 0x7768) },
1176 { PCI_DEVICE(0x1462, 0x891a) }, 1253 { PCI_DEVICE(0x1462, 0x891a) },
1177 { PCI_DEVICE(0x1a3b, 0x1059) }, 1254 { PCI_DEVICE(0x1a3b, 0x1059) },
1255#ifdef CONFIG_RT2800PCI_RT3290
1256 { PCI_DEVICE(0x1814, 0x3290) },
1257#endif
1178#ifdef CONFIG_RT2800PCI_RT33XX 1258#ifdef CONFIG_RT2800PCI_RT33XX
1179 { PCI_DEVICE(0x1814, 0x3390) }, 1259 { PCI_DEVICE(0x1814, 0x3390) },
1180#endif 1260#endif
@@ -1188,6 +1268,7 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
1188 { PCI_DEVICE(0x1814, 0x3593) }, 1268 { PCI_DEVICE(0x1814, 0x3593) },
1189#endif 1269#endif
1190#ifdef CONFIG_RT2800PCI_RT53XX 1270#ifdef CONFIG_RT2800PCI_RT53XX
1271 { PCI_DEVICE(0x1814, 0x5360) },
1191 { PCI_DEVICE(0x1814, 0x5362) }, 1272 { PCI_DEVICE(0x1814, 0x5362) },
1192 { PCI_DEVICE(0x1814, 0x5390) }, 1273 { PCI_DEVICE(0x1814, 0x5390) },
1193 { PCI_DEVICE(0x1814, 0x5392) }, 1274 { PCI_DEVICE(0x1814, 0x5392) },
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.h b/drivers/net/wireless/rt2x00/rt2800pci.h
index 70e050d904c8..ab22a087c50d 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.h
+++ b/drivers/net/wireless/rt2x00/rt2800pci.h
@@ -47,6 +47,7 @@
47 * 8051 firmware image. 47 * 8051 firmware image.
48 */ 48 */
49#define FIRMWARE_RT2860 "rt2860.bin" 49#define FIRMWARE_RT2860 "rt2860.bin"
50#define FIRMWARE_RT3290 "rt3290.bin"
50#define FIRMWARE_IMAGE_BASE 0x2000 51#define FIRMWARE_IMAGE_BASE 0x2000
51 52
52/* 53/*
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index bf78317a6adb..6cf336595e25 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -971,6 +971,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
971 { USB_DEVICE(0x0411, 0x015d) }, 971 { USB_DEVICE(0x0411, 0x015d) },
972 { USB_DEVICE(0x0411, 0x016f) }, 972 { USB_DEVICE(0x0411, 0x016f) },
973 { USB_DEVICE(0x0411, 0x01a2) }, 973 { USB_DEVICE(0x0411, 0x01a2) },
974 { USB_DEVICE(0x0411, 0x01ee) },
974 /* Corega */ 975 /* Corega */
975 { USB_DEVICE(0x07aa, 0x002f) }, 976 { USB_DEVICE(0x07aa, 0x002f) },
976 { USB_DEVICE(0x07aa, 0x003c) }, 977 { USB_DEVICE(0x07aa, 0x003c) },
@@ -1137,6 +1138,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
1137#ifdef CONFIG_RT2800USB_RT33XX 1138#ifdef CONFIG_RT2800USB_RT33XX
1138 /* Belkin */ 1139 /* Belkin */
1139 { USB_DEVICE(0x050d, 0x945b) }, 1140 { USB_DEVICE(0x050d, 0x945b) },
1141 /* D-Link */
1142 { USB_DEVICE(0x2001, 0x3c17) },
1140 /* Panasonic */ 1143 /* Panasonic */
1141 { USB_DEVICE(0x083a, 0xb511) }, 1144 { USB_DEVICE(0x083a, 0xb511) },
1142 /* Philips */ 1145 /* Philips */
@@ -1237,7 +1240,6 @@ static struct usb_device_id rt2800usb_device_table[] = {
1237 /* D-Link */ 1240 /* D-Link */
1238 { USB_DEVICE(0x07d1, 0x3c0b) }, 1241 { USB_DEVICE(0x07d1, 0x3c0b) },
1239 { USB_DEVICE(0x07d1, 0x3c17) }, 1242 { USB_DEVICE(0x07d1, 0x3c17) },
1240 { USB_DEVICE(0x2001, 0x3c17) },
1241 /* Encore */ 1243 /* Encore */
1242 { USB_DEVICE(0x203d, 0x14a1) }, 1244 { USB_DEVICE(0x203d, 0x14a1) },
1243 /* Gemtek */ 1245 /* Gemtek */
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 8f754025b06e..8afb546c2b2d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -187,6 +187,7 @@ struct rt2x00_chip {
187#define RT3070 0x3070 187#define RT3070 0x3070
188#define RT3071 0x3071 188#define RT3071 0x3071
189#define RT3090 0x3090 /* 2.4GHz PCIe */ 189#define RT3090 0x3090 /* 2.4GHz PCIe */
190#define RT3290 0x3290
190#define RT3390 0x3390 191#define RT3390 0x3390
191#define RT3572 0x3572 192#define RT3572 0x3572
192#define RT3593 0x3593 193#define RT3593 0x3593
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index e5404e576251..a6b88bd4a1a5 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -1161,6 +1161,8 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
1161 BIT(NL80211_IFTYPE_MESH_POINT) | 1161 BIT(NL80211_IFTYPE_MESH_POINT) |
1162 BIT(NL80211_IFTYPE_WDS); 1162 BIT(NL80211_IFTYPE_WDS);
1163 1163
1164 rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
1165
1164 /* 1166 /*
1165 * Initialize work. 1167 * Initialize work.
1166 */ 1168 */
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index dd24b2663b5e..4ff26c2159bf 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -506,9 +506,19 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
506 506
507 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) 507 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
508 return 0; 508 return 0;
509 else if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) 509
510 if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags))
511 return -EOPNOTSUPP;
512
513 /*
514 * To support IBSS RSN, don't program group keys in IBSS, the
515 * hardware will then not attempt to decrypt the frames.
516 */
517 if (vif->type == NL80211_IFTYPE_ADHOC &&
518 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
510 return -EOPNOTSUPP; 519 return -EOPNOTSUPP;
511 else if (key->keylen > 32) 520
521 if (key->keylen > 32)
512 return -ENOSPC; 522 return -ENOSPC;
513 523
514 memset(&crypto, 0, sizeof(crypto)); 524 memset(&crypto, 0, sizeof(crypto));
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 0a4653a92cab..a0c8caef3b0a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -256,6 +256,7 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops)
256 struct ieee80211_hw *hw; 256 struct ieee80211_hw *hw;
257 struct rt2x00_dev *rt2x00dev; 257 struct rt2x00_dev *rt2x00dev;
258 int retval; 258 int retval;
259 u16 chip;
259 260
260 retval = pci_enable_device(pci_dev); 261 retval = pci_enable_device(pci_dev);
261 if (retval) { 262 if (retval) {
@@ -305,6 +306,14 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops)
305 if (retval) 306 if (retval)
306 goto exit_free_device; 307 goto exit_free_device;
307 308
309 /*
310 * Because rt3290 chip use different efuse offset to read efuse data.
311 * So before read efuse it need to indicate it is the
312 * rt3290 or not.
313 */
314 pci_read_config_word(pci_dev, PCI_DEVICE_ID, &chip);
315 rt2x00dev->chip.rt = chip;
316
308 retval = rt2x00lib_probe_dev(rt2x00dev); 317 retval = rt2x00lib_probe_dev(rt2x00dev);
309 if (retval) 318 if (retval)
310 goto exit_free_reg; 319 goto exit_free_reg;
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 2fd830103415..f7e74a0a7759 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -774,9 +774,7 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
774bool rt2x00queue_for_each_entry(struct data_queue *queue, 774bool rt2x00queue_for_each_entry(struct data_queue *queue,
775 enum queue_index start, 775 enum queue_index start,
776 enum queue_index end, 776 enum queue_index end,
777 void *data, 777 bool (*fn)(struct queue_entry *entry))
778 bool (*fn)(struct queue_entry *entry,
779 void *data))
780{ 778{
781 unsigned long irqflags; 779 unsigned long irqflags;
782 unsigned int index_start; 780 unsigned int index_start;
@@ -807,17 +805,17 @@ bool rt2x00queue_for_each_entry(struct data_queue *queue,
807 */ 805 */
808 if (index_start < index_end) { 806 if (index_start < index_end) {
809 for (i = index_start; i < index_end; i++) { 807 for (i = index_start; i < index_end; i++) {
810 if (fn(&queue->entries[i], data)) 808 if (fn(&queue->entries[i]))
811 return true; 809 return true;
812 } 810 }
813 } else { 811 } else {
814 for (i = index_start; i < queue->limit; i++) { 812 for (i = index_start; i < queue->limit; i++) {
815 if (fn(&queue->entries[i], data)) 813 if (fn(&queue->entries[i]))
816 return true; 814 return true;
817 } 815 }
818 816
819 for (i = 0; i < index_end; i++) { 817 for (i = 0; i < index_end; i++) {
820 if (fn(&queue->entries[i], data)) 818 if (fn(&queue->entries[i]))
821 return true; 819 return true;
822 } 820 }
823 } 821 }
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index 5f1392c72673..9b8c10a86dee 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -584,7 +584,6 @@ struct data_queue_desc {
584 * @queue: Pointer to @data_queue 584 * @queue: Pointer to @data_queue
585 * @start: &enum queue_index Pointer to start index 585 * @start: &enum queue_index Pointer to start index
586 * @end: &enum queue_index Pointer to end index 586 * @end: &enum queue_index Pointer to end index
587 * @data: Data to pass to the callback function
588 * @fn: The function to call for each &struct queue_entry 587 * @fn: The function to call for each &struct queue_entry
589 * 588 *
590 * This will walk through all entries in the queue, in chronological 589 * This will walk through all entries in the queue, in chronological
@@ -597,9 +596,7 @@ struct data_queue_desc {
597bool rt2x00queue_for_each_entry(struct data_queue *queue, 596bool rt2x00queue_for_each_entry(struct data_queue *queue,
598 enum queue_index start, 597 enum queue_index start,
599 enum queue_index end, 598 enum queue_index end,
600 void *data, 599 bool (*fn)(struct queue_entry *entry));
601 bool (*fn)(struct queue_entry *entry,
602 void *data));
603 600
604/** 601/**
605 * rt2x00queue_empty - Check if the queue is empty. 602 * rt2x00queue_empty - Check if the queue is empty.
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 74ecc33fdd90..40ea80725a96 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -285,7 +285,7 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
285 queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work); 285 queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
286} 286}
287 287
288static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void* data) 288static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry)
289{ 289{
290 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 290 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
291 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); 291 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
@@ -390,7 +390,7 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
390 queue_work(rt2x00dev->workqueue, &rt2x00dev->rxdone_work); 390 queue_work(rt2x00dev->workqueue, &rt2x00dev->rxdone_work);
391} 391}
392 392
393static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void* data) 393static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry)
394{ 394{
395 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 395 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
396 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); 396 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
@@ -427,18 +427,12 @@ void rt2x00usb_kick_queue(struct data_queue *queue)
427 case QID_AC_BE: 427 case QID_AC_BE:
428 case QID_AC_BK: 428 case QID_AC_BK:
429 if (!rt2x00queue_empty(queue)) 429 if (!rt2x00queue_empty(queue))
430 rt2x00queue_for_each_entry(queue, 430 rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
431 Q_INDEX_DONE,
432 Q_INDEX,
433 NULL,
434 rt2x00usb_kick_tx_entry); 431 rt2x00usb_kick_tx_entry);
435 break; 432 break;
436 case QID_RX: 433 case QID_RX:
437 if (!rt2x00queue_full(queue)) 434 if (!rt2x00queue_full(queue))
438 rt2x00queue_for_each_entry(queue, 435 rt2x00queue_for_each_entry(queue, Q_INDEX, Q_INDEX_DONE,
439 Q_INDEX,
440 Q_INDEX_DONE,
441 NULL,
442 rt2x00usb_kick_rx_entry); 436 rt2x00usb_kick_rx_entry);
443 break; 437 break;
444 default: 438 default:
@@ -447,7 +441,7 @@ void rt2x00usb_kick_queue(struct data_queue *queue)
447} 441}
448EXPORT_SYMBOL_GPL(rt2x00usb_kick_queue); 442EXPORT_SYMBOL_GPL(rt2x00usb_kick_queue);
449 443
450static bool rt2x00usb_flush_entry(struct queue_entry *entry, void* data) 444static bool rt2x00usb_flush_entry(struct queue_entry *entry)
451{ 445{
452 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 446 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
453 struct queue_entry_priv_usb *entry_priv = entry->priv_data; 447 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
@@ -474,7 +468,7 @@ void rt2x00usb_flush_queue(struct data_queue *queue, bool drop)
474 unsigned int i; 468 unsigned int i;
475 469
476 if (drop) 470 if (drop)
477 rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX, NULL, 471 rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
478 rt2x00usb_flush_entry); 472 rt2x00usb_flush_entry);
479 473
480 /* 474 /*
@@ -565,7 +559,7 @@ void rt2x00usb_clear_entry(struct queue_entry *entry)
565 entry->flags = 0; 559 entry->flags = 0;
566 560
567 if (entry->queue->qid == QID_RX) 561 if (entry->queue->qid == QID_RX)
568 rt2x00usb_kick_rx_entry(entry, NULL); 562 rt2x00usb_kick_rx_entry(entry);
569} 563}
570EXPORT_SYMBOL_GPL(rt2x00usb_clear_entry); 564EXPORT_SYMBOL_GPL(rt2x00usb_clear_entry);
571 565
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index ee22bd74579d..f32259686b45 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -2415,7 +2415,7 @@ static int rt61pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
2415 */ 2415 */
2416 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); 2416 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
2417 if (!is_valid_ether_addr(mac)) { 2417 if (!is_valid_ether_addr(mac)) {
2418 random_ether_addr(mac); 2418 eth_random_addr(mac);
2419 EEPROM(rt2x00dev, "MAC: %pM\n", mac); 2419 EEPROM(rt2x00dev, "MAC: %pM\n", mac);
2420 } 2420 }
2421 2421
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 77ccbbc7da41..ba6e434b859d 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -1770,7 +1770,7 @@ static int rt73usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1770 */ 1770 */
1771 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); 1771 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
1772 if (!is_valid_ether_addr(mac)) { 1772 if (!is_valid_ether_addr(mac)) {
1773 random_ether_addr(mac); 1773 eth_random_addr(mac);
1774 EEPROM(rt2x00dev, "MAC: %pM\n", mac); 1774 EEPROM(rt2x00dev, "MAC: %pM\n", mac);
1775 } 1775 }
1776 1776
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 2bebcb71a1e9..aceaf689f737 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -47,6 +47,8 @@ static DEFINE_PCI_DEVICE_TABLE(rtl8180_table) = {
47 { PCI_DEVICE(0x1799, 0x6001) }, 47 { PCI_DEVICE(0x1799, 0x6001) },
48 { PCI_DEVICE(0x1799, 0x6020) }, 48 { PCI_DEVICE(0x1799, 0x6020) },
49 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x3300) }, 49 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x3300) },
50 { PCI_DEVICE(0x1186, 0x3301) },
51 { PCI_DEVICE(0x1432, 0x7106) },
50 { } 52 { }
51}; 53};
52 54
@@ -1076,7 +1078,7 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
1076 if (!is_valid_ether_addr(mac_addr)) { 1078 if (!is_valid_ether_addr(mac_addr)) {
1077 printk(KERN_WARNING "%s (rtl8180): Invalid hwaddr! Using" 1079 printk(KERN_WARNING "%s (rtl8180): Invalid hwaddr! Using"
1078 " randomly generated MAC addr\n", pci_name(pdev)); 1080 " randomly generated MAC addr\n", pci_name(pdev));
1079 random_ether_addr(mac_addr); 1081 eth_random_addr(mac_addr);
1080 } 1082 }
1081 SET_IEEE80211_PERM_ADDR(dev, mac_addr); 1083 SET_IEEE80211_PERM_ADDR(dev, mac_addr);
1082 1084
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 4fb1ca1b86b9..71a30b026089 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -1486,7 +1486,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
1486 if (!is_valid_ether_addr(mac_addr)) { 1486 if (!is_valid_ether_addr(mac_addr)) {
1487 printk(KERN_WARNING "rtl8187: Invalid hwaddr! Using randomly " 1487 printk(KERN_WARNING "rtl8187: Invalid hwaddr! Using randomly "
1488 "generated MAC address\n"); 1488 "generated MAC address\n");
1489 random_ether_addr(mac_addr); 1489 eth_random_addr(mac_addr);
1490 } 1490 }
1491 SET_IEEE80211_PERM_ADDR(dev, mac_addr); 1491 SET_IEEE80211_PERM_ADDR(dev, mac_addr);
1492 1492
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index f4c852c6749b..58e1f7bb4df1 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -907,7 +907,7 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
907 struct ieee80211_hdr *hdr = rtl_get_hdr(skb); 907 struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
908 struct rtl_priv *rtlpriv = rtl_priv(hw); 908 struct rtl_priv *rtlpriv = rtl_priv(hw);
909 __le16 fc = hdr->frame_control; 909 __le16 fc = hdr->frame_control;
910 u8 *act = (u8 *) (((u8 *) skb->data + MAC80211_3ADDR_LEN)); 910 u8 *act = (u8 *)skb->data + MAC80211_3ADDR_LEN;
911 u8 category; 911 u8 category;
912 912
913 if (!ieee80211_is_action(fc)) 913 if (!ieee80211_is_action(fc))
diff --git a/drivers/net/wireless/rtlwifi/cam.c b/drivers/net/wireless/rtlwifi/cam.c
index 3d8cc4a0c86d..5b4b4d4eaf9e 100644
--- a/drivers/net/wireless/rtlwifi/cam.c
+++ b/drivers/net/wireless/rtlwifi/cam.c
@@ -128,7 +128,7 @@ u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
128 u32 us_config; 128 u32 us_config;
129 struct rtl_priv *rtlpriv = rtl_priv(hw); 129 struct rtl_priv *rtlpriv = rtl_priv(hw);
130 130
131 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, 131 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
132 "EntryNo:%x, ulKeyId=%x, ulEncAlg=%x, ulUseDK=%x MacAddr %pM\n", 132 "EntryNo:%x, ulKeyId=%x, ulEncAlg=%x, ulUseDK=%x MacAddr %pM\n",
133 ul_entry_idx, ul_key_id, ul_enc_alg, 133 ul_entry_idx, ul_key_id, ul_enc_alg,
134 ul_default_key, mac_addr); 134 ul_default_key, mac_addr);
@@ -146,7 +146,7 @@ u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
146 } 146 }
147 147
148 rtl_cam_program_entry(hw, ul_entry_idx, mac_addr, 148 rtl_cam_program_entry(hw, ul_entry_idx, mac_addr,
149 (u8 *) key_content, us_config); 149 key_content, us_config);
150 150
151 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "<===\n"); 151 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "<===\n");
152 152
@@ -342,7 +342,8 @@ void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr)
342 /* Remove from HW Security CAM */ 342 /* Remove from HW Security CAM */
343 memset(rtlpriv->sec.hwsec_cam_sta_addr[i], 0, ETH_ALEN); 343 memset(rtlpriv->sec.hwsec_cam_sta_addr[i], 0, ETH_ALEN);
344 rtlpriv->sec.hwsec_cam_bitmap &= ~(BIT(0) << i); 344 rtlpriv->sec.hwsec_cam_bitmap &= ~(BIT(0) << i);
345 pr_info("&&&&&&&&&del entry %d\n", i); 345 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
346 "del CAM entry %d\n", i);
346 } 347 }
347 } 348 }
348 return; 349 return;
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 278e9f957e0d..a18ad2a98938 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -680,7 +680,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
680 680
681 mac->short_preamble = bss_conf->use_short_preamble; 681 mac->short_preamble = bss_conf->use_short_preamble;
682 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACK_PREAMBLE, 682 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACK_PREAMBLE,
683 (u8 *) (&mac->short_preamble)); 683 &mac->short_preamble);
684 } 684 }
685 685
686 if (changed & BSS_CHANGED_ERP_SLOT) { 686 if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -693,7 +693,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
693 mac->slot_time = RTL_SLOT_TIME_20; 693 mac->slot_time = RTL_SLOT_TIME_20;
694 694
695 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, 695 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
696 (u8 *) (&mac->slot_time)); 696 &mac->slot_time);
697 } 697 }
698 698
699 if (changed & BSS_CHANGED_HT) { 699 if (changed & BSS_CHANGED_HT) {
@@ -713,7 +713,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
713 rcu_read_unlock(); 713 rcu_read_unlock();
714 714
715 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SHORTGI_DENSITY, 715 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SHORTGI_DENSITY,
716 (u8 *) (&mac->max_mss_density)); 716 &mac->max_mss_density);
717 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AMPDU_FACTOR, 717 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AMPDU_FACTOR,
718 &mac->current_ampdu_factor); 718 &mac->current_ampdu_factor);
719 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AMPDU_MIN_SPACE, 719 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AMPDU_MIN_SPACE,
@@ -801,7 +801,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
801 u8 mstatus = RT_MEDIA_CONNECT; 801 u8 mstatus = RT_MEDIA_CONNECT;
802 rtlpriv->cfg->ops->set_hw_reg(hw, 802 rtlpriv->cfg->ops->set_hw_reg(hw,
803 HW_VAR_H2C_FW_JOINBSSRPT, 803 HW_VAR_H2C_FW_JOINBSSRPT,
804 (u8 *) (&mstatus)); 804 &mstatus);
805 ppsc->report_linked = true; 805 ppsc->report_linked = true;
806 } 806 }
807 } else { 807 } else {
@@ -809,7 +809,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
809 u8 mstatus = RT_MEDIA_DISCONNECT; 809 u8 mstatus = RT_MEDIA_DISCONNECT;
810 rtlpriv->cfg->ops->set_hw_reg(hw, 810 rtlpriv->cfg->ops->set_hw_reg(hw,
811 HW_VAR_H2C_FW_JOINBSSRPT, 811 HW_VAR_H2C_FW_JOINBSSRPT,
812 (u8 *)(&mstatus)); 812 &mstatus);
813 ppsc->report_linked = false; 813 ppsc->report_linked = false;
814 } 814 }
815 } 815 }
@@ -836,7 +836,7 @@ static void rtl_op_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
836 u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 1 : 0; 836 u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 1 : 0;
837 837
838 mac->tsf = tsf; 838 mac->tsf = tsf;
839 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_CORRECT_TSF, (u8 *) (&bibss)); 839 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_CORRECT_TSF, &bibss);
840} 840}
841 841
842static void rtl_op_reset_tsf(struct ieee80211_hw *hw, 842static void rtl_op_reset_tsf(struct ieee80211_hw *hw,
@@ -845,7 +845,7 @@ static void rtl_op_reset_tsf(struct ieee80211_hw *hw,
845 struct rtl_priv *rtlpriv = rtl_priv(hw); 845 struct rtl_priv *rtlpriv = rtl_priv(hw);
846 u8 tmp = 0; 846 u8 tmp = 0;
847 847
848 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_DUAL_TSF_RST, (u8 *) (&tmp)); 848 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_DUAL_TSF_RST, &tmp);
849} 849}
850 850
851static void rtl_op_sta_notify(struct ieee80211_hw *hw, 851static void rtl_op_sta_notify(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index 1f143800a8d7..8e2f9afb125a 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -352,7 +352,7 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
352 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_EFUSE_BYTES, 352 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_EFUSE_BYTES,
353 (u8 *)&efuse_utilized); 353 (u8 *)&efuse_utilized);
354 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_EFUSE_USAGE, 354 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_EFUSE_USAGE,
355 (u8 *)&efuse_usage); 355 &efuse_usage);
356done: 356done:
357 for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) 357 for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++)
358 kfree(efuse_word[i]); 358 kfree(efuse_word[i]);
@@ -409,7 +409,7 @@ void efuse_shadow_read(struct ieee80211_hw *hw, u8 type,
409 else if (type == 2) 409 else if (type == 2)
410 efuse_shadow_read_2byte(hw, offset, (u16 *) value); 410 efuse_shadow_read_2byte(hw, offset, (u16 *) value);
411 else if (type == 4) 411 else if (type == 4)
412 efuse_shadow_read_4byte(hw, offset, (u32 *) value); 412 efuse_shadow_read_4byte(hw, offset, value);
413 413
414} 414}
415 415
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 2062ea1d7c80..31138fdad1f7 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -756,10 +756,10 @@ done:
756 if (index == rtlpci->rxringcount - 1) 756 if (index == rtlpci->rxringcount - 1)
757 rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false, 757 rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,
758 HW_DESC_RXERO, 758 HW_DESC_RXERO,
759 (u8 *)&tmp_one); 759 &tmp_one);
760 760
761 rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false, HW_DESC_RXOWN, 761 rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false, HW_DESC_RXOWN,
762 (u8 *)&tmp_one); 762 &tmp_one);
763 763
764 index = (index + 1) % rtlpci->rxringcount; 764 index = (index + 1) % rtlpci->rxringcount;
765 } 765 }
@@ -934,7 +934,7 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
934 __skb_queue_tail(&ring->queue, pskb); 934 __skb_queue_tail(&ring->queue, pskb);
935 935
936 rtlpriv->cfg->ops->set_desc((u8 *) pdesc, true, HW_DESC_OWN, 936 rtlpriv->cfg->ops->set_desc((u8 *) pdesc, true, HW_DESC_OWN,
937 (u8 *)&temp_one); 937 &temp_one);
938 938
939 return; 939 return;
940} 940}
@@ -1126,11 +1126,11 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
1126 rxbuffersize); 1126 rxbuffersize);
1127 rtlpriv->cfg->ops->set_desc((u8 *) entry, false, 1127 rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
1128 HW_DESC_RXOWN, 1128 HW_DESC_RXOWN,
1129 (u8 *)&tmp_one); 1129 &tmp_one);
1130 } 1130 }
1131 1131
1132 rtlpriv->cfg->ops->set_desc((u8 *) entry, false, 1132 rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
1133 HW_DESC_RXERO, (u8 *)&tmp_one); 1133 HW_DESC_RXERO, &tmp_one);
1134 } 1134 }
1135 return 0; 1135 return 0;
1136} 1136}
@@ -1263,7 +1263,7 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
1263 rtlpriv->cfg->ops->set_desc((u8 *) entry, 1263 rtlpriv->cfg->ops->set_desc((u8 *) entry,
1264 false, 1264 false,
1265 HW_DESC_RXOWN, 1265 HW_DESC_RXOWN,
1266 (u8 *)&tmp_one); 1266 &tmp_one);
1267 } 1267 }
1268 rtlpci->rx_ring[rx_queue_idx].idx = 0; 1268 rtlpci->rx_ring[rx_queue_idx].idx = 0;
1269 } 1269 }
@@ -1273,17 +1273,18 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
1273 *after reset, release previous pending packet, 1273 *after reset, release previous pending packet,
1274 *and force the tx idx to the first one 1274 *and force the tx idx to the first one
1275 */ 1275 */
1276 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
1277 for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) { 1276 for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
1278 if (rtlpci->tx_ring[i].desc) { 1277 if (rtlpci->tx_ring[i].desc) {
1279 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[i]; 1278 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[i];
1280 1279
1281 while (skb_queue_len(&ring->queue)) { 1280 while (skb_queue_len(&ring->queue)) {
1282 struct rtl_tx_desc *entry = 1281 struct rtl_tx_desc *entry;
1283 &ring->desc[ring->idx]; 1282 struct sk_buff *skb;
1284 struct sk_buff *skb =
1285 __skb_dequeue(&ring->queue);
1286 1283
1284 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock,
1285 flags);
1286 entry = &ring->desc[ring->idx];
1287 skb = __skb_dequeue(&ring->queue);
1287 pci_unmap_single(rtlpci->pdev, 1288 pci_unmap_single(rtlpci->pdev,
1288 rtlpriv->cfg->ops-> 1289 rtlpriv->cfg->ops->
1289 get_desc((u8 *) 1290 get_desc((u8 *)
@@ -1291,15 +1292,15 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
1291 true, 1292 true,
1292 HW_DESC_TXBUFF_ADDR), 1293 HW_DESC_TXBUFF_ADDR),
1293 skb->len, PCI_DMA_TODEVICE); 1294 skb->len, PCI_DMA_TODEVICE);
1294 kfree_skb(skb);
1295 ring->idx = (ring->idx + 1) % ring->entries; 1295 ring->idx = (ring->idx + 1) % ring->entries;
1296 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock,
1297 flags);
1298 kfree_skb(skb);
1296 } 1299 }
1297 ring->idx = 0; 1300 ring->idx = 0;
1298 } 1301 }
1299 } 1302 }
1300 1303
1301 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
1302
1303 return 0; 1304 return 0;
1304} 1305}
1305 1306
@@ -1422,7 +1423,7 @@ static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
1422 __skb_queue_tail(&ring->queue, skb); 1423 __skb_queue_tail(&ring->queue, skb);
1423 1424
1424 rtlpriv->cfg->ops->set_desc((u8 *)pdesc, true, 1425 rtlpriv->cfg->ops->set_desc((u8 *)pdesc, true,
1425 HW_DESC_OWN, (u8 *)&temp_one); 1426 HW_DESC_OWN, &temp_one);
1426 1427
1427 1428
1428 if ((ring->entries - skb_queue_len(&ring->queue)) < 2 && 1429 if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index 5ae26647f340..13ad33e85577 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -333,10 +333,10 @@ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
333 rpwm_val = 0x0C; /* RF on */ 333 rpwm_val = 0x0C; /* RF on */
334 fw_pwrmode = FW_PS_ACTIVE_MODE; 334 fw_pwrmode = FW_PS_ACTIVE_MODE;
335 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, 335 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
336 (u8 *) (&rpwm_val)); 336 &rpwm_val);
337 rtlpriv->cfg->ops->set_hw_reg(hw, 337 rtlpriv->cfg->ops->set_hw_reg(hw,
338 HW_VAR_H2C_FW_PWRMODE, 338 HW_VAR_H2C_FW_PWRMODE,
339 (u8 *) (&fw_pwrmode)); 339 &fw_pwrmode);
340 fw_current_inps = false; 340 fw_current_inps = false;
341 341
342 rtlpriv->cfg->ops->set_hw_reg(hw, 342 rtlpriv->cfg->ops->set_hw_reg(hw,
@@ -356,11 +356,11 @@ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
356 (u8 *) (&fw_current_inps)); 356 (u8 *) (&fw_current_inps));
357 rtlpriv->cfg->ops->set_hw_reg(hw, 357 rtlpriv->cfg->ops->set_hw_reg(hw,
358 HW_VAR_H2C_FW_PWRMODE, 358 HW_VAR_H2C_FW_PWRMODE,
359 (u8 *) (&ppsc->fwctrl_psmode)); 359 &ppsc->fwctrl_psmode);
360 360
361 rtlpriv->cfg->ops->set_hw_reg(hw, 361 rtlpriv->cfg->ops->set_hw_reg(hw,
362 HW_VAR_SET_RPWM, 362 HW_VAR_SET_RPWM,
363 (u8 *) (&rpwm_val)); 363 &rpwm_val);
364 } else { 364 } else {
365 /* Reset the power save related parameters. */ 365 /* Reset the power save related parameters. */
366 ppsc->dot11_psmode = EACTIVE; 366 ppsc->dot11_psmode = EACTIVE;
@@ -446,7 +446,7 @@ void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len)
446{ 446{
447 struct rtl_priv *rtlpriv = rtl_priv(hw); 447 struct rtl_priv *rtlpriv = rtl_priv(hw);
448 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 448 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
449 struct ieee80211_hdr *hdr = (void *) data; 449 struct ieee80211_hdr *hdr = data;
450 struct ieee80211_tim_ie *tim_ie; 450 struct ieee80211_tim_ie *tim_ie;
451 u8 *tim; 451 u8 *tim;
452 u8 tim_len; 452 u8 tim_len;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index f7f48c7ac854..a45afda8259c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -656,9 +656,8 @@ static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
656 } else { 656 } else {
657 if (rtlpriv->dm.current_turbo_edca) { 657 if (rtlpriv->dm.current_turbo_edca) {
658 u8 tmp = AC0_BE; 658 u8 tmp = AC0_BE;
659 rtlpriv->cfg->ops->set_hw_reg(hw, 659 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
660 HW_VAR_AC_PARAM, 660 &tmp);
661 (u8 *) (&tmp));
662 rtlpriv->dm.current_turbo_edca = false; 661 rtlpriv->dm.current_turbo_edca = false;
663 } 662 }
664 } 663 }
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index 692c8ef5ee89..44febfde9493 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -168,7 +168,7 @@ static void _rtl92c_write_fw(struct ieee80211_hw *hw,
168{ 168{
169 struct rtl_priv *rtlpriv = rtl_priv(hw); 169 struct rtl_priv *rtlpriv = rtl_priv(hw);
170 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 170 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
171 u8 *bufferPtr = (u8 *) buffer; 171 u8 *bufferPtr = buffer;
172 172
173 RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes\n", size); 173 RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes\n", size);
174 174
@@ -262,7 +262,7 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
262 return 1; 262 return 1;
263 263
264 pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware; 264 pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
265 pfwdata = (u8 *) rtlhal->pfirmware; 265 pfwdata = rtlhal->pfirmware;
266 fwsize = rtlhal->fwsize; 266 fwsize = rtlhal->fwsize;
267 267
268 if (IS_FW_HEADER_EXIST(pfwheader)) { 268 if (IS_FW_HEADER_EXIST(pfwheader)) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index 5c4d9bc040f1..bd0da7ef290b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -214,13 +214,13 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
214 for (e_aci = 0; e_aci < AC_MAX; e_aci++) { 214 for (e_aci = 0; e_aci < AC_MAX; e_aci++) {
215 rtlpriv->cfg->ops->set_hw_reg(hw, 215 rtlpriv->cfg->ops->set_hw_reg(hw,
216 HW_VAR_AC_PARAM, 216 HW_VAR_AC_PARAM,
217 (u8 *) (&e_aci)); 217 &e_aci);
218 } 218 }
219 break; 219 break;
220 } 220 }
221 case HW_VAR_ACK_PREAMBLE:{ 221 case HW_VAR_ACK_PREAMBLE:{
222 u8 reg_tmp; 222 u8 reg_tmp;
223 u8 short_preamble = (bool) (*(u8 *) val); 223 u8 short_preamble = (bool)*val;
224 reg_tmp = (mac->cur_40_prime_sc) << 5; 224 reg_tmp = (mac->cur_40_prime_sc) << 5;
225 if (short_preamble) 225 if (short_preamble)
226 reg_tmp |= 0x80; 226 reg_tmp |= 0x80;
@@ -232,7 +232,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
232 u8 min_spacing_to_set; 232 u8 min_spacing_to_set;
233 u8 sec_min_space; 233 u8 sec_min_space;
234 234
235 min_spacing_to_set = *((u8 *) val); 235 min_spacing_to_set = *val;
236 if (min_spacing_to_set <= 7) { 236 if (min_spacing_to_set <= 7) {
237 sec_min_space = 0; 237 sec_min_space = 0;
238 238
@@ -257,7 +257,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
257 case HW_VAR_SHORTGI_DENSITY:{ 257 case HW_VAR_SHORTGI_DENSITY:{
258 u8 density_to_set; 258 u8 density_to_set;
259 259
260 density_to_set = *((u8 *) val); 260 density_to_set = *val;
261 mac->min_space_cfg |= (density_to_set << 3); 261 mac->min_space_cfg |= (density_to_set << 3);
262 262
263 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, 263 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
@@ -284,7 +284,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
284 else 284 else
285 p_regtoset = regtoset_normal; 285 p_regtoset = regtoset_normal;
286 286
287 factor_toset = *((u8 *) val); 287 factor_toset = *(val);
288 if (factor_toset <= 3) { 288 if (factor_toset <= 3) {
289 factor_toset = (1 << (factor_toset + 2)); 289 factor_toset = (1 << (factor_toset + 2));
290 if (factor_toset > 0xf) 290 if (factor_toset > 0xf)
@@ -316,17 +316,17 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
316 break; 316 break;
317 } 317 }
318 case HW_VAR_AC_PARAM:{ 318 case HW_VAR_AC_PARAM:{
319 u8 e_aci = *((u8 *) val); 319 u8 e_aci = *(val);
320 rtl92c_dm_init_edca_turbo(hw); 320 rtl92c_dm_init_edca_turbo(hw);
321 321
322 if (rtlpci->acm_method != eAcmWay2_SW) 322 if (rtlpci->acm_method != eAcmWay2_SW)
323 rtlpriv->cfg->ops->set_hw_reg(hw, 323 rtlpriv->cfg->ops->set_hw_reg(hw,
324 HW_VAR_ACM_CTRL, 324 HW_VAR_ACM_CTRL,
325 (u8 *) (&e_aci)); 325 (&e_aci));
326 break; 326 break;
327 } 327 }
328 case HW_VAR_ACM_CTRL:{ 328 case HW_VAR_ACM_CTRL:{
329 u8 e_aci = *((u8 *) val); 329 u8 e_aci = *(val);
330 union aci_aifsn *p_aci_aifsn = 330 union aci_aifsn *p_aci_aifsn =
331 (union aci_aifsn *)(&(mac->ac[0].aifs)); 331 (union aci_aifsn *)(&(mac->ac[0].aifs));
332 u8 acm = p_aci_aifsn->f.acm; 332 u8 acm = p_aci_aifsn->f.acm;
@@ -382,7 +382,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
382 break; 382 break;
383 } 383 }
384 case HW_VAR_RETRY_LIMIT:{ 384 case HW_VAR_RETRY_LIMIT:{
385 u8 retry_limit = ((u8 *) (val))[0]; 385 u8 retry_limit = val[0];
386 386
387 rtl_write_word(rtlpriv, REG_RL, 387 rtl_write_word(rtlpriv, REG_RL,
388 retry_limit << RETRY_LIMIT_SHORT_SHIFT | 388 retry_limit << RETRY_LIMIT_SHORT_SHIFT |
@@ -396,13 +396,13 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
396 rtlefuse->efuse_usedbytes = *((u16 *) val); 396 rtlefuse->efuse_usedbytes = *((u16 *) val);
397 break; 397 break;
398 case HW_VAR_EFUSE_USAGE: 398 case HW_VAR_EFUSE_USAGE:
399 rtlefuse->efuse_usedpercentage = *((u8 *) val); 399 rtlefuse->efuse_usedpercentage = *val;
400 break; 400 break;
401 case HW_VAR_IO_CMD: 401 case HW_VAR_IO_CMD:
402 rtl92c_phy_set_io_cmd(hw, (*(enum io_type *)val)); 402 rtl92c_phy_set_io_cmd(hw, (*(enum io_type *)val));
403 break; 403 break;
404 case HW_VAR_WPA_CONFIG: 404 case HW_VAR_WPA_CONFIG:
405 rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val)); 405 rtl_write_byte(rtlpriv, REG_SECCFG, *val);
406 break; 406 break;
407 case HW_VAR_SET_RPWM:{ 407 case HW_VAR_SET_RPWM:{
408 u8 rpwm_val; 408 u8 rpwm_val;
@@ -411,31 +411,30 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
411 udelay(1); 411 udelay(1);
412 412
413 if (rpwm_val & BIT(7)) { 413 if (rpwm_val & BIT(7)) {
414 rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, 414 rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val);
415 (*(u8 *) val));
416 } else { 415 } else {
417 rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, 416 rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
418 ((*(u8 *) val) | BIT(7))); 417 *val | BIT(7));
419 } 418 }
420 419
421 break; 420 break;
422 } 421 }
423 case HW_VAR_H2C_FW_PWRMODE:{ 422 case HW_VAR_H2C_FW_PWRMODE:{
424 u8 psmode = (*(u8 *) val); 423 u8 psmode = *val;
425 424
426 if ((psmode != FW_PS_ACTIVE_MODE) && 425 if ((psmode != FW_PS_ACTIVE_MODE) &&
427 (!IS_92C_SERIAL(rtlhal->version))) { 426 (!IS_92C_SERIAL(rtlhal->version))) {
428 rtl92c_dm_rf_saving(hw, true); 427 rtl92c_dm_rf_saving(hw, true);
429 } 428 }
430 429
431 rtl92c_set_fw_pwrmode_cmd(hw, (*(u8 *) val)); 430 rtl92c_set_fw_pwrmode_cmd(hw, *val);
432 break; 431 break;
433 } 432 }
434 case HW_VAR_FW_PSMODE_STATUS: 433 case HW_VAR_FW_PSMODE_STATUS:
435 ppsc->fw_current_inpsmode = *((bool *) val); 434 ppsc->fw_current_inpsmode = *((bool *) val);
436 break; 435 break;
437 case HW_VAR_H2C_FW_JOINBSSRPT:{ 436 case HW_VAR_H2C_FW_JOINBSSRPT:{
438 u8 mstatus = (*(u8 *) val); 437 u8 mstatus = *val;
439 u8 tmp_regcr, tmp_reg422; 438 u8 tmp_regcr, tmp_reg422;
440 bool recover = false; 439 bool recover = false;
441 440
@@ -472,7 +471,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
472 rtl_write_byte(rtlpriv, REG_CR + 1, 471 rtl_write_byte(rtlpriv, REG_CR + 1,
473 (tmp_regcr & ~(BIT(0)))); 472 (tmp_regcr & ~(BIT(0))));
474 } 473 }
475 rtl92c_set_fw_joinbss_report_cmd(hw, (*(u8 *) val)); 474 rtl92c_set_fw_joinbss_report_cmd(hw, *val);
476 475
477 break; 476 break;
478 } 477 }
@@ -486,7 +485,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
486 break; 485 break;
487 } 486 }
488 case HW_VAR_CORRECT_TSF:{ 487 case HW_VAR_CORRECT_TSF:{
489 u8 btype_ibss = ((u8 *) (val))[0]; 488 u8 btype_ibss = val[0];
490 489
491 if (btype_ibss) 490 if (btype_ibss)
492 _rtl92ce_stop_tx_beacon(hw); 491 _rtl92ce_stop_tx_beacon(hw);
@@ -1589,10 +1588,10 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
1589 rtlefuse->autoload_failflag, 1588 rtlefuse->autoload_failflag,
1590 hwinfo); 1589 hwinfo);
1591 1590
1592 rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN]; 1591 rtlefuse->eeprom_channelplan = *&hwinfo[EEPROM_CHANNELPLAN];
1593 rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION]; 1592 rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
1594 rtlefuse->txpwr_fromeprom = true; 1593 rtlefuse->txpwr_fromeprom = true;
1595 rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID]; 1594 rtlefuse->eeprom_oemid = *&hwinfo[EEPROM_CUSTOMER_ID];
1596 1595
1597 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, 1596 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1598 "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid); 1597 "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
@@ -1939,7 +1938,7 @@ void rtl92ce_update_channel_access_setting(struct ieee80211_hw *hw)
1939 u16 sifs_timer; 1938 u16 sifs_timer;
1940 1939
1941 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, 1940 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
1942 (u8 *)&mac->slot_time); 1941 &mac->slot_time);
1943 if (!mac->ht_enable) 1942 if (!mac->ht_enable)
1944 sifs_timer = 0x0a0a; 1943 sifs_timer = 0x0a0a;
1945 else 1944 else
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index 3af874e69595..52166640f167 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -605,7 +605,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
605 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 605 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
606 bool defaultadapter = true; 606 bool defaultadapter = true;
607 struct ieee80211_sta *sta; 607 struct ieee80211_sta *sta;
608 u8 *pdesc = (u8 *) pdesc_tx; 608 u8 *pdesc = pdesc_tx;
609 u16 seq_number; 609 u16 seq_number;
610 __le16 fc = hdr->frame_control; 610 __le16 fc = hdr->frame_control;
611 u8 fw_qsel = _rtl92ce_map_hwqueue_to_fwqueue(skb, hw_queue); 611 u8 fw_qsel = _rtl92ce_map_hwqueue_to_fwqueue(skb, hw_queue);
@@ -806,7 +806,7 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
806 806
807 SET_TX_DESC_OWN(pdesc, 1); 807 SET_TX_DESC_OWN(pdesc, 1);
808 808
809 SET_TX_DESC_PKT_SIZE((u8 *) pdesc, (u16) (skb->len)); 809 SET_TX_DESC_PKT_SIZE(pdesc, (u16) (skb->len));
810 810
811 SET_TX_DESC_FIRST_SEG(pdesc, 1); 811 SET_TX_DESC_FIRST_SEG(pdesc, 1);
812 SET_TX_DESC_LAST_SEG(pdesc, 1); 812 SET_TX_DESC_LAST_SEG(pdesc, 1);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 0c74d4f2eeb4..4bbb711a36c5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -381,11 +381,11 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
381 rtlefuse->eeprom_did = le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_DID]); 381 rtlefuse->eeprom_did = le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_DID]);
382 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, " VID = 0x%02x PID = 0x%02x\n", 382 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, " VID = 0x%02x PID = 0x%02x\n",
383 rtlefuse->eeprom_vid, rtlefuse->eeprom_did); 383 rtlefuse->eeprom_vid, rtlefuse->eeprom_did);
384 rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN]; 384 rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN];
385 rtlefuse->eeprom_version = 385 rtlefuse->eeprom_version =
386 le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_VERSION]); 386 le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_VERSION]);
387 rtlefuse->txpwr_fromeprom = true; 387 rtlefuse->txpwr_fromeprom = true;
388 rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID]; 388 rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID];
389 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM Customer ID: 0x%2x\n", 389 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM Customer ID: 0x%2x\n",
390 rtlefuse->eeprom_oemid); 390 rtlefuse->eeprom_oemid);
391 if (rtlhal->oem_id == RT_CID_DEFAULT) { 391 if (rtlhal->oem_id == RT_CID_DEFAULT) {
@@ -1660,7 +1660,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1660 for (e_aci = 0; e_aci < AC_MAX; e_aci++) 1660 for (e_aci = 0; e_aci < AC_MAX; e_aci++)
1661 rtlpriv->cfg->ops->set_hw_reg(hw, 1661 rtlpriv->cfg->ops->set_hw_reg(hw,
1662 HW_VAR_AC_PARAM, 1662 HW_VAR_AC_PARAM,
1663 (u8 *)(&e_aci)); 1663 &e_aci);
1664 } else { 1664 } else {
1665 u8 sifstime = 0; 1665 u8 sifstime = 0;
1666 u8 u1bAIFS; 1666 u8 u1bAIFS;
@@ -1685,7 +1685,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1685 } 1685 }
1686 case HW_VAR_ACK_PREAMBLE:{ 1686 case HW_VAR_ACK_PREAMBLE:{
1687 u8 reg_tmp; 1687 u8 reg_tmp;
1688 u8 short_preamble = (bool) (*(u8 *) val); 1688 u8 short_preamble = (bool)*val;
1689 reg_tmp = 0; 1689 reg_tmp = 0;
1690 if (short_preamble) 1690 if (short_preamble)
1691 reg_tmp |= 0x80; 1691 reg_tmp |= 0x80;
@@ -1696,7 +1696,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1696 u8 min_spacing_to_set; 1696 u8 min_spacing_to_set;
1697 u8 sec_min_space; 1697 u8 sec_min_space;
1698 1698
1699 min_spacing_to_set = *((u8 *) val); 1699 min_spacing_to_set = *val;
1700 if (min_spacing_to_set <= 7) { 1700 if (min_spacing_to_set <= 7) {
1701 switch (rtlpriv->sec.pairwise_enc_algorithm) { 1701 switch (rtlpriv->sec.pairwise_enc_algorithm) {
1702 case NO_ENCRYPTION: 1702 case NO_ENCRYPTION:
@@ -1729,7 +1729,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1729 case HW_VAR_SHORTGI_DENSITY:{ 1729 case HW_VAR_SHORTGI_DENSITY:{
1730 u8 density_to_set; 1730 u8 density_to_set;
1731 1731
1732 density_to_set = *((u8 *) val); 1732 density_to_set = *val;
1733 density_to_set &= 0x1f; 1733 density_to_set &= 0x1f;
1734 mac->min_space_cfg &= 0x07; 1734 mac->min_space_cfg &= 0x07;
1735 mac->min_space_cfg |= (density_to_set << 3); 1735 mac->min_space_cfg |= (density_to_set << 3);
@@ -1747,7 +1747,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1747 u8 index = 0; 1747 u8 index = 0;
1748 1748
1749 p_regtoset = regtoset_normal; 1749 p_regtoset = regtoset_normal;
1750 factor_toset = *((u8 *) val); 1750 factor_toset = *val;
1751 if (factor_toset <= 3) { 1751 if (factor_toset <= 3) {
1752 factor_toset = (1 << (factor_toset + 2)); 1752 factor_toset = (1 << (factor_toset + 2));
1753 if (factor_toset > 0xf) 1753 if (factor_toset > 0xf)
@@ -1774,7 +1774,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1774 break; 1774 break;
1775 } 1775 }
1776 case HW_VAR_AC_PARAM:{ 1776 case HW_VAR_AC_PARAM:{
1777 u8 e_aci = *((u8 *) val); 1777 u8 e_aci = *val;
1778 u32 u4b_ac_param; 1778 u32 u4b_ac_param;
1779 u16 cw_min = le16_to_cpu(mac->ac[e_aci].cw_min); 1779 u16 cw_min = le16_to_cpu(mac->ac[e_aci].cw_min);
1780 u16 cw_max = le16_to_cpu(mac->ac[e_aci].cw_max); 1780 u16 cw_max = le16_to_cpu(mac->ac[e_aci].cw_max);
@@ -1814,11 +1814,11 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1814 } 1814 }
1815 if (rtlusb->acm_method != eAcmWay2_SW) 1815 if (rtlusb->acm_method != eAcmWay2_SW)
1816 rtlpriv->cfg->ops->set_hw_reg(hw, 1816 rtlpriv->cfg->ops->set_hw_reg(hw,
1817 HW_VAR_ACM_CTRL, (u8 *)(&e_aci)); 1817 HW_VAR_ACM_CTRL, &e_aci);
1818 break; 1818 break;
1819 } 1819 }
1820 case HW_VAR_ACM_CTRL:{ 1820 case HW_VAR_ACM_CTRL:{
1821 u8 e_aci = *((u8 *) val); 1821 u8 e_aci = *val;
1822 union aci_aifsn *p_aci_aifsn = (union aci_aifsn *) 1822 union aci_aifsn *p_aci_aifsn = (union aci_aifsn *)
1823 (&(mac->ac[0].aifs)); 1823 (&(mac->ac[0].aifs));
1824 u8 acm = p_aci_aifsn->f.acm; 1824 u8 acm = p_aci_aifsn->f.acm;
@@ -1874,7 +1874,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1874 break; 1874 break;
1875 } 1875 }
1876 case HW_VAR_RETRY_LIMIT:{ 1876 case HW_VAR_RETRY_LIMIT:{
1877 u8 retry_limit = ((u8 *) (val))[0]; 1877 u8 retry_limit = val[0];
1878 1878
1879 rtl_write_word(rtlpriv, REG_RL, 1879 rtl_write_word(rtlpriv, REG_RL,
1880 retry_limit << RETRY_LIMIT_SHORT_SHIFT | 1880 retry_limit << RETRY_LIMIT_SHORT_SHIFT |
@@ -1891,39 +1891,38 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1891 rtlefuse->efuse_usedbytes = *((u16 *) val); 1891 rtlefuse->efuse_usedbytes = *((u16 *) val);
1892 break; 1892 break;
1893 case HW_VAR_EFUSE_USAGE: 1893 case HW_VAR_EFUSE_USAGE:
1894 rtlefuse->efuse_usedpercentage = *((u8 *) val); 1894 rtlefuse->efuse_usedpercentage = *val;
1895 break; 1895 break;
1896 case HW_VAR_IO_CMD: 1896 case HW_VAR_IO_CMD:
1897 rtl92c_phy_set_io_cmd(hw, (*(enum io_type *)val)); 1897 rtl92c_phy_set_io_cmd(hw, (*(enum io_type *)val));
1898 break; 1898 break;
1899 case HW_VAR_WPA_CONFIG: 1899 case HW_VAR_WPA_CONFIG:
1900 rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val)); 1900 rtl_write_byte(rtlpriv, REG_SECCFG, *val);
1901 break; 1901 break;
1902 case HW_VAR_SET_RPWM:{ 1902 case HW_VAR_SET_RPWM:{
1903 u8 rpwm_val = rtl_read_byte(rtlpriv, REG_USB_HRPWM); 1903 u8 rpwm_val = rtl_read_byte(rtlpriv, REG_USB_HRPWM);
1904 1904
1905 if (rpwm_val & BIT(7)) 1905 if (rpwm_val & BIT(7))
1906 rtl_write_byte(rtlpriv, REG_USB_HRPWM, 1906 rtl_write_byte(rtlpriv, REG_USB_HRPWM, *val);
1907 (*(u8 *)val));
1908 else 1907 else
1909 rtl_write_byte(rtlpriv, REG_USB_HRPWM, 1908 rtl_write_byte(rtlpriv, REG_USB_HRPWM,
1910 ((*(u8 *)val) | BIT(7))); 1909 *val | BIT(7));
1911 break; 1910 break;
1912 } 1911 }
1913 case HW_VAR_H2C_FW_PWRMODE:{ 1912 case HW_VAR_H2C_FW_PWRMODE:{
1914 u8 psmode = (*(u8 *) val); 1913 u8 psmode = *val;
1915 1914
1916 if ((psmode != FW_PS_ACTIVE_MODE) && 1915 if ((psmode != FW_PS_ACTIVE_MODE) &&
1917 (!IS_92C_SERIAL(rtlhal->version))) 1916 (!IS_92C_SERIAL(rtlhal->version)))
1918 rtl92c_dm_rf_saving(hw, true); 1917 rtl92c_dm_rf_saving(hw, true);
1919 rtl92c_set_fw_pwrmode_cmd(hw, (*(u8 *) val)); 1918 rtl92c_set_fw_pwrmode_cmd(hw, (*val));
1920 break; 1919 break;
1921 } 1920 }
1922 case HW_VAR_FW_PSMODE_STATUS: 1921 case HW_VAR_FW_PSMODE_STATUS:
1923 ppsc->fw_current_inpsmode = *((bool *) val); 1922 ppsc->fw_current_inpsmode = *((bool *) val);
1924 break; 1923 break;
1925 case HW_VAR_H2C_FW_JOINBSSRPT:{ 1924 case HW_VAR_H2C_FW_JOINBSSRPT:{
1926 u8 mstatus = (*(u8 *) val); 1925 u8 mstatus = *val;
1927 u8 tmp_reg422; 1926 u8 tmp_reg422;
1928 bool recover = false; 1927 bool recover = false;
1929 1928
@@ -1948,7 +1947,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1948 tmp_reg422 | BIT(6)); 1947 tmp_reg422 | BIT(6));
1949 rtl_write_byte(rtlpriv, REG_CR + 1, 0x02); 1948 rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
1950 } 1949 }
1951 rtl92c_set_fw_joinbss_report_cmd(hw, (*(u8 *) val)); 1950 rtl92c_set_fw_joinbss_report_cmd(hw, (*val));
1952 break; 1951 break;
1953 } 1952 }
1954 case HW_VAR_AID:{ 1953 case HW_VAR_AID:{
@@ -1961,7 +1960,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1961 break; 1960 break;
1962 } 1961 }
1963 case HW_VAR_CORRECT_TSF:{ 1962 case HW_VAR_CORRECT_TSF:{
1964 u8 btype_ibss = ((u8 *) (val))[0]; 1963 u8 btype_ibss = val[0];
1965 1964
1966 if (btype_ibss) 1965 if (btype_ibss)
1967 _rtl92cu_stop_tx_beacon(hw); 1966 _rtl92cu_stop_tx_beacon(hw);
@@ -2184,7 +2183,7 @@ void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw)
2184 u16 sifs_timer; 2183 u16 sifs_timer;
2185 2184
2186 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, 2185 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
2187 (u8 *)&mac->slot_time); 2186 &mac->slot_time);
2188 if (!mac->ht_enable) 2187 if (!mac->ht_enable)
2189 sifs_timer = 0x0a0a; 2188 sifs_timer = 0x0a0a;
2190 else 2189 else
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 21bc827c5fa6..2e6eb356a93e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -668,7 +668,7 @@ void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw,
668 SET_TX_DESC_RATE_ID(pdesc, 7); 668 SET_TX_DESC_RATE_ID(pdesc, 7);
669 SET_TX_DESC_MACID(pdesc, 0); 669 SET_TX_DESC_MACID(pdesc, 0);
670 SET_TX_DESC_OWN(pdesc, 1); 670 SET_TX_DESC_OWN(pdesc, 1);
671 SET_TX_DESC_PKT_SIZE((u8 *) pdesc, (u16) (skb->len)); 671 SET_TX_DESC_PKT_SIZE(pdesc, (u16)skb->len);
672 SET_TX_DESC_FIRST_SEG(pdesc, 1); 672 SET_TX_DESC_FIRST_SEG(pdesc, 1);
673 SET_TX_DESC_LAST_SEG(pdesc, 1); 673 SET_TX_DESC_LAST_SEG(pdesc, 1);
674 SET_TX_DESC_OFFSET(pdesc, 0x20); 674 SET_TX_DESC_OFFSET(pdesc, 0x20);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index a7d63a84551a..c0201ed69dd7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -696,7 +696,7 @@ static void rtl92d_dm_check_edca_turbo(struct ieee80211_hw *hw)
696 if (rtlpriv->dm.current_turbo_edca) { 696 if (rtlpriv->dm.current_turbo_edca) {
697 u8 tmp = AC0_BE; 697 u8 tmp = AC0_BE;
698 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, 698 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
699 (u8 *) (&tmp)); 699 &tmp);
700 rtlpriv->dm.current_turbo_edca = false; 700 rtlpriv->dm.current_turbo_edca = false;
701 } 701 }
702 } 702 }
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
index f548a8d0068d..895ae6c1f354 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
@@ -120,7 +120,7 @@ static void _rtl92d_write_fw(struct ieee80211_hw *hw,
120{ 120{
121 struct rtl_priv *rtlpriv = rtl_priv(hw); 121 struct rtl_priv *rtlpriv = rtl_priv(hw);
122 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 122 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
123 u8 *bufferPtr = (u8 *) buffer; 123 u8 *bufferPtr = buffer;
124 u32 pagenums, remainSize; 124 u32 pagenums, remainSize;
125 u32 page, offset; 125 u32 page, offset;
126 126
@@ -256,8 +256,8 @@ int rtl92d_download_fw(struct ieee80211_hw *hw)
256 if (rtlpriv->max_fw_size == 0 || !rtlhal->pfirmware) 256 if (rtlpriv->max_fw_size == 0 || !rtlhal->pfirmware)
257 return 1; 257 return 1;
258 fwsize = rtlhal->fwsize; 258 fwsize = rtlhal->fwsize;
259 pfwheader = (u8 *) rtlhal->pfirmware; 259 pfwheader = rtlhal->pfirmware;
260 pfwdata = (u8 *) rtlhal->pfirmware; 260 pfwdata = rtlhal->pfirmware;
261 rtlhal->fw_version = (u16) GET_FIRMWARE_HDR_VERSION(pfwheader); 261 rtlhal->fw_version = (u16) GET_FIRMWARE_HDR_VERSION(pfwheader);
262 rtlhal->fw_subversion = (u16) GET_FIRMWARE_HDR_SUB_VER(pfwheader); 262 rtlhal->fw_subversion = (u16) GET_FIRMWARE_HDR_SUB_VER(pfwheader);
263 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, 263 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
index b338d526c422..f4051f4f0390 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
@@ -235,12 +235,12 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
235 for (e_aci = 0; e_aci < AC_MAX; e_aci++) 235 for (e_aci = 0; e_aci < AC_MAX; e_aci++)
236 rtlpriv->cfg->ops->set_hw_reg(hw, 236 rtlpriv->cfg->ops->set_hw_reg(hw,
237 HW_VAR_AC_PARAM, 237 HW_VAR_AC_PARAM,
238 (u8 *) (&e_aci)); 238 (&e_aci));
239 break; 239 break;
240 } 240 }
241 case HW_VAR_ACK_PREAMBLE: { 241 case HW_VAR_ACK_PREAMBLE: {
242 u8 reg_tmp; 242 u8 reg_tmp;
243 u8 short_preamble = (bool) (*(u8 *) val); 243 u8 short_preamble = (bool) (*val);
244 244
245 reg_tmp = (mac->cur_40_prime_sc) << 5; 245 reg_tmp = (mac->cur_40_prime_sc) << 5;
246 if (short_preamble) 246 if (short_preamble)
@@ -252,7 +252,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
252 u8 min_spacing_to_set; 252 u8 min_spacing_to_set;
253 u8 sec_min_space; 253 u8 sec_min_space;
254 254
255 min_spacing_to_set = *((u8 *) val); 255 min_spacing_to_set = *val;
256 if (min_spacing_to_set <= 7) { 256 if (min_spacing_to_set <= 7) {
257 sec_min_space = 0; 257 sec_min_space = 0;
258 if (min_spacing_to_set < sec_min_space) 258 if (min_spacing_to_set < sec_min_space)
@@ -271,7 +271,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
271 case HW_VAR_SHORTGI_DENSITY: { 271 case HW_VAR_SHORTGI_DENSITY: {
272 u8 density_to_set; 272 u8 density_to_set;
273 273
274 density_to_set = *((u8 *) val); 274 density_to_set = *val;
275 mac->min_space_cfg = rtlpriv->rtlhal.minspace_cfg; 275 mac->min_space_cfg = rtlpriv->rtlhal.minspace_cfg;
276 mac->min_space_cfg |= (density_to_set << 3); 276 mac->min_space_cfg |= (density_to_set << 3);
277 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, 277 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
@@ -293,7 +293,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
293 regtoSet = 0x66626641; 293 regtoSet = 0x66626641;
294 else 294 else
295 regtoSet = 0xb972a841; 295 regtoSet = 0xb972a841;
296 factor_toset = *((u8 *) val); 296 factor_toset = *val;
297 if (factor_toset <= 3) { 297 if (factor_toset <= 3) {
298 factor_toset = (1 << (factor_toset + 2)); 298 factor_toset = (1 << (factor_toset + 2));
299 if (factor_toset > 0xf) 299 if (factor_toset > 0xf)
@@ -316,15 +316,15 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
316 break; 316 break;
317 } 317 }
318 case HW_VAR_AC_PARAM: { 318 case HW_VAR_AC_PARAM: {
319 u8 e_aci = *((u8 *) val); 319 u8 e_aci = *val;
320 rtl92d_dm_init_edca_turbo(hw); 320 rtl92d_dm_init_edca_turbo(hw);
321 if (rtlpci->acm_method != eAcmWay2_SW) 321 if (rtlpci->acm_method != eAcmWay2_SW)
322 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL, 322 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL,
323 (u8 *) (&e_aci)); 323 &e_aci);
324 break; 324 break;
325 } 325 }
326 case HW_VAR_ACM_CTRL: { 326 case HW_VAR_ACM_CTRL: {
327 u8 e_aci = *((u8 *) val); 327 u8 e_aci = *val;
328 union aci_aifsn *p_aci_aifsn = 328 union aci_aifsn *p_aci_aifsn =
329 (union aci_aifsn *)(&(mac->ac[0].aifs)); 329 (union aci_aifsn *)(&(mac->ac[0].aifs));
330 u8 acm = p_aci_aifsn->f.acm; 330 u8 acm = p_aci_aifsn->f.acm;
@@ -376,7 +376,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
376 rtlpci->receive_config = ((u32 *) (val))[0]; 376 rtlpci->receive_config = ((u32 *) (val))[0];
377 break; 377 break;
378 case HW_VAR_RETRY_LIMIT: { 378 case HW_VAR_RETRY_LIMIT: {
379 u8 retry_limit = ((u8 *) (val))[0]; 379 u8 retry_limit = val[0];
380 380
381 rtl_write_word(rtlpriv, REG_RL, 381 rtl_write_word(rtlpriv, REG_RL,
382 retry_limit << RETRY_LIMIT_SHORT_SHIFT | 382 retry_limit << RETRY_LIMIT_SHORT_SHIFT |
@@ -390,16 +390,16 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
390 rtlefuse->efuse_usedbytes = *((u16 *) val); 390 rtlefuse->efuse_usedbytes = *((u16 *) val);
391 break; 391 break;
392 case HW_VAR_EFUSE_USAGE: 392 case HW_VAR_EFUSE_USAGE:
393 rtlefuse->efuse_usedpercentage = *((u8 *) val); 393 rtlefuse->efuse_usedpercentage = *val;
394 break; 394 break;
395 case HW_VAR_IO_CMD: 395 case HW_VAR_IO_CMD:
396 rtl92d_phy_set_io_cmd(hw, (*(enum io_type *)val)); 396 rtl92d_phy_set_io_cmd(hw, (*(enum io_type *)val));
397 break; 397 break;
398 case HW_VAR_WPA_CONFIG: 398 case HW_VAR_WPA_CONFIG:
399 rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val)); 399 rtl_write_byte(rtlpriv, REG_SECCFG, *val);
400 break; 400 break;
401 case HW_VAR_SET_RPWM: 401 case HW_VAR_SET_RPWM:
402 rtl92d_fill_h2c_cmd(hw, H2C_PWRM, 1, (u8 *) (val)); 402 rtl92d_fill_h2c_cmd(hw, H2C_PWRM, 1, (val));
403 break; 403 break;
404 case HW_VAR_H2C_FW_PWRMODE: 404 case HW_VAR_H2C_FW_PWRMODE:
405 break; 405 break;
@@ -407,7 +407,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
407 ppsc->fw_current_inpsmode = *((bool *) val); 407 ppsc->fw_current_inpsmode = *((bool *) val);
408 break; 408 break;
409 case HW_VAR_H2C_FW_JOINBSSRPT: { 409 case HW_VAR_H2C_FW_JOINBSSRPT: {
410 u8 mstatus = (*(u8 *) val); 410 u8 mstatus = (*val);
411 u8 tmp_regcr, tmp_reg422; 411 u8 tmp_regcr, tmp_reg422;
412 bool recover = false; 412 bool recover = false;
413 413
@@ -435,7 +435,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
435 rtl_write_byte(rtlpriv, REG_CR + 1, 435 rtl_write_byte(rtlpriv, REG_CR + 1,
436 (tmp_regcr & ~(BIT(0)))); 436 (tmp_regcr & ~(BIT(0))));
437 } 437 }
438 rtl92d_set_fw_joinbss_report_cmd(hw, (*(u8 *) val)); 438 rtl92d_set_fw_joinbss_report_cmd(hw, (*val));
439 break; 439 break;
440 } 440 }
441 case HW_VAR_AID: { 441 case HW_VAR_AID: {
@@ -447,7 +447,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
447 break; 447 break;
448 } 448 }
449 case HW_VAR_CORRECT_TSF: { 449 case HW_VAR_CORRECT_TSF: {
450 u8 btype_ibss = ((u8 *) (val))[0]; 450 u8 btype_ibss = val[0];
451 451
452 if (btype_ibss) 452 if (btype_ibss)
453 _rtl92de_stop_tx_beacon(hw); 453 _rtl92de_stop_tx_beacon(hw);
@@ -1794,7 +1794,7 @@ static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw)
1794 "RTL819X Not boot from eeprom, check it !!\n"); 1794 "RTL819X Not boot from eeprom, check it !!\n");
1795 return; 1795 return;
1796 } 1796 }
1797 rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID]; 1797 rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID];
1798 _rtl92de_read_macphymode_and_bandtype(hw, hwinfo); 1798 _rtl92de_read_macphymode_and_bandtype(hw, hwinfo);
1799 1799
1800 /* VID, DID SE 0xA-D */ 1800 /* VID, DID SE 0xA-D */
@@ -2115,7 +2115,7 @@ void rtl92de_update_channel_access_setting(struct ieee80211_hw *hw)
2115 u16 sifs_timer; 2115 u16 sifs_timer;
2116 2116
2117 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, 2117 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
2118 (u8 *)&mac->slot_time); 2118 &mac->slot_time);
2119 if (!mac->ht_enable) 2119 if (!mac->ht_enable)
2120 sifs_timer = 0x0a0a; 2120 sifs_timer = 0x0a0a;
2121 else 2121 else
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
index 1666ef7fd87b..f80690d82c11 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
@@ -560,7 +560,7 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
560 struct rtl_hal *rtlhal = rtl_hal(rtlpriv); 560 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
561 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 561 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
562 struct ieee80211_sta *sta = info->control.sta; 562 struct ieee80211_sta *sta = info->control.sta;
563 u8 *pdesc = (u8 *) pdesc_tx; 563 u8 *pdesc = pdesc_tx;
564 u16 seq_number; 564 u16 seq_number;
565 __le16 fc = hdr->frame_control; 565 __le16 fc = hdr->frame_control;
566 unsigned int buf_len = 0; 566 unsigned int buf_len = 0;
@@ -761,11 +761,11 @@ void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw,
761 SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue); 761 SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
762 SET_TX_DESC_FIRST_SEG(pdesc, 1); 762 SET_TX_DESC_FIRST_SEG(pdesc, 1);
763 SET_TX_DESC_LAST_SEG(pdesc, 1); 763 SET_TX_DESC_LAST_SEG(pdesc, 1);
764 SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) (skb->len)); 764 SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)skb->len);
765 SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping); 765 SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
766 SET_TX_DESC_RATE_ID(pdesc, 7); 766 SET_TX_DESC_RATE_ID(pdesc, 7);
767 SET_TX_DESC_MACID(pdesc, 0); 767 SET_TX_DESC_MACID(pdesc, 0);
768 SET_TX_DESC_PKT_SIZE((u8 *) pdesc, (u16) (skb->len)); 768 SET_TX_DESC_PKT_SIZE(pdesc, (u16) (skb->len));
769 SET_TX_DESC_FIRST_SEG(pdesc, 1); 769 SET_TX_DESC_FIRST_SEG(pdesc, 1);
770 SET_TX_DESC_LAST_SEG(pdesc, 1); 770 SET_TX_DESC_LAST_SEG(pdesc, 1);
771 SET_TX_DESC_OFFSET(pdesc, 0x20); 771 SET_TX_DESC_OFFSET(pdesc, 0x20);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
index 2e1158026fb7..465f58157101 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
@@ -146,7 +146,7 @@ static void _rtl92s_dm_check_edca_turbo(struct ieee80211_hw *hw)
146 if (rtlpriv->dm.current_turbo_edca) { 146 if (rtlpriv->dm.current_turbo_edca) {
147 u8 tmp = AC0_BE; 147 u8 tmp = AC0_BE;
148 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, 148 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
149 (u8 *)(&tmp)); 149 &tmp);
150 rtlpriv->dm.current_turbo_edca = false; 150 rtlpriv->dm.current_turbo_edca = false;
151 } 151 }
152 } 152 }
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
index b141c35bf926..4542e6952b97 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
@@ -145,13 +145,13 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
145 for (e_aci = 0; e_aci < AC_MAX; e_aci++) { 145 for (e_aci = 0; e_aci < AC_MAX; e_aci++) {
146 rtlpriv->cfg->ops->set_hw_reg(hw, 146 rtlpriv->cfg->ops->set_hw_reg(hw,
147 HW_VAR_AC_PARAM, 147 HW_VAR_AC_PARAM,
148 (u8 *)(&e_aci)); 148 (&e_aci));
149 } 149 }
150 break; 150 break;
151 } 151 }
152 case HW_VAR_ACK_PREAMBLE:{ 152 case HW_VAR_ACK_PREAMBLE:{
153 u8 reg_tmp; 153 u8 reg_tmp;
154 u8 short_preamble = (bool) (*(u8 *) val); 154 u8 short_preamble = (bool) (*val);
155 reg_tmp = (mac->cur_40_prime_sc) << 5; 155 reg_tmp = (mac->cur_40_prime_sc) << 5;
156 if (short_preamble) 156 if (short_preamble)
157 reg_tmp |= 0x80; 157 reg_tmp |= 0x80;
@@ -163,7 +163,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
163 u8 min_spacing_to_set; 163 u8 min_spacing_to_set;
164 u8 sec_min_space; 164 u8 sec_min_space;
165 165
166 min_spacing_to_set = *((u8 *)val); 166 min_spacing_to_set = *val;
167 if (min_spacing_to_set <= 7) { 167 if (min_spacing_to_set <= 7) {
168 if (rtlpriv->sec.pairwise_enc_algorithm == 168 if (rtlpriv->sec.pairwise_enc_algorithm ==
169 NO_ENCRYPTION) 169 NO_ENCRYPTION)
@@ -194,7 +194,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
194 case HW_VAR_SHORTGI_DENSITY:{ 194 case HW_VAR_SHORTGI_DENSITY:{
195 u8 density_to_set; 195 u8 density_to_set;
196 196
197 density_to_set = *((u8 *) val); 197 density_to_set = *val;
198 mac->min_space_cfg = rtlpriv->rtlhal.minspace_cfg; 198 mac->min_space_cfg = rtlpriv->rtlhal.minspace_cfg;
199 mac->min_space_cfg |= (density_to_set << 3); 199 mac->min_space_cfg |= (density_to_set << 3);
200 200
@@ -216,7 +216,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
216 15, 15, 15, 15, 0}; 216 15, 15, 15, 15, 0};
217 u8 index = 0; 217 u8 index = 0;
218 218
219 factor_toset = *((u8 *) val); 219 factor_toset = *val;
220 if (factor_toset <= 3) { 220 if (factor_toset <= 3) {
221 factor_toset = (1 << (factor_toset + 2)); 221 factor_toset = (1 << (factor_toset + 2));
222 if (factor_toset > 0xf) 222 if (factor_toset > 0xf)
@@ -248,17 +248,17 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
248 break; 248 break;
249 } 249 }
250 case HW_VAR_AC_PARAM:{ 250 case HW_VAR_AC_PARAM:{
251 u8 e_aci = *((u8 *) val); 251 u8 e_aci = *val;
252 rtl92s_dm_init_edca_turbo(hw); 252 rtl92s_dm_init_edca_turbo(hw);
253 253
254 if (rtlpci->acm_method != eAcmWay2_SW) 254 if (rtlpci->acm_method != eAcmWay2_SW)
255 rtlpriv->cfg->ops->set_hw_reg(hw, 255 rtlpriv->cfg->ops->set_hw_reg(hw,
256 HW_VAR_ACM_CTRL, 256 HW_VAR_ACM_CTRL,
257 (u8 *)(&e_aci)); 257 &e_aci);
258 break; 258 break;
259 } 259 }
260 case HW_VAR_ACM_CTRL:{ 260 case HW_VAR_ACM_CTRL:{
261 u8 e_aci = *((u8 *) val); 261 u8 e_aci = *val;
262 union aci_aifsn *p_aci_aifsn = (union aci_aifsn *)(&( 262 union aci_aifsn *p_aci_aifsn = (union aci_aifsn *)(&(
263 mac->ac[0].aifs)); 263 mac->ac[0].aifs));
264 u8 acm = p_aci_aifsn->f.acm; 264 u8 acm = p_aci_aifsn->f.acm;
@@ -313,7 +313,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
313 break; 313 break;
314 } 314 }
315 case HW_VAR_RETRY_LIMIT:{ 315 case HW_VAR_RETRY_LIMIT:{
316 u8 retry_limit = ((u8 *) (val))[0]; 316 u8 retry_limit = val[0];
317 317
318 rtl_write_word(rtlpriv, RETRY_LIMIT, 318 rtl_write_word(rtlpriv, RETRY_LIMIT,
319 retry_limit << RETRY_LIMIT_SHORT_SHIFT | 319 retry_limit << RETRY_LIMIT_SHORT_SHIFT |
@@ -328,14 +328,14 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
328 break; 328 break;
329 } 329 }
330 case HW_VAR_EFUSE_USAGE: { 330 case HW_VAR_EFUSE_USAGE: {
331 rtlefuse->efuse_usedpercentage = *((u8 *) val); 331 rtlefuse->efuse_usedpercentage = *val;
332 break; 332 break;
333 } 333 }
334 case HW_VAR_IO_CMD: { 334 case HW_VAR_IO_CMD: {
335 break; 335 break;
336 } 336 }
337 case HW_VAR_WPA_CONFIG: { 337 case HW_VAR_WPA_CONFIG: {
338 rtl_write_byte(rtlpriv, REG_SECR, *((u8 *) val)); 338 rtl_write_byte(rtlpriv, REG_SECR, *val);
339 break; 339 break;
340 } 340 }
341 case HW_VAR_SET_RPWM:{ 341 case HW_VAR_SET_RPWM:{
@@ -1813,8 +1813,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
1813 else 1813 else
1814 index = 2; 1814 index = 2;
1815 1815
1816 tempval = (*(u8 *)&hwinfo[EEPROM_TX_PWR_HT20_DIFF + 1816 tempval = hwinfo[EEPROM_TX_PWR_HT20_DIFF + index] & 0xff;
1817 index]) & 0xff;
1818 rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] = (tempval & 0xF); 1817 rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] = (tempval & 0xF);
1819 rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] = 1818 rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] =
1820 ((tempval >> 4) & 0xF); 1819 ((tempval >> 4) & 0xF);
@@ -1830,14 +1829,13 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
1830 else 1829 else
1831 index = 1; 1830 index = 1;
1832 1831
1833 tempval = (*(u8 *)&hwinfo[EEPROM_TX_PWR_OFDM_DIFF + index]) 1832 tempval = hwinfo[EEPROM_TX_PWR_OFDM_DIFF + index] & 0xff;
1834 & 0xff;
1835 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i] = 1833 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i] =
1836 (tempval & 0xF); 1834 (tempval & 0xF);
1837 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i] = 1835 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i] =
1838 ((tempval >> 4) & 0xF); 1836 ((tempval >> 4) & 0xF);
1839 1837
1840 tempval = (*(u8 *)&hwinfo[TX_PWR_SAFETY_CHK]); 1838 tempval = hwinfo[TX_PWR_SAFETY_CHK];
1841 rtlefuse->txpwr_safetyflag = (tempval & 0x01); 1839 rtlefuse->txpwr_safetyflag = (tempval & 0x01);
1842 } 1840 }
1843 1841
@@ -1876,7 +1874,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
1876 1874
1877 /* Read RF-indication and Tx Power gain 1875 /* Read RF-indication and Tx Power gain
1878 * index diff of legacy to HT OFDM rate. */ 1876 * index diff of legacy to HT OFDM rate. */
1879 tempval = (*(u8 *)&hwinfo[EEPROM_RFIND_POWERDIFF]) & 0xff; 1877 tempval = hwinfo[EEPROM_RFIND_POWERDIFF] & 0xff;
1880 rtlefuse->eeprom_txpowerdiff = tempval; 1878 rtlefuse->eeprom_txpowerdiff = tempval;
1881 rtlefuse->legacy_httxpowerdiff = 1879 rtlefuse->legacy_httxpowerdiff =
1882 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][0]; 1880 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][0];
@@ -1887,7 +1885,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
1887 /* Get TSSI value for each path. */ 1885 /* Get TSSI value for each path. */
1888 usvalue = *(u16 *)&hwinfo[EEPROM_TSSI_A]; 1886 usvalue = *(u16 *)&hwinfo[EEPROM_TSSI_A];
1889 rtlefuse->eeprom_tssi[RF90_PATH_A] = (u8)((usvalue & 0xff00) >> 8); 1887 rtlefuse->eeprom_tssi[RF90_PATH_A] = (u8)((usvalue & 0xff00) >> 8);
1890 usvalue = *(u8 *)&hwinfo[EEPROM_TSSI_B]; 1888 usvalue = hwinfo[EEPROM_TSSI_B];
1891 rtlefuse->eeprom_tssi[RF90_PATH_B] = (u8)(usvalue & 0xff); 1889 rtlefuse->eeprom_tssi[RF90_PATH_B] = (u8)(usvalue & 0xff);
1892 1890
1893 RTPRINT(rtlpriv, FINIT, INIT_TxPower, "TSSI_A = 0x%x, TSSI_B = 0x%x\n", 1891 RTPRINT(rtlpriv, FINIT, INIT_TxPower, "TSSI_A = 0x%x, TSSI_B = 0x%x\n",
@@ -1896,7 +1894,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
1896 1894
1897 /* Read antenna tx power offset of B/C/D to A from EEPROM */ 1895 /* Read antenna tx power offset of B/C/D to A from EEPROM */
1898 /* and read ThermalMeter from EEPROM */ 1896 /* and read ThermalMeter from EEPROM */
1899 tempval = *(u8 *)&hwinfo[EEPROM_THERMALMETER]; 1897 tempval = hwinfo[EEPROM_THERMALMETER];
1900 rtlefuse->eeprom_thermalmeter = tempval; 1898 rtlefuse->eeprom_thermalmeter = tempval;
1901 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1899 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
1902 "thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter); 1900 "thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter);
@@ -1906,20 +1904,20 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
1906 rtlefuse->tssi_13dbm = rtlefuse->eeprom_thermalmeter * 100; 1904 rtlefuse->tssi_13dbm = rtlefuse->eeprom_thermalmeter * 100;
1907 1905
1908 /* Read CrystalCap from EEPROM */ 1906 /* Read CrystalCap from EEPROM */
1909 tempval = (*(u8 *)&hwinfo[EEPROM_CRYSTALCAP]) >> 4; 1907 tempval = hwinfo[EEPROM_CRYSTALCAP] >> 4;
1910 rtlefuse->eeprom_crystalcap = tempval; 1908 rtlefuse->eeprom_crystalcap = tempval;
1911 /* CrystalCap, BIT(12)~15 */ 1909 /* CrystalCap, BIT(12)~15 */
1912 rtlefuse->crystalcap = rtlefuse->eeprom_crystalcap; 1910 rtlefuse->crystalcap = rtlefuse->eeprom_crystalcap;
1913 1911
1914 /* Read IC Version && Channel Plan */ 1912 /* Read IC Version && Channel Plan */
1915 /* Version ID, Channel plan */ 1913 /* Version ID, Channel plan */
1916 rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN]; 1914 rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN];
1917 rtlefuse->txpwr_fromeprom = true; 1915 rtlefuse->txpwr_fromeprom = true;
1918 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1916 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
1919 "EEPROM ChannelPlan = 0x%4x\n", rtlefuse->eeprom_channelplan); 1917 "EEPROM ChannelPlan = 0x%4x\n", rtlefuse->eeprom_channelplan);
1920 1918
1921 /* Read Customer ID or Board Type!!! */ 1919 /* Read Customer ID or Board Type!!! */
1922 tempval = *(u8 *)&hwinfo[EEPROM_BOARDTYPE]; 1920 tempval = hwinfo[EEPROM_BOARDTYPE];
1923 /* Change RF type definition */ 1921 /* Change RF type definition */
1924 if (tempval == 0) 1922 if (tempval == 0)
1925 rtlphy->rf_type = RF_2T2R; 1923 rtlphy->rf_type = RF_2T2R;
@@ -1941,7 +1939,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
1941 } 1939 }
1942 } 1940 }
1943 rtlefuse->b1ss_support = rtlefuse->b1x1_recvcombine; 1941 rtlefuse->b1ss_support = rtlefuse->b1x1_recvcombine;
1944 rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMID]; 1942 rtlefuse->eeprom_oemid = *&hwinfo[EEPROM_CUSTOMID];
1945 1943
1946 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM Customer ID: 0x%2x", 1944 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM Customer ID: 0x%2x",
1947 rtlefuse->eeprom_oemid); 1945 rtlefuse->eeprom_oemid);
@@ -2251,7 +2249,7 @@ void rtl92se_update_channel_access_setting(struct ieee80211_hw *hw)
2251 u16 sifs_timer; 2249 u16 sifs_timer;
2252 2250
2253 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, 2251 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
2254 (u8 *)&mac->slot_time); 2252 &mac->slot_time);
2255 sifs_timer = 0x0e0e; 2253 sifs_timer = 0x0e0e;
2256 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer); 2254 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer);
2257 2255
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
index 8d7099bc472c..b917a2a3caf7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
@@ -1247,6 +1247,9 @@ static void _rtl92s_phy_get_txpower_index(struct ieee80211_hw *hw, u8 channel,
1247 /* Read HT 40 OFDM TX power */ 1247 /* Read HT 40 OFDM TX power */
1248 ofdmpowerLevel[0] = rtlefuse->txpwrlevel_ht40_2s[0][index]; 1248 ofdmpowerLevel[0] = rtlefuse->txpwrlevel_ht40_2s[0][index];
1249 ofdmpowerLevel[1] = rtlefuse->txpwrlevel_ht40_2s[1][index]; 1249 ofdmpowerLevel[1] = rtlefuse->txpwrlevel_ht40_2s[1][index];
1250 } else {
1251 ofdmpowerLevel[0] = 0;
1252 ofdmpowerLevel[1] = 0;
1250 } 1253 }
1251} 1254}
1252 1255
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
index 730bcc919529..ad4b4803482d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
@@ -29,7 +29,6 @@
29 29
30#include "../wifi.h" 30#include "../wifi.h"
31#include "../core.h" 31#include "../core.h"
32#include "../pci.h"
33#include "../base.h" 32#include "../base.h"
34#include "../pci.h" 33#include "../pci.h"
35#include "reg.h" 34#include "reg.h"
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index 812b5858f14a..36d1cb3aef8a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -599,7 +599,7 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
599 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 599 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
600 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 600 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
601 struct ieee80211_sta *sta = info->control.sta; 601 struct ieee80211_sta *sta = info->control.sta;
602 u8 *pdesc = (u8 *) pdesc_tx; 602 u8 *pdesc = pdesc_tx;
603 u16 seq_number; 603 u16 seq_number;
604 __le16 fc = hdr->frame_control; 604 __le16 fc = hdr->frame_control;
605 u8 reserved_macid = 0; 605 u8 reserved_macid = 0;
diff --git a/drivers/net/wireless/ti/Kconfig b/drivers/net/wireless/ti/Kconfig
index 1a72932e2213..be800119d0a3 100644
--- a/drivers/net/wireless/ti/Kconfig
+++ b/drivers/net/wireless/ti/Kconfig
@@ -8,6 +8,7 @@ menuconfig WL_TI
8if WL_TI 8if WL_TI
9source "drivers/net/wireless/ti/wl1251/Kconfig" 9source "drivers/net/wireless/ti/wl1251/Kconfig"
10source "drivers/net/wireless/ti/wl12xx/Kconfig" 10source "drivers/net/wireless/ti/wl12xx/Kconfig"
11source "drivers/net/wireless/ti/wl18xx/Kconfig"
11 12
12# keep last for automatic dependencies 13# keep last for automatic dependencies
13source "drivers/net/wireless/ti/wlcore/Kconfig" 14source "drivers/net/wireless/ti/wlcore/Kconfig"
diff --git a/drivers/net/wireless/ti/Makefile b/drivers/net/wireless/ti/Makefile
index 0a565622d4a4..4d6823983c04 100644
--- a/drivers/net/wireless/ti/Makefile
+++ b/drivers/net/wireless/ti/Makefile
@@ -2,3 +2,4 @@ obj-$(CONFIG_WLCORE) += wlcore/
2obj-$(CONFIG_WL12XX) += wl12xx/ 2obj-$(CONFIG_WL12XX) += wl12xx/
3obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wlcore/ 3obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wlcore/
4obj-$(CONFIG_WL1251) += wl1251/ 4obj-$(CONFIG_WL1251) += wl1251/
5obj-$(CONFIG_WL18XX) += wl18xx/
diff --git a/drivers/net/wireless/ti/wl1251/cmd.c b/drivers/net/wireless/ti/wl1251/cmd.c
index d14d69d733a0..6822b845efc1 100644
--- a/drivers/net/wireless/ti/wl1251/cmd.c
+++ b/drivers/net/wireless/ti/wl1251/cmd.c
@@ -277,15 +277,6 @@ int wl1251_cmd_join(struct wl1251 *wl, u8 bss_type, u8 channel,
277 join->rx_config_options = wl->rx_config; 277 join->rx_config_options = wl->rx_config;
278 join->rx_filter_options = wl->rx_filter; 278 join->rx_filter_options = wl->rx_filter;
279 279
280 /*
281 * FIXME: disable temporarily all filters because after commit
282 * 9cef8737 "mac80211: fix managed mode BSSID handling" broke
283 * association. The filter logic needs to be implemented properly
284 * and once that is done, this hack can be removed.
285 */
286 join->rx_config_options = 0;
287 join->rx_filter_options = WL1251_DEFAULT_RX_FILTER;
288
289 join->basic_rate_set = RATE_MASK_1MBPS | RATE_MASK_2MBPS | 280 join->basic_rate_set = RATE_MASK_1MBPS | RATE_MASK_2MBPS |
290 RATE_MASK_5_5MBPS | RATE_MASK_11MBPS; 281 RATE_MASK_5_5MBPS | RATE_MASK_11MBPS;
291 282
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index d1afb8e3b2ef..3118c425bcf1 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -334,6 +334,12 @@ static int wl1251_join(struct wl1251 *wl, u8 bss_type, u8 channel,
334 if (ret < 0) 334 if (ret < 0)
335 goto out; 335 goto out;
336 336
337 /*
338 * Join command applies filters, and if we are not associated,
339 * BSSID filter must be disabled for association to work.
340 */
341 if (is_zero_ether_addr(wl->bssid))
342 wl->rx_config &= ~CFG_BSSID_FILTER_EN;
337 343
338 ret = wl1251_cmd_join(wl, bss_type, channel, beacon_interval, 344 ret = wl1251_cmd_join(wl, bss_type, channel, beacon_interval,
339 dtim_period); 345 dtim_period);
@@ -348,33 +354,6 @@ out:
348 return ret; 354 return ret;
349} 355}
350 356
351static void wl1251_filter_work(struct work_struct *work)
352{
353 struct wl1251 *wl =
354 container_of(work, struct wl1251, filter_work);
355 int ret;
356
357 mutex_lock(&wl->mutex);
358
359 if (wl->state == WL1251_STATE_OFF)
360 goto out;
361
362 ret = wl1251_ps_elp_wakeup(wl);
363 if (ret < 0)
364 goto out;
365
366 ret = wl1251_join(wl, wl->bss_type, wl->channel, wl->beacon_int,
367 wl->dtim_period);
368 if (ret < 0)
369 goto out_sleep;
370
371out_sleep:
372 wl1251_ps_elp_sleep(wl);
373
374out:
375 mutex_unlock(&wl->mutex);
376}
377
378static void wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 357static void wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
379{ 358{
380 struct wl1251 *wl = hw->priv; 359 struct wl1251 *wl = hw->priv;
@@ -478,7 +457,6 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
478 457
479 cancel_work_sync(&wl->irq_work); 458 cancel_work_sync(&wl->irq_work);
480 cancel_work_sync(&wl->tx_work); 459 cancel_work_sync(&wl->tx_work);
481 cancel_work_sync(&wl->filter_work);
482 cancel_delayed_work_sync(&wl->elp_work); 460 cancel_delayed_work_sync(&wl->elp_work);
483 461
484 mutex_lock(&wl->mutex); 462 mutex_lock(&wl->mutex);
@@ -681,13 +659,15 @@ out:
681 FIF_FCSFAIL | \ 659 FIF_FCSFAIL | \
682 FIF_BCN_PRBRESP_PROMISC | \ 660 FIF_BCN_PRBRESP_PROMISC | \
683 FIF_CONTROL | \ 661 FIF_CONTROL | \
684 FIF_OTHER_BSS) 662 FIF_OTHER_BSS | \
663 FIF_PROBE_REQ)
685 664
686static void wl1251_op_configure_filter(struct ieee80211_hw *hw, 665static void wl1251_op_configure_filter(struct ieee80211_hw *hw,
687 unsigned int changed, 666 unsigned int changed,
688 unsigned int *total,u64 multicast) 667 unsigned int *total,u64 multicast)
689{ 668{
690 struct wl1251 *wl = hw->priv; 669 struct wl1251 *wl = hw->priv;
670 int ret;
691 671
692 wl1251_debug(DEBUG_MAC80211, "mac80211 configure filter"); 672 wl1251_debug(DEBUG_MAC80211, "mac80211 configure filter");
693 673
@@ -698,7 +678,7 @@ static void wl1251_op_configure_filter(struct ieee80211_hw *hw,
698 /* no filters which we support changed */ 678 /* no filters which we support changed */
699 return; 679 return;
700 680
701 /* FIXME: wl->rx_config and wl->rx_filter are not protected */ 681 mutex_lock(&wl->mutex);
702 682
703 wl->rx_config = WL1251_DEFAULT_RX_CONFIG; 683 wl->rx_config = WL1251_DEFAULT_RX_CONFIG;
704 wl->rx_filter = WL1251_DEFAULT_RX_FILTER; 684 wl->rx_filter = WL1251_DEFAULT_RX_FILTER;
@@ -721,15 +701,25 @@ static void wl1251_op_configure_filter(struct ieee80211_hw *hw,
721 } 701 }
722 if (*total & FIF_CONTROL) 702 if (*total & FIF_CONTROL)
723 wl->rx_filter |= CFG_RX_CTL_EN; 703 wl->rx_filter |= CFG_RX_CTL_EN;
724 if (*total & FIF_OTHER_BSS) 704 if (*total & FIF_OTHER_BSS || is_zero_ether_addr(wl->bssid))
725 wl->rx_filter &= ~CFG_BSSID_FILTER_EN; 705 wl->rx_config &= ~CFG_BSSID_FILTER_EN;
706 if (*total & FIF_PROBE_REQ)
707 wl->rx_filter |= CFG_RX_PREQ_EN;
726 708
727 /* 709 if (wl->state == WL1251_STATE_OFF)
728 * FIXME: workqueues need to be properly cancelled on stop(), for 710 goto out;
729 * now let's just disable changing the filter settings. They will 711
730 * be updated any on config(). 712 ret = wl1251_ps_elp_wakeup(wl);
731 */ 713 if (ret < 0)
732 /* schedule_work(&wl->filter_work); */ 714 goto out;
715
716 /* send filters to firmware */
717 wl1251_acx_rx_config(wl, wl->rx_config, wl->rx_filter);
718
719 wl1251_ps_elp_sleep(wl);
720
721out:
722 mutex_unlock(&wl->mutex);
733} 723}
734 724
735/* HW encryption */ 725/* HW encryption */
@@ -1390,7 +1380,6 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
1390 1380
1391 skb_queue_head_init(&wl->tx_queue); 1381 skb_queue_head_init(&wl->tx_queue);
1392 1382
1393 INIT_WORK(&wl->filter_work, wl1251_filter_work);
1394 INIT_DELAYED_WORK(&wl->elp_work, wl1251_elp_work); 1383 INIT_DELAYED_WORK(&wl->elp_work, wl1251_elp_work);
1395 wl->channel = WL1251_DEFAULT_CHANNEL; 1384 wl->channel = WL1251_DEFAULT_CHANNEL;
1396 wl->scanning = false; 1385 wl->scanning = false;
diff --git a/drivers/net/wireless/ti/wl1251/wl1251.h b/drivers/net/wireless/ti/wl1251/wl1251.h
index 9d8f5816c6f9..fd02060038de 100644
--- a/drivers/net/wireless/ti/wl1251/wl1251.h
+++ b/drivers/net/wireless/ti/wl1251/wl1251.h
@@ -315,7 +315,6 @@ struct wl1251 {
315 bool tx_queue_stopped; 315 bool tx_queue_stopped;
316 316
317 struct work_struct tx_work; 317 struct work_struct tx_work;
318 struct work_struct filter_work;
319 318
320 /* Pending TX frames */ 319 /* Pending TX frames */
321 struct sk_buff *tx_frames[16]; 320 struct sk_buff *tx_frames[16];
diff --git a/drivers/net/wireless/ti/wl12xx/Makefile b/drivers/net/wireless/ti/wl12xx/Makefile
index 87f64b14db35..da509aa7d009 100644
--- a/drivers/net/wireless/ti/wl12xx/Makefile
+++ b/drivers/net/wireless/ti/wl12xx/Makefile
@@ -1,3 +1,3 @@
1wl12xx-objs = main.o cmd.o acx.o 1wl12xx-objs = main.o cmd.o acx.o debugfs.o
2 2
3obj-$(CONFIG_WL12XX) += wl12xx.o 3obj-$(CONFIG_WL12XX) += wl12xx.o
diff --git a/drivers/net/wireless/ti/wl12xx/acx.h b/drivers/net/wireless/ti/wl12xx/acx.h
index d1f5aba0afce..2a26868b837d 100644
--- a/drivers/net/wireless/ti/wl12xx/acx.h
+++ b/drivers/net/wireless/ti/wl12xx/acx.h
@@ -24,6 +24,21 @@
24#define __WL12XX_ACX_H__ 24#define __WL12XX_ACX_H__
25 25
26#include "../wlcore/wlcore.h" 26#include "../wlcore/wlcore.h"
27#include "../wlcore/acx.h"
28
29#define WL12XX_ACX_ALL_EVENTS_VECTOR (WL1271_ACX_INTR_WATCHDOG | \
30 WL1271_ACX_INTR_INIT_COMPLETE | \
31 WL1271_ACX_INTR_EVENT_A | \
32 WL1271_ACX_INTR_EVENT_B | \
33 WL1271_ACX_INTR_CMD_COMPLETE | \
34 WL1271_ACX_INTR_HW_AVAILABLE | \
35 WL1271_ACX_INTR_DATA)
36
37#define WL12XX_INTR_MASK (WL1271_ACX_INTR_WATCHDOG | \
38 WL1271_ACX_INTR_EVENT_A | \
39 WL1271_ACX_INTR_EVENT_B | \
40 WL1271_ACX_INTR_HW_AVAILABLE | \
41 WL1271_ACX_INTR_DATA)
27 42
28struct wl1271_acx_host_config_bitmap { 43struct wl1271_acx_host_config_bitmap {
29 struct acx_header header; 44 struct acx_header header;
@@ -31,6 +46,228 @@ struct wl1271_acx_host_config_bitmap {
31 __le32 host_cfg_bitmap; 46 __le32 host_cfg_bitmap;
32} __packed; 47} __packed;
33 48
49struct wl12xx_acx_tx_statistics {
50 __le32 internal_desc_overflow;
51} __packed;
52
53struct wl12xx_acx_rx_statistics {
54 __le32 out_of_mem;
55 __le32 hdr_overflow;
56 __le32 hw_stuck;
57 __le32 dropped;
58 __le32 fcs_err;
59 __le32 xfr_hint_trig;
60 __le32 path_reset;
61 __le32 reset_counter;
62} __packed;
63
64struct wl12xx_acx_dma_statistics {
65 __le32 rx_requested;
66 __le32 rx_errors;
67 __le32 tx_requested;
68 __le32 tx_errors;
69} __packed;
70
71struct wl12xx_acx_isr_statistics {
72 /* host command complete */
73 __le32 cmd_cmplt;
74
75 /* fiqisr() */
76 __le32 fiqs;
77
78 /* (INT_STS_ND & INT_TRIG_RX_HEADER) */
79 __le32 rx_headers;
80
81 /* (INT_STS_ND & INT_TRIG_RX_CMPLT) */
82 __le32 rx_completes;
83
84 /* (INT_STS_ND & INT_TRIG_NO_RX_BUF) */
85 __le32 rx_mem_overflow;
86
87 /* (INT_STS_ND & INT_TRIG_S_RX_RDY) */
88 __le32 rx_rdys;
89
90 /* irqisr() */
91 __le32 irqs;
92
93 /* (INT_STS_ND & INT_TRIG_TX_PROC) */
94 __le32 tx_procs;
95
96 /* (INT_STS_ND & INT_TRIG_DECRYPT_DONE) */
97 __le32 decrypt_done;
98
99 /* (INT_STS_ND & INT_TRIG_DMA0) */
100 __le32 dma0_done;
101
102 /* (INT_STS_ND & INT_TRIG_DMA1) */
103 __le32 dma1_done;
104
105 /* (INT_STS_ND & INT_TRIG_TX_EXC_CMPLT) */
106 __le32 tx_exch_complete;
107
108 /* (INT_STS_ND & INT_TRIG_COMMAND) */
109 __le32 commands;
110
111 /* (INT_STS_ND & INT_TRIG_RX_PROC) */
112 __le32 rx_procs;
113
114 /* (INT_STS_ND & INT_TRIG_PM_802) */
115 __le32 hw_pm_mode_changes;
116
117 /* (INT_STS_ND & INT_TRIG_ACKNOWLEDGE) */
118 __le32 host_acknowledges;
119
120 /* (INT_STS_ND & INT_TRIG_PM_PCI) */
121 __le32 pci_pm;
122
123 /* (INT_STS_ND & INT_TRIG_ACM_WAKEUP) */
124 __le32 wakeups;
125
126 /* (INT_STS_ND & INT_TRIG_LOW_RSSI) */
127 __le32 low_rssi;
128} __packed;
129
130struct wl12xx_acx_wep_statistics {
131 /* WEP address keys configured */
132 __le32 addr_key_count;
133
134 /* default keys configured */
135 __le32 default_key_count;
136
137 __le32 reserved;
138
139 /* number of times that WEP key not found on lookup */
140 __le32 key_not_found;
141
142 /* number of times that WEP key decryption failed */
143 __le32 decrypt_fail;
144
145 /* WEP packets decrypted */
146 __le32 packets;
147
148 /* WEP decrypt interrupts */
149 __le32 interrupt;
150} __packed;
151
152#define ACX_MISSED_BEACONS_SPREAD 10
153
154struct wl12xx_acx_pwr_statistics {
155 /* the amount of enters into power save mode (both PD & ELP) */
156 __le32 ps_enter;
157
158 /* the amount of enters into ELP mode */
159 __le32 elp_enter;
160
161 /* the amount of missing beacon interrupts to the host */
162 __le32 missing_bcns;
163
164 /* the amount of wake on host-access times */
165 __le32 wake_on_host;
166
167 /* the amount of wake on timer-expire */
168 __le32 wake_on_timer_exp;
169
170 /* the number of packets that were transmitted with PS bit set */
171 __le32 tx_with_ps;
172
173 /* the number of packets that were transmitted with PS bit clear */
174 __le32 tx_without_ps;
175
176 /* the number of received beacons */
177 __le32 rcvd_beacons;
178
179 /* the number of entering into PowerOn (power save off) */
180 __le32 power_save_off;
181
182 /* the number of entries into power save mode */
183 __le16 enable_ps;
184
185 /*
186 * the number of exits from power save, not including failed PS
187 * transitions
188 */
189 __le16 disable_ps;
190
191 /*
192 * the number of times the TSF counter was adjusted because
193 * of drift
194 */
195 __le32 fix_tsf_ps;
196
197 /* Gives statistics about the spread continuous missed beacons.
198 * The 16 LSB are dedicated for the PS mode.
199 * The 16 MSB are dedicated for the PS mode.
200 * cont_miss_bcns_spread[0] - single missed beacon.
201 * cont_miss_bcns_spread[1] - two continuous missed beacons.
202 * cont_miss_bcns_spread[2] - three continuous missed beacons.
203 * ...
204 * cont_miss_bcns_spread[9] - ten and more continuous missed beacons.
205 */
206 __le32 cont_miss_bcns_spread[ACX_MISSED_BEACONS_SPREAD];
207
208 /* the number of beacons in awake mode */
209 __le32 rcvd_awake_beacons;
210} __packed;
211
212struct wl12xx_acx_mic_statistics {
213 __le32 rx_pkts;
214 __le32 calc_failure;
215} __packed;
216
217struct wl12xx_acx_aes_statistics {
218 __le32 encrypt_fail;
219 __le32 decrypt_fail;
220 __le32 encrypt_packets;
221 __le32 decrypt_packets;
222 __le32 encrypt_interrupt;
223 __le32 decrypt_interrupt;
224} __packed;
225
226struct wl12xx_acx_event_statistics {
227 __le32 heart_beat;
228 __le32 calibration;
229 __le32 rx_mismatch;
230 __le32 rx_mem_empty;
231 __le32 rx_pool;
232 __le32 oom_late;
233 __le32 phy_transmit_error;
234 __le32 tx_stuck;
235} __packed;
236
237struct wl12xx_acx_ps_statistics {
238 __le32 pspoll_timeouts;
239 __le32 upsd_timeouts;
240 __le32 upsd_max_sptime;
241 __le32 upsd_max_apturn;
242 __le32 pspoll_max_apturn;
243 __le32 pspoll_utilization;
244 __le32 upsd_utilization;
245} __packed;
246
247struct wl12xx_acx_rxpipe_statistics {
248 __le32 rx_prep_beacon_drop;
249 __le32 descr_host_int_trig_rx_data;
250 __le32 beacon_buffer_thres_host_int_trig_rx_data;
251 __le32 missed_beacon_host_int_trig_rx_data;
252 __le32 tx_xfr_host_int_trig_rx_data;
253} __packed;
254
255struct wl12xx_acx_statistics {
256 struct acx_header header;
257
258 struct wl12xx_acx_tx_statistics tx;
259 struct wl12xx_acx_rx_statistics rx;
260 struct wl12xx_acx_dma_statistics dma;
261 struct wl12xx_acx_isr_statistics isr;
262 struct wl12xx_acx_wep_statistics wep;
263 struct wl12xx_acx_pwr_statistics pwr;
264 struct wl12xx_acx_aes_statistics aes;
265 struct wl12xx_acx_mic_statistics mic;
266 struct wl12xx_acx_event_statistics event;
267 struct wl12xx_acx_ps_statistics ps;
268 struct wl12xx_acx_rxpipe_statistics rxpipe;
269} __packed;
270
34int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap); 271int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap);
35 272
36#endif /* __WL12XX_ACX_H__ */ 273#endif /* __WL12XX_ACX_H__ */
diff --git a/drivers/net/wireless/ti/wl12xx/cmd.c b/drivers/net/wireless/ti/wl12xx/cmd.c
index 8ffaeb5f2147..30be784a40d8 100644
--- a/drivers/net/wireless/ti/wl12xx/cmd.c
+++ b/drivers/net/wireless/ti/wl12xx/cmd.c
@@ -65,6 +65,7 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
65 struct wl1271_general_parms_cmd *gen_parms; 65 struct wl1271_general_parms_cmd *gen_parms;
66 struct wl1271_ini_general_params *gp = 66 struct wl1271_ini_general_params *gp =
67 &((struct wl1271_nvs_file *)wl->nvs)->general_params; 67 &((struct wl1271_nvs_file *)wl->nvs)->general_params;
68 struct wl12xx_priv *priv = wl->priv;
68 bool answer = false; 69 bool answer = false;
69 int ret; 70 int ret;
70 71
@@ -88,7 +89,7 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
88 answer = true; 89 answer = true;
89 90
90 /* Override the REF CLK from the NVS with the one from platform data */ 91 /* Override the REF CLK from the NVS with the one from platform data */
91 gen_parms->general_params.ref_clock = wl->ref_clock; 92 gen_parms->general_params.ref_clock = priv->ref_clock;
92 93
93 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer); 94 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer);
94 if (ret < 0) { 95 if (ret < 0) {
@@ -118,6 +119,7 @@ int wl128x_cmd_general_parms(struct wl1271 *wl)
118 struct wl128x_general_parms_cmd *gen_parms; 119 struct wl128x_general_parms_cmd *gen_parms;
119 struct wl128x_ini_general_params *gp = 120 struct wl128x_ini_general_params *gp =
120 &((struct wl128x_nvs_file *)wl->nvs)->general_params; 121 &((struct wl128x_nvs_file *)wl->nvs)->general_params;
122 struct wl12xx_priv *priv = wl->priv;
121 bool answer = false; 123 bool answer = false;
122 int ret; 124 int ret;
123 125
@@ -141,8 +143,8 @@ int wl128x_cmd_general_parms(struct wl1271 *wl)
141 answer = true; 143 answer = true;
142 144
143 /* Replace REF and TCXO CLKs with the ones from platform data */ 145 /* Replace REF and TCXO CLKs with the ones from platform data */
144 gen_parms->general_params.ref_clock = wl->ref_clock; 146 gen_parms->general_params.ref_clock = priv->ref_clock;
145 gen_parms->general_params.tcxo_ref_clock = wl->tcxo_clock; 147 gen_parms->general_params.tcxo_ref_clock = priv->tcxo_clock;
146 148
147 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer); 149 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer);
148 if (ret < 0) { 150 if (ret < 0) {
@@ -172,7 +174,7 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
172 struct wl1271_nvs_file *nvs = (struct wl1271_nvs_file *)wl->nvs; 174 struct wl1271_nvs_file *nvs = (struct wl1271_nvs_file *)wl->nvs;
173 struct wl1271_radio_parms_cmd *radio_parms; 175 struct wl1271_radio_parms_cmd *radio_parms;
174 struct wl1271_ini_general_params *gp = &nvs->general_params; 176 struct wl1271_ini_general_params *gp = &nvs->general_params;
175 int ret; 177 int ret, fem_idx;
176 178
177 if (!wl->nvs) 179 if (!wl->nvs)
178 return -ENODEV; 180 return -ENODEV;
@@ -183,11 +185,13 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
183 185
184 radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM; 186 radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM;
185 187
188 fem_idx = WL12XX_FEM_TO_NVS_ENTRY(gp->tx_bip_fem_manufacturer);
189
186 /* 2.4GHz parameters */ 190 /* 2.4GHz parameters */
187 memcpy(&radio_parms->static_params_2, &nvs->stat_radio_params_2, 191 memcpy(&radio_parms->static_params_2, &nvs->stat_radio_params_2,
188 sizeof(struct wl1271_ini_band_params_2)); 192 sizeof(struct wl1271_ini_band_params_2));
189 memcpy(&radio_parms->dyn_params_2, 193 memcpy(&radio_parms->dyn_params_2,
190 &nvs->dyn_radio_params_2[gp->tx_bip_fem_manufacturer].params, 194 &nvs->dyn_radio_params_2[fem_idx].params,
191 sizeof(struct wl1271_ini_fem_params_2)); 195 sizeof(struct wl1271_ini_fem_params_2));
192 196
193 /* 5GHz parameters */ 197 /* 5GHz parameters */
@@ -195,7 +199,7 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
195 &nvs->stat_radio_params_5, 199 &nvs->stat_radio_params_5,
196 sizeof(struct wl1271_ini_band_params_5)); 200 sizeof(struct wl1271_ini_band_params_5));
197 memcpy(&radio_parms->dyn_params_5, 201 memcpy(&radio_parms->dyn_params_5,
198 &nvs->dyn_radio_params_5[gp->tx_bip_fem_manufacturer].params, 202 &nvs->dyn_radio_params_5[fem_idx].params,
199 sizeof(struct wl1271_ini_fem_params_5)); 203 sizeof(struct wl1271_ini_fem_params_5));
200 204
201 wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ", 205 wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ",
@@ -214,7 +218,7 @@ int wl128x_cmd_radio_parms(struct wl1271 *wl)
214 struct wl128x_nvs_file *nvs = (struct wl128x_nvs_file *)wl->nvs; 218 struct wl128x_nvs_file *nvs = (struct wl128x_nvs_file *)wl->nvs;
215 struct wl128x_radio_parms_cmd *radio_parms; 219 struct wl128x_radio_parms_cmd *radio_parms;
216 struct wl128x_ini_general_params *gp = &nvs->general_params; 220 struct wl128x_ini_general_params *gp = &nvs->general_params;
217 int ret; 221 int ret, fem_idx;
218 222
219 if (!wl->nvs) 223 if (!wl->nvs)
220 return -ENODEV; 224 return -ENODEV;
@@ -225,11 +229,13 @@ int wl128x_cmd_radio_parms(struct wl1271 *wl)
225 229
226 radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM; 230 radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM;
227 231
232 fem_idx = WL12XX_FEM_TO_NVS_ENTRY(gp->tx_bip_fem_manufacturer);
233
228 /* 2.4GHz parameters */ 234 /* 2.4GHz parameters */
229 memcpy(&radio_parms->static_params_2, &nvs->stat_radio_params_2, 235 memcpy(&radio_parms->static_params_2, &nvs->stat_radio_params_2,
230 sizeof(struct wl128x_ini_band_params_2)); 236 sizeof(struct wl128x_ini_band_params_2));
231 memcpy(&radio_parms->dyn_params_2, 237 memcpy(&radio_parms->dyn_params_2,
232 &nvs->dyn_radio_params_2[gp->tx_bip_fem_manufacturer].params, 238 &nvs->dyn_radio_params_2[fem_idx].params,
233 sizeof(struct wl128x_ini_fem_params_2)); 239 sizeof(struct wl128x_ini_fem_params_2));
234 240
235 /* 5GHz parameters */ 241 /* 5GHz parameters */
@@ -237,7 +243,7 @@ int wl128x_cmd_radio_parms(struct wl1271 *wl)
237 &nvs->stat_radio_params_5, 243 &nvs->stat_radio_params_5,
238 sizeof(struct wl128x_ini_band_params_5)); 244 sizeof(struct wl128x_ini_band_params_5));
239 memcpy(&radio_parms->dyn_params_5, 245 memcpy(&radio_parms->dyn_params_5,
240 &nvs->dyn_radio_params_5[gp->tx_bip_fem_manufacturer].params, 246 &nvs->dyn_radio_params_5[fem_idx].params,
241 sizeof(struct wl128x_ini_fem_params_5)); 247 sizeof(struct wl128x_ini_fem_params_5));
242 248
243 radio_parms->fem_vendor_and_options = nvs->fem_vendor_and_options; 249 radio_parms->fem_vendor_and_options = nvs->fem_vendor_and_options;
diff --git a/drivers/net/wireless/ti/wl12xx/debugfs.c b/drivers/net/wireless/ti/wl12xx/debugfs.c
new file mode 100644
index 000000000000..0521cbf858cf
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/debugfs.c
@@ -0,0 +1,243 @@
1/*
2 * This file is part of wl12xx
3 *
4 * Copyright (C) 2009 Nokia Corporation
5 * Copyright (C) 2011-2012 Texas Instruments
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
19 * 02110-1301 USA
20 *
21 */
22
23#include "../wlcore/debugfs.h"
24#include "../wlcore/wlcore.h"
25
26#include "wl12xx.h"
27#include "acx.h"
28#include "debugfs.h"
29
30#define WL12XX_DEBUGFS_FWSTATS_FILE(a, b, c) \
31 DEBUGFS_FWSTATS_FILE(a, b, c, wl12xx_acx_statistics)
32
33WL12XX_DEBUGFS_FWSTATS_FILE(tx, internal_desc_overflow, "%u");
34
35WL12XX_DEBUGFS_FWSTATS_FILE(rx, out_of_mem, "%u");
36WL12XX_DEBUGFS_FWSTATS_FILE(rx, hdr_overflow, "%u");
37WL12XX_DEBUGFS_FWSTATS_FILE(rx, hw_stuck, "%u");
38WL12XX_DEBUGFS_FWSTATS_FILE(rx, dropped, "%u");
39WL12XX_DEBUGFS_FWSTATS_FILE(rx, fcs_err, "%u");
40WL12XX_DEBUGFS_FWSTATS_FILE(rx, xfr_hint_trig, "%u");
41WL12XX_DEBUGFS_FWSTATS_FILE(rx, path_reset, "%u");
42WL12XX_DEBUGFS_FWSTATS_FILE(rx, reset_counter, "%u");
43
44WL12XX_DEBUGFS_FWSTATS_FILE(dma, rx_requested, "%u");
45WL12XX_DEBUGFS_FWSTATS_FILE(dma, rx_errors, "%u");
46WL12XX_DEBUGFS_FWSTATS_FILE(dma, tx_requested, "%u");
47WL12XX_DEBUGFS_FWSTATS_FILE(dma, tx_errors, "%u");
48
49WL12XX_DEBUGFS_FWSTATS_FILE(isr, cmd_cmplt, "%u");
50WL12XX_DEBUGFS_FWSTATS_FILE(isr, fiqs, "%u");
51WL12XX_DEBUGFS_FWSTATS_FILE(isr, rx_headers, "%u");
52WL12XX_DEBUGFS_FWSTATS_FILE(isr, rx_mem_overflow, "%u");
53WL12XX_DEBUGFS_FWSTATS_FILE(isr, rx_rdys, "%u");
54WL12XX_DEBUGFS_FWSTATS_FILE(isr, irqs, "%u");
55WL12XX_DEBUGFS_FWSTATS_FILE(isr, tx_procs, "%u");
56WL12XX_DEBUGFS_FWSTATS_FILE(isr, decrypt_done, "%u");
57WL12XX_DEBUGFS_FWSTATS_FILE(isr, dma0_done, "%u");
58WL12XX_DEBUGFS_FWSTATS_FILE(isr, dma1_done, "%u");
59WL12XX_DEBUGFS_FWSTATS_FILE(isr, tx_exch_complete, "%u");
60WL12XX_DEBUGFS_FWSTATS_FILE(isr, commands, "%u");
61WL12XX_DEBUGFS_FWSTATS_FILE(isr, rx_procs, "%u");
62WL12XX_DEBUGFS_FWSTATS_FILE(isr, hw_pm_mode_changes, "%u");
63WL12XX_DEBUGFS_FWSTATS_FILE(isr, host_acknowledges, "%u");
64WL12XX_DEBUGFS_FWSTATS_FILE(isr, pci_pm, "%u");
65WL12XX_DEBUGFS_FWSTATS_FILE(isr, wakeups, "%u");
66WL12XX_DEBUGFS_FWSTATS_FILE(isr, low_rssi, "%u");
67
68WL12XX_DEBUGFS_FWSTATS_FILE(wep, addr_key_count, "%u");
69WL12XX_DEBUGFS_FWSTATS_FILE(wep, default_key_count, "%u");
70/* skipping wep.reserved */
71WL12XX_DEBUGFS_FWSTATS_FILE(wep, key_not_found, "%u");
72WL12XX_DEBUGFS_FWSTATS_FILE(wep, decrypt_fail, "%u");
73WL12XX_DEBUGFS_FWSTATS_FILE(wep, packets, "%u");
74WL12XX_DEBUGFS_FWSTATS_FILE(wep, interrupt, "%u");
75
76WL12XX_DEBUGFS_FWSTATS_FILE(pwr, ps_enter, "%u");
77WL12XX_DEBUGFS_FWSTATS_FILE(pwr, elp_enter, "%u");
78WL12XX_DEBUGFS_FWSTATS_FILE(pwr, missing_bcns, "%u");
79WL12XX_DEBUGFS_FWSTATS_FILE(pwr, wake_on_host, "%u");
80WL12XX_DEBUGFS_FWSTATS_FILE(pwr, wake_on_timer_exp, "%u");
81WL12XX_DEBUGFS_FWSTATS_FILE(pwr, tx_with_ps, "%u");
82WL12XX_DEBUGFS_FWSTATS_FILE(pwr, tx_without_ps, "%u");
83WL12XX_DEBUGFS_FWSTATS_FILE(pwr, rcvd_beacons, "%u");
84WL12XX_DEBUGFS_FWSTATS_FILE(pwr, power_save_off, "%u");
85WL12XX_DEBUGFS_FWSTATS_FILE(pwr, enable_ps, "%u");
86WL12XX_DEBUGFS_FWSTATS_FILE(pwr, disable_ps, "%u");
87WL12XX_DEBUGFS_FWSTATS_FILE(pwr, fix_tsf_ps, "%u");
88/* skipping cont_miss_bcns_spread for now */
89WL12XX_DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_beacons, "%u");
90
91WL12XX_DEBUGFS_FWSTATS_FILE(mic, rx_pkts, "%u");
92WL12XX_DEBUGFS_FWSTATS_FILE(mic, calc_failure, "%u");
93
94WL12XX_DEBUGFS_FWSTATS_FILE(aes, encrypt_fail, "%u");
95WL12XX_DEBUGFS_FWSTATS_FILE(aes, decrypt_fail, "%u");
96WL12XX_DEBUGFS_FWSTATS_FILE(aes, encrypt_packets, "%u");
97WL12XX_DEBUGFS_FWSTATS_FILE(aes, decrypt_packets, "%u");
98WL12XX_DEBUGFS_FWSTATS_FILE(aes, encrypt_interrupt, "%u");
99WL12XX_DEBUGFS_FWSTATS_FILE(aes, decrypt_interrupt, "%u");
100
101WL12XX_DEBUGFS_FWSTATS_FILE(event, heart_beat, "%u");
102WL12XX_DEBUGFS_FWSTATS_FILE(event, calibration, "%u");
103WL12XX_DEBUGFS_FWSTATS_FILE(event, rx_mismatch, "%u");
104WL12XX_DEBUGFS_FWSTATS_FILE(event, rx_mem_empty, "%u");
105WL12XX_DEBUGFS_FWSTATS_FILE(event, rx_pool, "%u");
106WL12XX_DEBUGFS_FWSTATS_FILE(event, oom_late, "%u");
107WL12XX_DEBUGFS_FWSTATS_FILE(event, phy_transmit_error, "%u");
108WL12XX_DEBUGFS_FWSTATS_FILE(event, tx_stuck, "%u");
109
110WL12XX_DEBUGFS_FWSTATS_FILE(ps, pspoll_timeouts, "%u");
111WL12XX_DEBUGFS_FWSTATS_FILE(ps, upsd_timeouts, "%u");
112WL12XX_DEBUGFS_FWSTATS_FILE(ps, upsd_max_sptime, "%u");
113WL12XX_DEBUGFS_FWSTATS_FILE(ps, upsd_max_apturn, "%u");
114WL12XX_DEBUGFS_FWSTATS_FILE(ps, pspoll_max_apturn, "%u");
115WL12XX_DEBUGFS_FWSTATS_FILE(ps, pspoll_utilization, "%u");
116WL12XX_DEBUGFS_FWSTATS_FILE(ps, upsd_utilization, "%u");
117
118WL12XX_DEBUGFS_FWSTATS_FILE(rxpipe, rx_prep_beacon_drop, "%u");
119WL12XX_DEBUGFS_FWSTATS_FILE(rxpipe, descr_host_int_trig_rx_data, "%u");
120WL12XX_DEBUGFS_FWSTATS_FILE(rxpipe, beacon_buffer_thres_host_int_trig_rx_data,
121 "%u");
122WL12XX_DEBUGFS_FWSTATS_FILE(rxpipe, missed_beacon_host_int_trig_rx_data, "%u");
123WL12XX_DEBUGFS_FWSTATS_FILE(rxpipe, tx_xfr_host_int_trig_rx_data, "%u");
124
125int wl12xx_debugfs_add_files(struct wl1271 *wl,
126 struct dentry *rootdir)
127{
128 int ret = 0;
129 struct dentry *entry, *stats, *moddir;
130
131 moddir = debugfs_create_dir(KBUILD_MODNAME, rootdir);
132 if (!moddir || IS_ERR(moddir)) {
133 entry = moddir;
134 goto err;
135 }
136
137 stats = debugfs_create_dir("fw_stats", moddir);
138 if (!stats || IS_ERR(stats)) {
139 entry = stats;
140 goto err;
141 }
142
143 DEBUGFS_FWSTATS_ADD(tx, internal_desc_overflow);
144
145 DEBUGFS_FWSTATS_ADD(rx, out_of_mem);
146 DEBUGFS_FWSTATS_ADD(rx, hdr_overflow);
147 DEBUGFS_FWSTATS_ADD(rx, hw_stuck);
148 DEBUGFS_FWSTATS_ADD(rx, dropped);
149 DEBUGFS_FWSTATS_ADD(rx, fcs_err);
150 DEBUGFS_FWSTATS_ADD(rx, xfr_hint_trig);
151 DEBUGFS_FWSTATS_ADD(rx, path_reset);
152 DEBUGFS_FWSTATS_ADD(rx, reset_counter);
153
154 DEBUGFS_FWSTATS_ADD(dma, rx_requested);
155 DEBUGFS_FWSTATS_ADD(dma, rx_errors);
156 DEBUGFS_FWSTATS_ADD(dma, tx_requested);
157 DEBUGFS_FWSTATS_ADD(dma, tx_errors);
158
159 DEBUGFS_FWSTATS_ADD(isr, cmd_cmplt);
160 DEBUGFS_FWSTATS_ADD(isr, fiqs);
161 DEBUGFS_FWSTATS_ADD(isr, rx_headers);
162 DEBUGFS_FWSTATS_ADD(isr, rx_mem_overflow);
163 DEBUGFS_FWSTATS_ADD(isr, rx_rdys);
164 DEBUGFS_FWSTATS_ADD(isr, irqs);
165 DEBUGFS_FWSTATS_ADD(isr, tx_procs);
166 DEBUGFS_FWSTATS_ADD(isr, decrypt_done);
167 DEBUGFS_FWSTATS_ADD(isr, dma0_done);
168 DEBUGFS_FWSTATS_ADD(isr, dma1_done);
169 DEBUGFS_FWSTATS_ADD(isr, tx_exch_complete);
170 DEBUGFS_FWSTATS_ADD(isr, commands);
171 DEBUGFS_FWSTATS_ADD(isr, rx_procs);
172 DEBUGFS_FWSTATS_ADD(isr, hw_pm_mode_changes);
173 DEBUGFS_FWSTATS_ADD(isr, host_acknowledges);
174 DEBUGFS_FWSTATS_ADD(isr, pci_pm);
175 DEBUGFS_FWSTATS_ADD(isr, wakeups);
176 DEBUGFS_FWSTATS_ADD(isr, low_rssi);
177
178 DEBUGFS_FWSTATS_ADD(wep, addr_key_count);
179 DEBUGFS_FWSTATS_ADD(wep, default_key_count);
180 /* skipping wep.reserved */
181 DEBUGFS_FWSTATS_ADD(wep, key_not_found);
182 DEBUGFS_FWSTATS_ADD(wep, decrypt_fail);
183 DEBUGFS_FWSTATS_ADD(wep, packets);
184 DEBUGFS_FWSTATS_ADD(wep, interrupt);
185
186 DEBUGFS_FWSTATS_ADD(pwr, ps_enter);
187 DEBUGFS_FWSTATS_ADD(pwr, elp_enter);
188 DEBUGFS_FWSTATS_ADD(pwr, missing_bcns);
189 DEBUGFS_FWSTATS_ADD(pwr, wake_on_host);
190 DEBUGFS_FWSTATS_ADD(pwr, wake_on_timer_exp);
191 DEBUGFS_FWSTATS_ADD(pwr, tx_with_ps);
192 DEBUGFS_FWSTATS_ADD(pwr, tx_without_ps);
193 DEBUGFS_FWSTATS_ADD(pwr, rcvd_beacons);
194 DEBUGFS_FWSTATS_ADD(pwr, power_save_off);
195 DEBUGFS_FWSTATS_ADD(pwr, enable_ps);
196 DEBUGFS_FWSTATS_ADD(pwr, disable_ps);
197 DEBUGFS_FWSTATS_ADD(pwr, fix_tsf_ps);
198 /* skipping cont_miss_bcns_spread for now */
199 DEBUGFS_FWSTATS_ADD(pwr, rcvd_awake_beacons);
200
201 DEBUGFS_FWSTATS_ADD(mic, rx_pkts);
202 DEBUGFS_FWSTATS_ADD(mic, calc_failure);
203
204 DEBUGFS_FWSTATS_ADD(aes, encrypt_fail);
205 DEBUGFS_FWSTATS_ADD(aes, decrypt_fail);
206 DEBUGFS_FWSTATS_ADD(aes, encrypt_packets);
207 DEBUGFS_FWSTATS_ADD(aes, decrypt_packets);
208 DEBUGFS_FWSTATS_ADD(aes, encrypt_interrupt);
209 DEBUGFS_FWSTATS_ADD(aes, decrypt_interrupt);
210
211 DEBUGFS_FWSTATS_ADD(event, heart_beat);
212 DEBUGFS_FWSTATS_ADD(event, calibration);
213 DEBUGFS_FWSTATS_ADD(event, rx_mismatch);
214 DEBUGFS_FWSTATS_ADD(event, rx_mem_empty);
215 DEBUGFS_FWSTATS_ADD(event, rx_pool);
216 DEBUGFS_FWSTATS_ADD(event, oom_late);
217 DEBUGFS_FWSTATS_ADD(event, phy_transmit_error);
218 DEBUGFS_FWSTATS_ADD(event, tx_stuck);
219
220 DEBUGFS_FWSTATS_ADD(ps, pspoll_timeouts);
221 DEBUGFS_FWSTATS_ADD(ps, upsd_timeouts);
222 DEBUGFS_FWSTATS_ADD(ps, upsd_max_sptime);
223 DEBUGFS_FWSTATS_ADD(ps, upsd_max_apturn);
224 DEBUGFS_FWSTATS_ADD(ps, pspoll_max_apturn);
225 DEBUGFS_FWSTATS_ADD(ps, pspoll_utilization);
226 DEBUGFS_FWSTATS_ADD(ps, upsd_utilization);
227
228 DEBUGFS_FWSTATS_ADD(rxpipe, rx_prep_beacon_drop);
229 DEBUGFS_FWSTATS_ADD(rxpipe, descr_host_int_trig_rx_data);
230 DEBUGFS_FWSTATS_ADD(rxpipe, beacon_buffer_thres_host_int_trig_rx_data);
231 DEBUGFS_FWSTATS_ADD(rxpipe, missed_beacon_host_int_trig_rx_data);
232 DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data);
233
234 return 0;
235
236err:
237 if (IS_ERR(entry))
238 ret = PTR_ERR(entry);
239 else
240 ret = -ENOMEM;
241
242 return ret;
243}
diff --git a/drivers/net/wireless/ti/wl12xx/debugfs.h b/drivers/net/wireless/ti/wl12xx/debugfs.h
new file mode 100644
index 000000000000..96898e291b78
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/debugfs.h
@@ -0,0 +1,28 @@
1/*
2 * This file is part of wl12xx
3 *
4 * Copyright (C) 2012 Texas Instruments. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#ifndef __WL12XX_DEBUGFS_H__
23#define __WL12XX_DEBUGFS_H__
24
25int wl12xx_debugfs_add_files(struct wl1271 *wl,
26 struct dentry *rootdir);
27
28#endif /* __WL12XX_DEBUGFS_H__ */
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index d7dd3def07b5..3d6c71b7a3c7 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -39,6 +39,10 @@
39#include "reg.h" 39#include "reg.h"
40#include "cmd.h" 40#include "cmd.h"
41#include "acx.h" 41#include "acx.h"
42#include "debugfs.h"
43
44static char *fref_param;
45static char *tcxo_param;
42 46
43static struct wlcore_conf wl12xx_conf = { 47static struct wlcore_conf wl12xx_conf = {
44 .sg = { 48 .sg = {
@@ -212,7 +216,7 @@ static struct wlcore_conf wl12xx_conf = {
212 .suspend_wake_up_event = CONF_WAKE_UP_EVENT_N_DTIM, 216 .suspend_wake_up_event = CONF_WAKE_UP_EVENT_N_DTIM,
213 .suspend_listen_interval = 3, 217 .suspend_listen_interval = 3,
214 .bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED, 218 .bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED,
215 .bcn_filt_ie_count = 2, 219 .bcn_filt_ie_count = 3,
216 .bcn_filt_ie = { 220 .bcn_filt_ie = {
217 [0] = { 221 [0] = {
218 .ie = WLAN_EID_CHANNEL_SWITCH, 222 .ie = WLAN_EID_CHANNEL_SWITCH,
@@ -222,9 +226,13 @@ static struct wlcore_conf wl12xx_conf = {
222 .ie = WLAN_EID_HT_OPERATION, 226 .ie = WLAN_EID_HT_OPERATION,
223 .rule = CONF_BCN_RULE_PASS_ON_CHANGE, 227 .rule = CONF_BCN_RULE_PASS_ON_CHANGE,
224 }, 228 },
229 [2] = {
230 .ie = WLAN_EID_ERP_INFO,
231 .rule = CONF_BCN_RULE_PASS_ON_CHANGE,
232 },
225 }, 233 },
226 .synch_fail_thold = 10, 234 .synch_fail_thold = 12,
227 .bss_lose_timeout = 100, 235 .bss_lose_timeout = 400,
228 .beacon_rx_timeout = 10000, 236 .beacon_rx_timeout = 10000,
229 .broadcast_timeout = 20000, 237 .broadcast_timeout = 20000,
230 .rx_broadcast_in_ps = 1, 238 .rx_broadcast_in_ps = 1,
@@ -234,10 +242,11 @@ static struct wlcore_conf wl12xx_conf = {
234 .psm_entry_retries = 8, 242 .psm_entry_retries = 8,
235 .psm_exit_retries = 16, 243 .psm_exit_retries = 16,
236 .psm_entry_nullfunc_retries = 3, 244 .psm_entry_nullfunc_retries = 3,
237 .dynamic_ps_timeout = 40, 245 .dynamic_ps_timeout = 1500,
238 .forced_ps = false, 246 .forced_ps = false,
239 .keep_alive_interval = 55000, 247 .keep_alive_interval = 55000,
240 .max_listen_interval = 20, 248 .max_listen_interval = 20,
249 .sta_sleep_auth = WL1271_PSM_ILLEGAL,
241 }, 250 },
242 .itrim = { 251 .itrim = {
243 .enable = false, 252 .enable = false,
@@ -245,7 +254,7 @@ static struct wlcore_conf wl12xx_conf = {
245 }, 254 },
246 .pm_config = { 255 .pm_config = {
247 .host_clk_settling_time = 5000, 256 .host_clk_settling_time = 5000,
248 .host_fast_wakeup_support = false 257 .host_fast_wakeup_support = CONF_FAST_WAKEUP_DISABLE,
249 }, 258 },
250 .roam_trigger = { 259 .roam_trigger = {
251 .trigger_pacing = 1, 260 .trigger_pacing = 1,
@@ -305,8 +314,8 @@ static struct wlcore_conf wl12xx_conf = {
305 .swallow_period = 5, 314 .swallow_period = 5,
306 .n_divider_fref_set_1 = 0xff, /* default */ 315 .n_divider_fref_set_1 = 0xff, /* default */
307 .n_divider_fref_set_2 = 12, 316 .n_divider_fref_set_2 = 12,
308 .m_divider_fref_set_1 = 148, 317 .m_divider_fref_set_1 = 0xffff,
309 .m_divider_fref_set_2 = 0xffff, /* default */ 318 .m_divider_fref_set_2 = 148, /* default */
310 .coex_pll_stabilization_time = 0xffffffff, /* default */ 319 .coex_pll_stabilization_time = 0xffffffff, /* default */
311 .ldo_stabilization_time = 0xffff, /* default */ 320 .ldo_stabilization_time = 0xffff, /* default */
312 .fm_disturbed_band_margin = 0xff, /* default */ 321 .fm_disturbed_band_margin = 0xff, /* default */
@@ -581,19 +590,21 @@ static const int wl12xx_rtable[REG_TABLE_LEN] = {
581}; 590};
582 591
583/* TODO: maybe move to a new header file? */ 592/* TODO: maybe move to a new header file? */
584#define WL127X_FW_NAME_MULTI "ti-connectivity/wl127x-fw-4-mr.bin" 593#define WL127X_FW_NAME_MULTI "ti-connectivity/wl127x-fw-5-mr.bin"
585#define WL127X_FW_NAME_SINGLE "ti-connectivity/wl127x-fw-4-sr.bin" 594#define WL127X_FW_NAME_SINGLE "ti-connectivity/wl127x-fw-5-sr.bin"
586#define WL127X_PLT_FW_NAME "ti-connectivity/wl127x-fw-4-plt.bin" 595#define WL127X_PLT_FW_NAME "ti-connectivity/wl127x-fw-5-plt.bin"
587 596
588#define WL128X_FW_NAME_MULTI "ti-connectivity/wl128x-fw-4-mr.bin" 597#define WL128X_FW_NAME_MULTI "ti-connectivity/wl128x-fw-5-mr.bin"
589#define WL128X_FW_NAME_SINGLE "ti-connectivity/wl128x-fw-4-sr.bin" 598#define WL128X_FW_NAME_SINGLE "ti-connectivity/wl128x-fw-5-sr.bin"
590#define WL128X_PLT_FW_NAME "ti-connectivity/wl128x-fw-4-plt.bin" 599#define WL128X_PLT_FW_NAME "ti-connectivity/wl128x-fw-5-plt.bin"
591 600
592static void wl127x_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len) 601static int wl127x_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len)
593{ 602{
603 int ret;
604
594 if (wl->chip.id != CHIP_ID_1283_PG20) { 605 if (wl->chip.id != CHIP_ID_1283_PG20) {
595 struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map; 606 struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
596 struct wl1271_rx_mem_pool_addr rx_mem_addr; 607 struct wl127x_rx_mem_pool_addr rx_mem_addr;
597 608
598 /* 609 /*
599 * Choose the block we want to read 610 * Choose the block we want to read
@@ -607,9 +618,13 @@ static void wl127x_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len)
607 618
608 rx_mem_addr.addr_extra = rx_mem_addr.addr + 4; 619 rx_mem_addr.addr_extra = rx_mem_addr.addr + 4;
609 620
610 wl1271_write(wl, WL1271_SLV_REG_DATA, 621 ret = wlcore_write(wl, WL1271_SLV_REG_DATA, &rx_mem_addr,
611 &rx_mem_addr, sizeof(rx_mem_addr), false); 622 sizeof(rx_mem_addr), false);
623 if (ret < 0)
624 return ret;
612 } 625 }
626
627 return 0;
613} 628}
614 629
615static int wl12xx_identify_chip(struct wl1271 *wl) 630static int wl12xx_identify_chip(struct wl1271 *wl)
@@ -621,10 +636,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
621 wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete", 636 wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete",
622 wl->chip.id); 637 wl->chip.id);
623 638
624 /* clear the alignment quirk, since we don't support it */ 639 wl->quirks |= WLCORE_QUIRK_LEGACY_NVS |
625 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN; 640 WLCORE_QUIRK_DUAL_PROBE_TMPL |
626 641 WLCORE_QUIRK_TKIP_HEADER_SPACE;
627 wl->quirks |= WLCORE_QUIRK_LEGACY_NVS;
628 wl->sr_fw_name = WL127X_FW_NAME_SINGLE; 642 wl->sr_fw_name = WL127X_FW_NAME_SINGLE;
629 wl->mr_fw_name = WL127X_FW_NAME_MULTI; 643 wl->mr_fw_name = WL127X_FW_NAME_MULTI;
630 memcpy(&wl->conf.mem, &wl12xx_default_priv_conf.mem_wl127x, 644 memcpy(&wl->conf.mem, &wl12xx_default_priv_conf.mem_wl127x,
@@ -633,16 +647,18 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
633 /* read data preparation is only needed by wl127x */ 647 /* read data preparation is only needed by wl127x */
634 wl->ops->prepare_read = wl127x_prepare_read; 648 wl->ops->prepare_read = wl127x_prepare_read;
635 649
650 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
651 WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
652 WL127X_MINOR_VER);
636 break; 653 break;
637 654
638 case CHIP_ID_1271_PG20: 655 case CHIP_ID_1271_PG20:
639 wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)", 656 wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
640 wl->chip.id); 657 wl->chip.id);
641 658
642 /* clear the alignment quirk, since we don't support it */ 659 wl->quirks |= WLCORE_QUIRK_LEGACY_NVS |
643 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN; 660 WLCORE_QUIRK_DUAL_PROBE_TMPL |
644 661 WLCORE_QUIRK_TKIP_HEADER_SPACE;
645 wl->quirks |= WLCORE_QUIRK_LEGACY_NVS;
646 wl->plt_fw_name = WL127X_PLT_FW_NAME; 662 wl->plt_fw_name = WL127X_PLT_FW_NAME;
647 wl->sr_fw_name = WL127X_FW_NAME_SINGLE; 663 wl->sr_fw_name = WL127X_FW_NAME_SINGLE;
648 wl->mr_fw_name = WL127X_FW_NAME_MULTI; 664 wl->mr_fw_name = WL127X_FW_NAME_MULTI;
@@ -652,6 +668,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
652 /* read data preparation is only needed by wl127x */ 668 /* read data preparation is only needed by wl127x */
653 wl->ops->prepare_read = wl127x_prepare_read; 669 wl->ops->prepare_read = wl127x_prepare_read;
654 670
671 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
672 WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
673 WL127X_MINOR_VER);
655 break; 674 break;
656 675
657 case CHIP_ID_1283_PG20: 676 case CHIP_ID_1283_PG20:
@@ -660,6 +679,15 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
660 wl->plt_fw_name = WL128X_PLT_FW_NAME; 679 wl->plt_fw_name = WL128X_PLT_FW_NAME;
661 wl->sr_fw_name = WL128X_FW_NAME_SINGLE; 680 wl->sr_fw_name = WL128X_FW_NAME_SINGLE;
662 wl->mr_fw_name = WL128X_FW_NAME_MULTI; 681 wl->mr_fw_name = WL128X_FW_NAME_MULTI;
682
683 /* wl128x requires TX blocksize alignment */
684 wl->quirks |= WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN |
685 WLCORE_QUIRK_DUAL_PROBE_TMPL |
686 WLCORE_QUIRK_TKIP_HEADER_SPACE;
687
688 wlcore_set_min_fw_ver(wl, WL128X_CHIP_VER, WL128X_IFTYPE_VER,
689 WL128X_MAJOR_VER, WL128X_SUBTYPE_VER,
690 WL128X_MINOR_VER);
663 break; 691 break;
664 case CHIP_ID_1283_PG10: 692 case CHIP_ID_1283_PG10:
665 default: 693 default:
@@ -672,64 +700,95 @@ out:
672 return ret; 700 return ret;
673} 701}
674 702
675static void wl12xx_top_reg_write(struct wl1271 *wl, int addr, u16 val) 703static int __must_check wl12xx_top_reg_write(struct wl1271 *wl, int addr,
704 u16 val)
676{ 705{
706 int ret;
707
677 /* write address >> 1 + 0x30000 to OCP_POR_CTR */ 708 /* write address >> 1 + 0x30000 to OCP_POR_CTR */
678 addr = (addr >> 1) + 0x30000; 709 addr = (addr >> 1) + 0x30000;
679 wl1271_write32(wl, WL12XX_OCP_POR_CTR, addr); 710 ret = wlcore_write32(wl, WL12XX_OCP_POR_CTR, addr);
711 if (ret < 0)
712 goto out;
680 713
681 /* write value to OCP_POR_WDATA */ 714 /* write value to OCP_POR_WDATA */
682 wl1271_write32(wl, WL12XX_OCP_DATA_WRITE, val); 715 ret = wlcore_write32(wl, WL12XX_OCP_DATA_WRITE, val);
716 if (ret < 0)
717 goto out;
683 718
684 /* write 1 to OCP_CMD */ 719 /* write 1 to OCP_CMD */
685 wl1271_write32(wl, WL12XX_OCP_CMD, OCP_CMD_WRITE); 720 ret = wlcore_write32(wl, WL12XX_OCP_CMD, OCP_CMD_WRITE);
721 if (ret < 0)
722 goto out;
723
724out:
725 return ret;
686} 726}
687 727
688static u16 wl12xx_top_reg_read(struct wl1271 *wl, int addr) 728static int __must_check wl12xx_top_reg_read(struct wl1271 *wl, int addr,
729 u16 *out)
689{ 730{
690 u32 val; 731 u32 val;
691 int timeout = OCP_CMD_LOOP; 732 int timeout = OCP_CMD_LOOP;
733 int ret;
692 734
693 /* write address >> 1 + 0x30000 to OCP_POR_CTR */ 735 /* write address >> 1 + 0x30000 to OCP_POR_CTR */
694 addr = (addr >> 1) + 0x30000; 736 addr = (addr >> 1) + 0x30000;
695 wl1271_write32(wl, WL12XX_OCP_POR_CTR, addr); 737 ret = wlcore_write32(wl, WL12XX_OCP_POR_CTR, addr);
738 if (ret < 0)
739 return ret;
696 740
697 /* write 2 to OCP_CMD */ 741 /* write 2 to OCP_CMD */
698 wl1271_write32(wl, WL12XX_OCP_CMD, OCP_CMD_READ); 742 ret = wlcore_write32(wl, WL12XX_OCP_CMD, OCP_CMD_READ);
743 if (ret < 0)
744 return ret;
699 745
700 /* poll for data ready */ 746 /* poll for data ready */
701 do { 747 do {
702 val = wl1271_read32(wl, WL12XX_OCP_DATA_READ); 748 ret = wlcore_read32(wl, WL12XX_OCP_DATA_READ, &val);
749 if (ret < 0)
750 return ret;
703 } while (!(val & OCP_READY_MASK) && --timeout); 751 } while (!(val & OCP_READY_MASK) && --timeout);
704 752
705 if (!timeout) { 753 if (!timeout) {
706 wl1271_warning("Top register access timed out."); 754 wl1271_warning("Top register access timed out.");
707 return 0xffff; 755 return -ETIMEDOUT;
708 } 756 }
709 757
710 /* check data status and return if OK */ 758 /* check data status and return if OK */
711 if ((val & OCP_STATUS_MASK) == OCP_STATUS_OK) 759 if ((val & OCP_STATUS_MASK) != OCP_STATUS_OK) {
712 return val & 0xffff;
713 else {
714 wl1271_warning("Top register access returned error."); 760 wl1271_warning("Top register access returned error.");
715 return 0xffff; 761 return -EIO;
716 } 762 }
763
764 if (out)
765 *out = val & 0xffff;
766
767 return 0;
717} 768}
718 769
719static int wl128x_switch_tcxo_to_fref(struct wl1271 *wl) 770static int wl128x_switch_tcxo_to_fref(struct wl1271 *wl)
720{ 771{
721 u16 spare_reg; 772 u16 spare_reg;
773 int ret;
722 774
723 /* Mask bits [2] & [8:4] in the sys_clk_cfg register */ 775 /* Mask bits [2] & [8:4] in the sys_clk_cfg register */
724 spare_reg = wl12xx_top_reg_read(wl, WL_SPARE_REG); 776 ret = wl12xx_top_reg_read(wl, WL_SPARE_REG, &spare_reg);
777 if (ret < 0)
778 return ret;
779
725 if (spare_reg == 0xFFFF) 780 if (spare_reg == 0xFFFF)
726 return -EFAULT; 781 return -EFAULT;
727 spare_reg |= (BIT(3) | BIT(5) | BIT(6)); 782 spare_reg |= (BIT(3) | BIT(5) | BIT(6));
728 wl12xx_top_reg_write(wl, WL_SPARE_REG, spare_reg); 783 ret = wl12xx_top_reg_write(wl, WL_SPARE_REG, spare_reg);
784 if (ret < 0)
785 return ret;
729 786
730 /* Enable FREF_CLK_REQ & mux MCS and coex PLLs to FREF */ 787 /* Enable FREF_CLK_REQ & mux MCS and coex PLLs to FREF */
731 wl12xx_top_reg_write(wl, SYS_CLK_CFG_REG, 788 ret = wl12xx_top_reg_write(wl, SYS_CLK_CFG_REG,
732 WL_CLK_REQ_TYPE_PG2 | MCS_PLL_CLK_SEL_FREF); 789 WL_CLK_REQ_TYPE_PG2 | MCS_PLL_CLK_SEL_FREF);
790 if (ret < 0)
791 return ret;
733 792
734 /* Delay execution for 15msec, to let the HW settle */ 793 /* Delay execution for 15msec, to let the HW settle */
735 mdelay(15); 794 mdelay(15);
@@ -740,8 +799,12 @@ static int wl128x_switch_tcxo_to_fref(struct wl1271 *wl)
740static bool wl128x_is_tcxo_valid(struct wl1271 *wl) 799static bool wl128x_is_tcxo_valid(struct wl1271 *wl)
741{ 800{
742 u16 tcxo_detection; 801 u16 tcxo_detection;
802 int ret;
803
804 ret = wl12xx_top_reg_read(wl, TCXO_CLK_DETECT_REG, &tcxo_detection);
805 if (ret < 0)
806 return false;
743 807
744 tcxo_detection = wl12xx_top_reg_read(wl, TCXO_CLK_DETECT_REG);
745 if (tcxo_detection & TCXO_DET_FAILED) 808 if (tcxo_detection & TCXO_DET_FAILED)
746 return false; 809 return false;
747 810
@@ -751,8 +814,12 @@ static bool wl128x_is_tcxo_valid(struct wl1271 *wl)
751static bool wl128x_is_fref_valid(struct wl1271 *wl) 814static bool wl128x_is_fref_valid(struct wl1271 *wl)
752{ 815{
753 u16 fref_detection; 816 u16 fref_detection;
817 int ret;
818
819 ret = wl12xx_top_reg_read(wl, FREF_CLK_DETECT_REG, &fref_detection);
820 if (ret < 0)
821 return false;
754 822
755 fref_detection = wl12xx_top_reg_read(wl, FREF_CLK_DETECT_REG);
756 if (fref_detection & FREF_CLK_DETECT_FAIL) 823 if (fref_detection & FREF_CLK_DETECT_FAIL)
757 return false; 824 return false;
758 825
@@ -761,11 +828,21 @@ static bool wl128x_is_fref_valid(struct wl1271 *wl)
761 828
762static int wl128x_manually_configure_mcs_pll(struct wl1271 *wl) 829static int wl128x_manually_configure_mcs_pll(struct wl1271 *wl)
763{ 830{
764 wl12xx_top_reg_write(wl, MCS_PLL_M_REG, MCS_PLL_M_REG_VAL); 831 int ret;
765 wl12xx_top_reg_write(wl, MCS_PLL_N_REG, MCS_PLL_N_REG_VAL);
766 wl12xx_top_reg_write(wl, MCS_PLL_CONFIG_REG, MCS_PLL_CONFIG_REG_VAL);
767 832
768 return 0; 833 ret = wl12xx_top_reg_write(wl, MCS_PLL_M_REG, MCS_PLL_M_REG_VAL);
834 if (ret < 0)
835 goto out;
836
837 ret = wl12xx_top_reg_write(wl, MCS_PLL_N_REG, MCS_PLL_N_REG_VAL);
838 if (ret < 0)
839 goto out;
840
841 ret = wl12xx_top_reg_write(wl, MCS_PLL_CONFIG_REG,
842 MCS_PLL_CONFIG_REG_VAL);
843
844out:
845 return ret;
769} 846}
770 847
771static int wl128x_configure_mcs_pll(struct wl1271 *wl, int clk) 848static int wl128x_configure_mcs_pll(struct wl1271 *wl, int clk)
@@ -773,30 +850,40 @@ static int wl128x_configure_mcs_pll(struct wl1271 *wl, int clk)
773 u16 spare_reg; 850 u16 spare_reg;
774 u16 pll_config; 851 u16 pll_config;
775 u8 input_freq; 852 u8 input_freq;
853 struct wl12xx_priv *priv = wl->priv;
854 int ret;
776 855
777 /* Mask bits [3:1] in the sys_clk_cfg register */ 856 /* Mask bits [3:1] in the sys_clk_cfg register */
778 spare_reg = wl12xx_top_reg_read(wl, WL_SPARE_REG); 857 ret = wl12xx_top_reg_read(wl, WL_SPARE_REG, &spare_reg);
858 if (ret < 0)
859 return ret;
860
779 if (spare_reg == 0xFFFF) 861 if (spare_reg == 0xFFFF)
780 return -EFAULT; 862 return -EFAULT;
781 spare_reg |= BIT(2); 863 spare_reg |= BIT(2);
782 wl12xx_top_reg_write(wl, WL_SPARE_REG, spare_reg); 864 ret = wl12xx_top_reg_write(wl, WL_SPARE_REG, spare_reg);
865 if (ret < 0)
866 return ret;
783 867
784 /* Handle special cases of the TCXO clock */ 868 /* Handle special cases of the TCXO clock */
785 if (wl->tcxo_clock == WL12XX_TCXOCLOCK_16_8 || 869 if (priv->tcxo_clock == WL12XX_TCXOCLOCK_16_8 ||
786 wl->tcxo_clock == WL12XX_TCXOCLOCK_33_6) 870 priv->tcxo_clock == WL12XX_TCXOCLOCK_33_6)
787 return wl128x_manually_configure_mcs_pll(wl); 871 return wl128x_manually_configure_mcs_pll(wl);
788 872
789 /* Set the input frequency according to the selected clock source */ 873 /* Set the input frequency according to the selected clock source */
790 input_freq = (clk & 1) + 1; 874 input_freq = (clk & 1) + 1;
791 875
792 pll_config = wl12xx_top_reg_read(wl, MCS_PLL_CONFIG_REG); 876 ret = wl12xx_top_reg_read(wl, MCS_PLL_CONFIG_REG, &pll_config);
877 if (ret < 0)
878 return ret;
879
793 if (pll_config == 0xFFFF) 880 if (pll_config == 0xFFFF)
794 return -EFAULT; 881 return -EFAULT;
795 pll_config |= (input_freq << MCS_SEL_IN_FREQ_SHIFT); 882 pll_config |= (input_freq << MCS_SEL_IN_FREQ_SHIFT);
796 pll_config |= MCS_PLL_ENABLE_HP; 883 pll_config |= MCS_PLL_ENABLE_HP;
797 wl12xx_top_reg_write(wl, MCS_PLL_CONFIG_REG, pll_config); 884 ret = wl12xx_top_reg_write(wl, MCS_PLL_CONFIG_REG, pll_config);
798 885
799 return 0; 886 return ret;
800} 887}
801 888
802/* 889/*
@@ -808,26 +895,31 @@ static int wl128x_configure_mcs_pll(struct wl1271 *wl, int clk)
808 */ 895 */
809static int wl128x_boot_clk(struct wl1271 *wl, int *selected_clock) 896static int wl128x_boot_clk(struct wl1271 *wl, int *selected_clock)
810{ 897{
898 struct wl12xx_priv *priv = wl->priv;
811 u16 sys_clk_cfg; 899 u16 sys_clk_cfg;
900 int ret;
812 901
813 /* For XTAL-only modes, FREF will be used after switching from TCXO */ 902 /* For XTAL-only modes, FREF will be used after switching from TCXO */
814 if (wl->ref_clock == WL12XX_REFCLOCK_26_XTAL || 903 if (priv->ref_clock == WL12XX_REFCLOCK_26_XTAL ||
815 wl->ref_clock == WL12XX_REFCLOCK_38_XTAL) { 904 priv->ref_clock == WL12XX_REFCLOCK_38_XTAL) {
816 if (!wl128x_switch_tcxo_to_fref(wl)) 905 if (!wl128x_switch_tcxo_to_fref(wl))
817 return -EINVAL; 906 return -EINVAL;
818 goto fref_clk; 907 goto fref_clk;
819 } 908 }
820 909
821 /* Query the HW, to determine which clock source we should use */ 910 /* Query the HW, to determine which clock source we should use */
822 sys_clk_cfg = wl12xx_top_reg_read(wl, SYS_CLK_CFG_REG); 911 ret = wl12xx_top_reg_read(wl, SYS_CLK_CFG_REG, &sys_clk_cfg);
912 if (ret < 0)
913 return ret;
914
823 if (sys_clk_cfg == 0xFFFF) 915 if (sys_clk_cfg == 0xFFFF)
824 return -EINVAL; 916 return -EINVAL;
825 if (sys_clk_cfg & PRCM_CM_EN_MUX_WLAN_FREF) 917 if (sys_clk_cfg & PRCM_CM_EN_MUX_WLAN_FREF)
826 goto fref_clk; 918 goto fref_clk;
827 919
828 /* If TCXO is either 32.736MHz or 16.368MHz, switch to FREF */ 920 /* If TCXO is either 32.736MHz or 16.368MHz, switch to FREF */
829 if (wl->tcxo_clock == WL12XX_TCXOCLOCK_16_368 || 921 if (priv->tcxo_clock == WL12XX_TCXOCLOCK_16_368 ||
830 wl->tcxo_clock == WL12XX_TCXOCLOCK_32_736) { 922 priv->tcxo_clock == WL12XX_TCXOCLOCK_32_736) {
831 if (!wl128x_switch_tcxo_to_fref(wl)) 923 if (!wl128x_switch_tcxo_to_fref(wl))
832 return -EINVAL; 924 return -EINVAL;
833 goto fref_clk; 925 goto fref_clk;
@@ -836,14 +928,14 @@ static int wl128x_boot_clk(struct wl1271 *wl, int *selected_clock)
836 /* TCXO clock is selected */ 928 /* TCXO clock is selected */
837 if (!wl128x_is_tcxo_valid(wl)) 929 if (!wl128x_is_tcxo_valid(wl))
838 return -EINVAL; 930 return -EINVAL;
839 *selected_clock = wl->tcxo_clock; 931 *selected_clock = priv->tcxo_clock;
840 goto config_mcs_pll; 932 goto config_mcs_pll;
841 933
842fref_clk: 934fref_clk:
843 /* FREF clock is selected */ 935 /* FREF clock is selected */
844 if (!wl128x_is_fref_valid(wl)) 936 if (!wl128x_is_fref_valid(wl))
845 return -EINVAL; 937 return -EINVAL;
846 *selected_clock = wl->ref_clock; 938 *selected_clock = priv->ref_clock;
847 939
848config_mcs_pll: 940config_mcs_pll:
849 return wl128x_configure_mcs_pll(wl, *selected_clock); 941 return wl128x_configure_mcs_pll(wl, *selected_clock);
@@ -851,69 +943,98 @@ config_mcs_pll:
851 943
852static int wl127x_boot_clk(struct wl1271 *wl) 944static int wl127x_boot_clk(struct wl1271 *wl)
853{ 945{
946 struct wl12xx_priv *priv = wl->priv;
854 u32 pause; 947 u32 pause;
855 u32 clk; 948 u32 clk;
949 int ret;
856 950
857 if (WL127X_PG_GET_MAJOR(wl->hw_pg_ver) < 3) 951 if (WL127X_PG_GET_MAJOR(wl->hw_pg_ver) < 3)
858 wl->quirks |= WLCORE_QUIRK_END_OF_TRANSACTION; 952 wl->quirks |= WLCORE_QUIRK_END_OF_TRANSACTION;
859 953
860 if (wl->ref_clock == CONF_REF_CLK_19_2_E || 954 if (priv->ref_clock == CONF_REF_CLK_19_2_E ||
861 wl->ref_clock == CONF_REF_CLK_38_4_E || 955 priv->ref_clock == CONF_REF_CLK_38_4_E ||
862 wl->ref_clock == CONF_REF_CLK_38_4_M_XTAL) 956 priv->ref_clock == CONF_REF_CLK_38_4_M_XTAL)
863 /* ref clk: 19.2/38.4/38.4-XTAL */ 957 /* ref clk: 19.2/38.4/38.4-XTAL */
864 clk = 0x3; 958 clk = 0x3;
865 else if (wl->ref_clock == CONF_REF_CLK_26_E || 959 else if (priv->ref_clock == CONF_REF_CLK_26_E ||
866 wl->ref_clock == CONF_REF_CLK_52_E) 960 priv->ref_clock == CONF_REF_CLK_26_M_XTAL ||
961 priv->ref_clock == CONF_REF_CLK_52_E)
867 /* ref clk: 26/52 */ 962 /* ref clk: 26/52 */
868 clk = 0x5; 963 clk = 0x5;
869 else 964 else
870 return -EINVAL; 965 return -EINVAL;
871 966
872 if (wl->ref_clock != CONF_REF_CLK_19_2_E) { 967 if (priv->ref_clock != CONF_REF_CLK_19_2_E) {
873 u16 val; 968 u16 val;
874 /* Set clock type (open drain) */ 969 /* Set clock type (open drain) */
875 val = wl12xx_top_reg_read(wl, OCP_REG_CLK_TYPE); 970 ret = wl12xx_top_reg_read(wl, OCP_REG_CLK_TYPE, &val);
971 if (ret < 0)
972 goto out;
973
876 val &= FREF_CLK_TYPE_BITS; 974 val &= FREF_CLK_TYPE_BITS;
877 wl12xx_top_reg_write(wl, OCP_REG_CLK_TYPE, val); 975 ret = wl12xx_top_reg_write(wl, OCP_REG_CLK_TYPE, val);
976 if (ret < 0)
977 goto out;
878 978
879 /* Set clock pull mode (no pull) */ 979 /* Set clock pull mode (no pull) */
880 val = wl12xx_top_reg_read(wl, OCP_REG_CLK_PULL); 980 ret = wl12xx_top_reg_read(wl, OCP_REG_CLK_PULL, &val);
981 if (ret < 0)
982 goto out;
983
881 val |= NO_PULL; 984 val |= NO_PULL;
882 wl12xx_top_reg_write(wl, OCP_REG_CLK_PULL, val); 985 ret = wl12xx_top_reg_write(wl, OCP_REG_CLK_PULL, val);
986 if (ret < 0)
987 goto out;
883 } else { 988 } else {
884 u16 val; 989 u16 val;
885 /* Set clock polarity */ 990 /* Set clock polarity */
886 val = wl12xx_top_reg_read(wl, OCP_REG_CLK_POLARITY); 991 ret = wl12xx_top_reg_read(wl, OCP_REG_CLK_POLARITY, &val);
992 if (ret < 0)
993 goto out;
994
887 val &= FREF_CLK_POLARITY_BITS; 995 val &= FREF_CLK_POLARITY_BITS;
888 val |= CLK_REQ_OUTN_SEL; 996 val |= CLK_REQ_OUTN_SEL;
889 wl12xx_top_reg_write(wl, OCP_REG_CLK_POLARITY, val); 997 ret = wl12xx_top_reg_write(wl, OCP_REG_CLK_POLARITY, val);
998 if (ret < 0)
999 goto out;
890 } 1000 }
891 1001
892 wl1271_write32(wl, WL12XX_PLL_PARAMETERS, clk); 1002 ret = wlcore_write32(wl, WL12XX_PLL_PARAMETERS, clk);
1003 if (ret < 0)
1004 goto out;
893 1005
894 pause = wl1271_read32(wl, WL12XX_PLL_PARAMETERS); 1006 ret = wlcore_read32(wl, WL12XX_PLL_PARAMETERS, &pause);
1007 if (ret < 0)
1008 goto out;
895 1009
896 wl1271_debug(DEBUG_BOOT, "pause1 0x%x", pause); 1010 wl1271_debug(DEBUG_BOOT, "pause1 0x%x", pause);
897 1011
898 pause &= ~(WU_COUNTER_PAUSE_VAL); 1012 pause &= ~(WU_COUNTER_PAUSE_VAL);
899 pause |= WU_COUNTER_PAUSE_VAL; 1013 pause |= WU_COUNTER_PAUSE_VAL;
900 wl1271_write32(wl, WL12XX_WU_COUNTER_PAUSE, pause); 1014 ret = wlcore_write32(wl, WL12XX_WU_COUNTER_PAUSE, pause);
901 1015
902 return 0; 1016out:
1017 return ret;
903} 1018}
904 1019
905static int wl1271_boot_soft_reset(struct wl1271 *wl) 1020static int wl1271_boot_soft_reset(struct wl1271 *wl)
906{ 1021{
907 unsigned long timeout; 1022 unsigned long timeout;
908 u32 boot_data; 1023 u32 boot_data;
1024 int ret = 0;
909 1025
910 /* perform soft reset */ 1026 /* perform soft reset */
911 wl1271_write32(wl, WL12XX_SLV_SOFT_RESET, ACX_SLV_SOFT_RESET_BIT); 1027 ret = wlcore_write32(wl, WL12XX_SLV_SOFT_RESET, ACX_SLV_SOFT_RESET_BIT);
1028 if (ret < 0)
1029 goto out;
912 1030
913 /* SOFT_RESET is self clearing */ 1031 /* SOFT_RESET is self clearing */
914 timeout = jiffies + usecs_to_jiffies(SOFT_RESET_MAX_TIME); 1032 timeout = jiffies + usecs_to_jiffies(SOFT_RESET_MAX_TIME);
915 while (1) { 1033 while (1) {
916 boot_data = wl1271_read32(wl, WL12XX_SLV_SOFT_RESET); 1034 ret = wlcore_read32(wl, WL12XX_SLV_SOFT_RESET, &boot_data);
1035 if (ret < 0)
1036 goto out;
1037
917 wl1271_debug(DEBUG_BOOT, "soft reset bootdata 0x%x", boot_data); 1038 wl1271_debug(DEBUG_BOOT, "soft reset bootdata 0x%x", boot_data);
918 if ((boot_data & ACX_SLV_SOFT_RESET_BIT) == 0) 1039 if ((boot_data & ACX_SLV_SOFT_RESET_BIT) == 0)
919 break; 1040 break;
@@ -929,16 +1050,20 @@ static int wl1271_boot_soft_reset(struct wl1271 *wl)
929 } 1050 }
930 1051
931 /* disable Rx/Tx */ 1052 /* disable Rx/Tx */
932 wl1271_write32(wl, WL12XX_ENABLE, 0x0); 1053 ret = wlcore_write32(wl, WL12XX_ENABLE, 0x0);
1054 if (ret < 0)
1055 goto out;
933 1056
934 /* disable auto calibration on start*/ 1057 /* disable auto calibration on start*/
935 wl1271_write32(wl, WL12XX_SPARE_A2, 0xffff); 1058 ret = wlcore_write32(wl, WL12XX_SPARE_A2, 0xffff);
936 1059
937 return 0; 1060out:
1061 return ret;
938} 1062}
939 1063
940static int wl12xx_pre_boot(struct wl1271 *wl) 1064static int wl12xx_pre_boot(struct wl1271 *wl)
941{ 1065{
1066 struct wl12xx_priv *priv = wl->priv;
942 int ret = 0; 1067 int ret = 0;
943 u32 clk; 1068 u32 clk;
944 int selected_clock = -1; 1069 int selected_clock = -1;
@@ -954,30 +1079,43 @@ static int wl12xx_pre_boot(struct wl1271 *wl)
954 } 1079 }
955 1080
956 /* Continue the ELP wake up sequence */ 1081 /* Continue the ELP wake up sequence */
957 wl1271_write32(wl, WL12XX_WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL); 1082 ret = wlcore_write32(wl, WL12XX_WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
1083 if (ret < 0)
1084 goto out;
1085
958 udelay(500); 1086 udelay(500);
959 1087
960 wlcore_set_partition(wl, &wl->ptable[PART_DRPW]); 1088 ret = wlcore_set_partition(wl, &wl->ptable[PART_DRPW]);
1089 if (ret < 0)
1090 goto out;
961 1091
962 /* Read-modify-write DRPW_SCRATCH_START register (see next state) 1092 /* Read-modify-write DRPW_SCRATCH_START register (see next state)
963 to be used by DRPw FW. The RTRIM value will be added by the FW 1093 to be used by DRPw FW. The RTRIM value will be added by the FW
964 before taking DRPw out of reset */ 1094 before taking DRPw out of reset */
965 1095
966 clk = wl1271_read32(wl, WL12XX_DRPW_SCRATCH_START); 1096 ret = wlcore_read32(wl, WL12XX_DRPW_SCRATCH_START, &clk);
1097 if (ret < 0)
1098 goto out;
967 1099
968 wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk); 1100 wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk);
969 1101
970 if (wl->chip.id == CHIP_ID_1283_PG20) 1102 if (wl->chip.id == CHIP_ID_1283_PG20)
971 clk |= ((selected_clock & 0x3) << 1) << 4; 1103 clk |= ((selected_clock & 0x3) << 1) << 4;
972 else 1104 else
973 clk |= (wl->ref_clock << 1) << 4; 1105 clk |= (priv->ref_clock << 1) << 4;
974 1106
975 wl1271_write32(wl, WL12XX_DRPW_SCRATCH_START, clk); 1107 ret = wlcore_write32(wl, WL12XX_DRPW_SCRATCH_START, clk);
1108 if (ret < 0)
1109 goto out;
976 1110
977 wlcore_set_partition(wl, &wl->ptable[PART_WORK]); 1111 ret = wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
1112 if (ret < 0)
1113 goto out;
978 1114
979 /* Disable interrupts */ 1115 /* Disable interrupts */
980 wlcore_write_reg(wl, REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL); 1116 ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
1117 if (ret < 0)
1118 goto out;
981 1119
982 ret = wl1271_boot_soft_reset(wl); 1120 ret = wl1271_boot_soft_reset(wl);
983 if (ret < 0) 1121 if (ret < 0)
@@ -987,47 +1125,72 @@ out:
987 return ret; 1125 return ret;
988} 1126}
989 1127
990static void wl12xx_pre_upload(struct wl1271 *wl) 1128static int wl12xx_pre_upload(struct wl1271 *wl)
991{ 1129{
992 u32 tmp; 1130 u32 tmp;
1131 u16 polarity;
1132 int ret;
993 1133
994 /* write firmware's last address (ie. it's length) to 1134 /* write firmware's last address (ie. it's length) to
995 * ACX_EEPROMLESS_IND_REG */ 1135 * ACX_EEPROMLESS_IND_REG */
996 wl1271_debug(DEBUG_BOOT, "ACX_EEPROMLESS_IND_REG"); 1136 wl1271_debug(DEBUG_BOOT, "ACX_EEPROMLESS_IND_REG");
997 1137
998 wl1271_write32(wl, WL12XX_EEPROMLESS_IND, WL12XX_EEPROMLESS_IND); 1138 ret = wlcore_write32(wl, WL12XX_EEPROMLESS_IND, WL12XX_EEPROMLESS_IND);
1139 if (ret < 0)
1140 goto out;
999 1141
1000 tmp = wlcore_read_reg(wl, REG_CHIP_ID_B); 1142 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &tmp);
1143 if (ret < 0)
1144 goto out;
1001 1145
1002 wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp); 1146 wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp);
1003 1147
1004 /* 6. read the EEPROM parameters */ 1148 /* 6. read the EEPROM parameters */
1005 tmp = wl1271_read32(wl, WL12XX_SCR_PAD2); 1149 ret = wlcore_read32(wl, WL12XX_SCR_PAD2, &tmp);
1150 if (ret < 0)
1151 goto out;
1006 1152
1007 /* WL1271: The reference driver skips steps 7 to 10 (jumps directly 1153 /* WL1271: The reference driver skips steps 7 to 10 (jumps directly
1008 * to upload_fw) */ 1154 * to upload_fw) */
1009 1155
1010 if (wl->chip.id == CHIP_ID_1283_PG20) 1156 if (wl->chip.id == CHIP_ID_1283_PG20) {
1011 wl12xx_top_reg_write(wl, SDIO_IO_DS, HCI_IO_DS_6MA); 1157 ret = wl12xx_top_reg_write(wl, SDIO_IO_DS, HCI_IO_DS_6MA);
1012} 1158 if (ret < 0)
1013 1159 goto out;
1014static void wl12xx_enable_interrupts(struct wl1271 *wl) 1160 }
1015{
1016 u32 polarity;
1017 1161
1018 polarity = wl12xx_top_reg_read(wl, OCP_REG_POLARITY); 1162 /* polarity must be set before the firmware is loaded */
1163 ret = wl12xx_top_reg_read(wl, OCP_REG_POLARITY, &polarity);
1164 if (ret < 0)
1165 goto out;
1019 1166
1020 /* We use HIGH polarity, so unset the LOW bit */ 1167 /* We use HIGH polarity, so unset the LOW bit */
1021 polarity &= ~POLARITY_LOW; 1168 polarity &= ~POLARITY_LOW;
1022 wl12xx_top_reg_write(wl, OCP_REG_POLARITY, polarity); 1169 ret = wl12xx_top_reg_write(wl, OCP_REG_POLARITY, polarity);
1023 1170
1024 wlcore_write_reg(wl, REG_INTERRUPT_MASK, WL1271_ACX_ALL_EVENTS_VECTOR); 1171out:
1172 return ret;
1173}
1174
1175static int wl12xx_enable_interrupts(struct wl1271 *wl)
1176{
1177 int ret;
1178
1179 ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK,
1180 WL12XX_ACX_ALL_EVENTS_VECTOR);
1181 if (ret < 0)
1182 goto out;
1025 1183
1026 wlcore_enable_interrupts(wl); 1184 wlcore_enable_interrupts(wl);
1027 wlcore_write_reg(wl, REG_INTERRUPT_MASK, 1185 ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK,
1028 WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK)); 1186 WL1271_ACX_INTR_ALL & ~(WL12XX_INTR_MASK));
1187 if (ret < 0)
1188 goto out;
1189
1190 ret = wlcore_write32(wl, WL12XX_HI_CFG, HI_CFG_DEF_VAL);
1029 1191
1030 wl1271_write32(wl, WL12XX_HI_CFG, HI_CFG_DEF_VAL); 1192out:
1193 return ret;
1031} 1194}
1032 1195
1033static int wl12xx_boot(struct wl1271 *wl) 1196static int wl12xx_boot(struct wl1271 *wl)
@@ -1042,7 +1205,9 @@ static int wl12xx_boot(struct wl1271 *wl)
1042 if (ret < 0) 1205 if (ret < 0)
1043 goto out; 1206 goto out;
1044 1207
1045 wl12xx_pre_upload(wl); 1208 ret = wl12xx_pre_upload(wl);
1209 if (ret < 0)
1210 goto out;
1046 1211
1047 ret = wlcore_boot_upload_firmware(wl); 1212 ret = wlcore_boot_upload_firmware(wl);
1048 if (ret < 0) 1213 if (ret < 0)
@@ -1052,22 +1217,30 @@ static int wl12xx_boot(struct wl1271 *wl)
1052 if (ret < 0) 1217 if (ret < 0)
1053 goto out; 1218 goto out;
1054 1219
1055 wl12xx_enable_interrupts(wl); 1220 ret = wl12xx_enable_interrupts(wl);
1056 1221
1057out: 1222out:
1058 return ret; 1223 return ret;
1059} 1224}
1060 1225
1061static void wl12xx_trigger_cmd(struct wl1271 *wl, int cmd_box_addr, 1226static int wl12xx_trigger_cmd(struct wl1271 *wl, int cmd_box_addr,
1062 void *buf, size_t len) 1227 void *buf, size_t len)
1063{ 1228{
1064 wl1271_write(wl, cmd_box_addr, buf, len, false); 1229 int ret;
1065 wlcore_write_reg(wl, REG_INTERRUPT_TRIG, WL12XX_INTR_TRIG_CMD); 1230
1231 ret = wlcore_write(wl, cmd_box_addr, buf, len, false);
1232 if (ret < 0)
1233 return ret;
1234
1235 ret = wlcore_write_reg(wl, REG_INTERRUPT_TRIG, WL12XX_INTR_TRIG_CMD);
1236
1237 return ret;
1066} 1238}
1067 1239
1068static void wl12xx_ack_event(struct wl1271 *wl) 1240static int wl12xx_ack_event(struct wl1271 *wl)
1069{ 1241{
1070 wlcore_write_reg(wl, REG_INTERRUPT_TRIG, WL12XX_INTR_TRIG_EVENT_ACK); 1242 return wlcore_write_reg(wl, REG_INTERRUPT_TRIG,
1243 WL12XX_INTR_TRIG_EVENT_ACK);
1071} 1244}
1072 1245
1073static u32 wl12xx_calc_tx_blocks(struct wl1271 *wl, u32 len, u32 spare_blks) 1246static u32 wl12xx_calc_tx_blocks(struct wl1271 *wl, u32 len, u32 spare_blks)
@@ -1147,12 +1320,13 @@ static u32 wl12xx_get_rx_packet_len(struct wl1271 *wl, void *rx_data,
1147 return data_len - sizeof(*desc) - desc->pad_len; 1320 return data_len - sizeof(*desc) - desc->pad_len;
1148} 1321}
1149 1322
1150static void wl12xx_tx_delayed_compl(struct wl1271 *wl) 1323static int wl12xx_tx_delayed_compl(struct wl1271 *wl)
1151{ 1324{
1152 if (wl->fw_status->tx_results_counter == (wl->tx_results_count & 0xff)) 1325 if (wl->fw_status_1->tx_results_counter ==
1153 return; 1326 (wl->tx_results_count & 0xff))
1327 return 0;
1154 1328
1155 wl1271_tx_complete(wl); 1329 return wlcore_tx_complete(wl);
1156} 1330}
1157 1331
1158static int wl12xx_hw_init(struct wl1271 *wl) 1332static int wl12xx_hw_init(struct wl1271 *wl)
@@ -1253,45 +1427,144 @@ static bool wl12xx_mac_in_fuse(struct wl1271 *wl)
1253 return supported; 1427 return supported;
1254} 1428}
1255 1429
1256static void wl12xx_get_fuse_mac(struct wl1271 *wl) 1430static int wl12xx_get_fuse_mac(struct wl1271 *wl)
1257{ 1431{
1258 u32 mac1, mac2; 1432 u32 mac1, mac2;
1433 int ret;
1259 1434
1260 wlcore_set_partition(wl, &wl->ptable[PART_DRPW]); 1435 ret = wlcore_set_partition(wl, &wl->ptable[PART_DRPW]);
1436 if (ret < 0)
1437 goto out;
1261 1438
1262 mac1 = wl1271_read32(wl, WL12XX_REG_FUSE_BD_ADDR_1); 1439 ret = wlcore_read32(wl, WL12XX_REG_FUSE_BD_ADDR_1, &mac1);
1263 mac2 = wl1271_read32(wl, WL12XX_REG_FUSE_BD_ADDR_2); 1440 if (ret < 0)
1441 goto out;
1442
1443 ret = wlcore_read32(wl, WL12XX_REG_FUSE_BD_ADDR_2, &mac2);
1444 if (ret < 0)
1445 goto out;
1264 1446
1265 /* these are the two parts of the BD_ADDR */ 1447 /* these are the two parts of the BD_ADDR */
1266 wl->fuse_oui_addr = ((mac2 & 0xffff) << 8) + 1448 wl->fuse_oui_addr = ((mac2 & 0xffff) << 8) +
1267 ((mac1 & 0xff000000) >> 24); 1449 ((mac1 & 0xff000000) >> 24);
1268 wl->fuse_nic_addr = mac1 & 0xffffff; 1450 wl->fuse_nic_addr = mac1 & 0xffffff;
1269 1451
1270 wlcore_set_partition(wl, &wl->ptable[PART_DOWN]); 1452 ret = wlcore_set_partition(wl, &wl->ptable[PART_DOWN]);
1453
1454out:
1455 return ret;
1271} 1456}
1272 1457
1273static s8 wl12xx_get_pg_ver(struct wl1271 *wl) 1458static int wl12xx_get_pg_ver(struct wl1271 *wl, s8 *ver)
1274{ 1459{
1275 u32 die_info; 1460 u16 die_info;
1461 int ret;
1276 1462
1277 if (wl->chip.id == CHIP_ID_1283_PG20) 1463 if (wl->chip.id == CHIP_ID_1283_PG20)
1278 die_info = wl12xx_top_reg_read(wl, WL128X_REG_FUSE_DATA_2_1); 1464 ret = wl12xx_top_reg_read(wl, WL128X_REG_FUSE_DATA_2_1,
1465 &die_info);
1279 else 1466 else
1280 die_info = wl12xx_top_reg_read(wl, WL127X_REG_FUSE_DATA_2_1); 1467 ret = wl12xx_top_reg_read(wl, WL127X_REG_FUSE_DATA_2_1,
1468 &die_info);
1469
1470 if (ret >= 0 && ver)
1471 *ver = (s8)((die_info & PG_VER_MASK) >> PG_VER_OFFSET);
1281 1472
1282 return (s8) (die_info & PG_VER_MASK) >> PG_VER_OFFSET; 1473 return ret;
1283} 1474}
1284 1475
1285static void wl12xx_get_mac(struct wl1271 *wl) 1476static int wl12xx_get_mac(struct wl1271 *wl)
1286{ 1477{
1287 if (wl12xx_mac_in_fuse(wl)) 1478 if (wl12xx_mac_in_fuse(wl))
1288 wl12xx_get_fuse_mac(wl); 1479 return wl12xx_get_fuse_mac(wl);
1480
1481 return 0;
1482}
1483
1484static void wl12xx_set_tx_desc_csum(struct wl1271 *wl,
1485 struct wl1271_tx_hw_descr *desc,
1486 struct sk_buff *skb)
1487{
1488 desc->wl12xx_reserved = 0;
1489}
1490
1491static int wl12xx_plt_init(struct wl1271 *wl)
1492{
1493 int ret;
1494
1495 ret = wl->ops->boot(wl);
1496 if (ret < 0)
1497 goto out;
1498
1499 ret = wl->ops->hw_init(wl);
1500 if (ret < 0)
1501 goto out_irq_disable;
1502
1503 ret = wl1271_acx_init_mem_config(wl);
1504 if (ret < 0)
1505 goto out_irq_disable;
1506
1507 ret = wl12xx_acx_mem_cfg(wl);
1508 if (ret < 0)
1509 goto out_free_memmap;
1510
1511 /* Enable data path */
1512 ret = wl1271_cmd_data_path(wl, 1);
1513 if (ret < 0)
1514 goto out_free_memmap;
1515
1516 /* Configure for CAM power saving (ie. always active) */
1517 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
1518 if (ret < 0)
1519 goto out_free_memmap;
1520
1521 /* configure PM */
1522 ret = wl1271_acx_pm_config(wl);
1523 if (ret < 0)
1524 goto out_free_memmap;
1525
1526 goto out;
1527
1528out_free_memmap:
1529 kfree(wl->target_mem_map);
1530 wl->target_mem_map = NULL;
1531
1532out_irq_disable:
1533 mutex_unlock(&wl->mutex);
1534 /* Unlocking the mutex in the middle of handling is
1535 inherently unsafe. In this case we deem it safe to do,
1536 because we need to let any possibly pending IRQ out of
1537 the system (and while we are WL1271_STATE_OFF the IRQ
1538 work function will not do anything.) Also, any other
1539 possible concurrent operations will fail due to the
1540 current state, hence the wl1271 struct should be safe. */
1541 wlcore_disable_interrupts(wl);
1542 mutex_lock(&wl->mutex);
1543out:
1544 return ret;
1545}
1546
1547static int wl12xx_get_spare_blocks(struct wl1271 *wl, bool is_gem)
1548{
1549 if (is_gem)
1550 return WL12XX_TX_HW_BLOCK_GEM_SPARE;
1551
1552 return WL12XX_TX_HW_BLOCK_SPARE_DEFAULT;
1553}
1554
1555static int wl12xx_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
1556 struct ieee80211_vif *vif,
1557 struct ieee80211_sta *sta,
1558 struct ieee80211_key_conf *key_conf)
1559{
1560 return wlcore_set_key(wl, cmd, vif, sta, key_conf);
1289} 1561}
1290 1562
1291static struct wlcore_ops wl12xx_ops = { 1563static struct wlcore_ops wl12xx_ops = {
1292 .identify_chip = wl12xx_identify_chip, 1564 .identify_chip = wl12xx_identify_chip,
1293 .identify_fw = wl12xx_identify_fw, 1565 .identify_fw = wl12xx_identify_fw,
1294 .boot = wl12xx_boot, 1566 .boot = wl12xx_boot,
1567 .plt_init = wl12xx_plt_init,
1295 .trigger_cmd = wl12xx_trigger_cmd, 1568 .trigger_cmd = wl12xx_trigger_cmd,
1296 .ack_event = wl12xx_ack_event, 1569 .ack_event = wl12xx_ack_event,
1297 .calc_tx_blocks = wl12xx_calc_tx_blocks, 1570 .calc_tx_blocks = wl12xx_calc_tx_blocks,
@@ -1306,6 +1579,13 @@ static struct wlcore_ops wl12xx_ops = {
1306 .sta_get_ap_rate_mask = wl12xx_sta_get_ap_rate_mask, 1579 .sta_get_ap_rate_mask = wl12xx_sta_get_ap_rate_mask,
1307 .get_pg_ver = wl12xx_get_pg_ver, 1580 .get_pg_ver = wl12xx_get_pg_ver,
1308 .get_mac = wl12xx_get_mac, 1581 .get_mac = wl12xx_get_mac,
1582 .set_tx_desc_csum = wl12xx_set_tx_desc_csum,
1583 .set_rx_csum = NULL,
1584 .ap_get_mimo_wide_rate_mask = NULL,
1585 .debugfs_init = wl12xx_debugfs_add_files,
1586 .get_spare_blocks = wl12xx_get_spare_blocks,
1587 .set_key = wl12xx_set_key,
1588 .pre_pkt_send = NULL,
1309}; 1589};
1310 1590
1311static struct ieee80211_sta_ht_cap wl12xx_ht_cap = { 1591static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
@@ -1323,6 +1603,7 @@ static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
1323 1603
1324static int __devinit wl12xx_probe(struct platform_device *pdev) 1604static int __devinit wl12xx_probe(struct platform_device *pdev)
1325{ 1605{
1606 struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
1326 struct wl1271 *wl; 1607 struct wl1271 *wl;
1327 struct ieee80211_hw *hw; 1608 struct ieee80211_hw *hw;
1328 struct wl12xx_priv *priv; 1609 struct wl12xx_priv *priv;
@@ -1334,19 +1615,63 @@ static int __devinit wl12xx_probe(struct platform_device *pdev)
1334 } 1615 }
1335 1616
1336 wl = hw->priv; 1617 wl = hw->priv;
1618 priv = wl->priv;
1337 wl->ops = &wl12xx_ops; 1619 wl->ops = &wl12xx_ops;
1338 wl->ptable = wl12xx_ptable; 1620 wl->ptable = wl12xx_ptable;
1339 wl->rtable = wl12xx_rtable; 1621 wl->rtable = wl12xx_rtable;
1340 wl->num_tx_desc = 16; 1622 wl->num_tx_desc = 16;
1341 wl->normal_tx_spare = WL12XX_TX_HW_BLOCK_SPARE_DEFAULT; 1623 wl->num_rx_desc = 8;
1342 wl->gem_tx_spare = WL12XX_TX_HW_BLOCK_GEM_SPARE;
1343 wl->band_rate_to_idx = wl12xx_band_rate_to_idx; 1624 wl->band_rate_to_idx = wl12xx_band_rate_to_idx;
1344 wl->hw_tx_rate_tbl_size = WL12XX_CONF_HW_RXTX_RATE_MAX; 1625 wl->hw_tx_rate_tbl_size = WL12XX_CONF_HW_RXTX_RATE_MAX;
1345 wl->hw_min_ht_rate = WL12XX_CONF_HW_RXTX_RATE_MCS0; 1626 wl->hw_min_ht_rate = WL12XX_CONF_HW_RXTX_RATE_MCS0;
1346 wl->fw_status_priv_len = 0; 1627 wl->fw_status_priv_len = 0;
1347 memcpy(&wl->ht_cap, &wl12xx_ht_cap, sizeof(wl12xx_ht_cap)); 1628 wl->stats.fw_stats_len = sizeof(struct wl12xx_acx_statistics);
1629 wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ, &wl12xx_ht_cap);
1630 wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ, &wl12xx_ht_cap);
1348 wl12xx_conf_init(wl); 1631 wl12xx_conf_init(wl);
1349 1632
1633 if (!fref_param) {
1634 priv->ref_clock = pdata->board_ref_clock;
1635 } else {
1636 if (!strcmp(fref_param, "19.2"))
1637 priv->ref_clock = WL12XX_REFCLOCK_19;
1638 else if (!strcmp(fref_param, "26"))
1639 priv->ref_clock = WL12XX_REFCLOCK_26;
1640 else if (!strcmp(fref_param, "26x"))
1641 priv->ref_clock = WL12XX_REFCLOCK_26_XTAL;
1642 else if (!strcmp(fref_param, "38.4"))
1643 priv->ref_clock = WL12XX_REFCLOCK_38;
1644 else if (!strcmp(fref_param, "38.4x"))
1645 priv->ref_clock = WL12XX_REFCLOCK_38_XTAL;
1646 else if (!strcmp(fref_param, "52"))
1647 priv->ref_clock = WL12XX_REFCLOCK_52;
1648 else
1649 wl1271_error("Invalid fref parameter %s", fref_param);
1650 }
1651
1652 if (!tcxo_param) {
1653 priv->tcxo_clock = pdata->board_tcxo_clock;
1654 } else {
1655 if (!strcmp(tcxo_param, "19.2"))
1656 priv->tcxo_clock = WL12XX_TCXOCLOCK_19_2;
1657 else if (!strcmp(tcxo_param, "26"))
1658 priv->tcxo_clock = WL12XX_TCXOCLOCK_26;
1659 else if (!strcmp(tcxo_param, "38.4"))
1660 priv->tcxo_clock = WL12XX_TCXOCLOCK_38_4;
1661 else if (!strcmp(tcxo_param, "52"))
1662 priv->tcxo_clock = WL12XX_TCXOCLOCK_52;
1663 else if (!strcmp(tcxo_param, "16.368"))
1664 priv->tcxo_clock = WL12XX_TCXOCLOCK_16_368;
1665 else if (!strcmp(tcxo_param, "32.736"))
1666 priv->tcxo_clock = WL12XX_TCXOCLOCK_32_736;
1667 else if (!strcmp(tcxo_param, "16.8"))
1668 priv->tcxo_clock = WL12XX_TCXOCLOCK_16_8;
1669 else if (!strcmp(tcxo_param, "33.6"))
1670 priv->tcxo_clock = WL12XX_TCXOCLOCK_33_6;
1671 else
1672 wl1271_error("Invalid tcxo parameter %s", tcxo_param);
1673 }
1674
1350 return wlcore_probe(wl, pdev); 1675 return wlcore_probe(wl, pdev);
1351} 1676}
1352 1677
@@ -1378,6 +1703,13 @@ static void __exit wl12xx_exit(void)
1378} 1703}
1379module_exit(wl12xx_exit); 1704module_exit(wl12xx_exit);
1380 1705
1706module_param_named(fref, fref_param, charp, 0);
1707MODULE_PARM_DESC(fref, "FREF clock: 19.2, 26, 26x, 38.4, 38.4x, 52");
1708
1709module_param_named(tcxo, tcxo_param, charp, 0);
1710MODULE_PARM_DESC(tcxo,
1711 "TCXO clock: 19.2, 26, 38.4, 52, 16.368, 32.736, 16.8, 33.6");
1712
1381MODULE_LICENSE("GPL v2"); 1713MODULE_LICENSE("GPL v2");
1382MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>"); 1714MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
1383MODULE_FIRMWARE(WL127X_FW_NAME_SINGLE); 1715MODULE_FIRMWARE(WL127X_FW_NAME_SINGLE);
diff --git a/drivers/net/wireless/ti/wl12xx/wl12xx.h b/drivers/net/wireless/ti/wl12xx/wl12xx.h
index 74cd332e23ef..26990fb4edea 100644
--- a/drivers/net/wireless/ti/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/ti/wl12xx/wl12xx.h
@@ -24,8 +24,30 @@
24 24
25#include "conf.h" 25#include "conf.h"
26 26
27/* minimum FW required for driver for wl127x */
28#define WL127X_CHIP_VER 6
29#define WL127X_IFTYPE_VER 3
30#define WL127X_MAJOR_VER 10
31#define WL127X_SUBTYPE_VER 2
32#define WL127X_MINOR_VER 115
33
34/* minimum FW required for driver for wl128x */
35#define WL128X_CHIP_VER 7
36#define WL128X_IFTYPE_VER 3
37#define WL128X_MAJOR_VER 10
38#define WL128X_SUBTYPE_VER 2
39#define WL128X_MINOR_VER 115
40
41struct wl127x_rx_mem_pool_addr {
42 u32 addr;
43 u32 addr_extra;
44};
45
27struct wl12xx_priv { 46struct wl12xx_priv {
28 struct wl12xx_priv_conf conf; 47 struct wl12xx_priv_conf conf;
48
49 int ref_clock;
50 int tcxo_clock;
29}; 51};
30 52
31#endif /* __WL12XX_PRIV_H__ */ 53#endif /* __WL12XX_PRIV_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/Kconfig b/drivers/net/wireless/ti/wl18xx/Kconfig
new file mode 100644
index 000000000000..1cfdb2548821
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/Kconfig
@@ -0,0 +1,7 @@
1config WL18XX
2 tristate "TI wl18xx support"
3 depends on MAC80211
4 select WLCORE
5 ---help---
6 This module adds support for wireless adapters based on TI
7 WiLink 8 chipsets.
diff --git a/drivers/net/wireless/ti/wl18xx/Makefile b/drivers/net/wireless/ti/wl18xx/Makefile
new file mode 100644
index 000000000000..67c098734c7f
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/Makefile
@@ -0,0 +1,3 @@
1wl18xx-objs = main.o acx.o tx.o io.o debugfs.o
2
3obj-$(CONFIG_WL18XX) += wl18xx.o
diff --git a/drivers/net/wireless/ti/wl18xx/acx.c b/drivers/net/wireless/ti/wl18xx/acx.c
new file mode 100644
index 000000000000..72840e23bf59
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/acx.c
@@ -0,0 +1,111 @@
1/*
2 * This file is part of wl18xx
3 *
4 * Copyright (C) 2011 Texas Instruments Inc.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#include "../wlcore/cmd.h"
23#include "../wlcore/debug.h"
24#include "../wlcore/acx.h"
25
26#include "acx.h"
27
28int wl18xx_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap,
29 u32 sdio_blk_size, u32 extra_mem_blks,
30 u32 len_field_size)
31{
32 struct wl18xx_acx_host_config_bitmap *bitmap_conf;
33 int ret;
34
35 wl1271_debug(DEBUG_ACX, "acx cfg bitmap %d blk %d spare %d field %d",
36 host_cfg_bitmap, sdio_blk_size, extra_mem_blks,
37 len_field_size);
38
39 bitmap_conf = kzalloc(sizeof(*bitmap_conf), GFP_KERNEL);
40 if (!bitmap_conf) {
41 ret = -ENOMEM;
42 goto out;
43 }
44
45 bitmap_conf->host_cfg_bitmap = cpu_to_le32(host_cfg_bitmap);
46 bitmap_conf->host_sdio_block_size = cpu_to_le32(sdio_blk_size);
47 bitmap_conf->extra_mem_blocks = cpu_to_le32(extra_mem_blks);
48 bitmap_conf->length_field_size = cpu_to_le32(len_field_size);
49
50 ret = wl1271_cmd_configure(wl, ACX_HOST_IF_CFG_BITMAP,
51 bitmap_conf, sizeof(*bitmap_conf));
52 if (ret < 0) {
53 wl1271_warning("wl1271 bitmap config opt failed: %d", ret);
54 goto out;
55 }
56
57out:
58 kfree(bitmap_conf);
59
60 return ret;
61}
62
63int wl18xx_acx_set_checksum_state(struct wl1271 *wl)
64{
65 struct wl18xx_acx_checksum_state *acx;
66 int ret;
67
68 wl1271_debug(DEBUG_ACX, "acx checksum state");
69
70 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
71 if (!acx) {
72 ret = -ENOMEM;
73 goto out;
74 }
75
76 acx->checksum_state = CHECKSUM_OFFLOAD_ENABLED;
77
78 ret = wl1271_cmd_configure(wl, ACX_CHECKSUM_CONFIG, acx, sizeof(*acx));
79 if (ret < 0) {
80 wl1271_warning("failed to set Tx checksum state: %d", ret);
81 goto out;
82 }
83
84out:
85 kfree(acx);
86 return ret;
87}
88
89int wl18xx_acx_clear_statistics(struct wl1271 *wl)
90{
91 struct wl18xx_acx_clear_statistics *acx;
92 int ret = 0;
93
94 wl1271_debug(DEBUG_ACX, "acx clear statistics");
95
96 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
97 if (!acx) {
98 ret = -ENOMEM;
99 goto out;
100 }
101
102 ret = wl1271_cmd_configure(wl, ACX_CLEAR_STATISTICS, acx, sizeof(*acx));
103 if (ret < 0) {
104 wl1271_warning("failed to clear firmware statistics: %d", ret);
105 goto out;
106 }
107
108out:
109 kfree(acx);
110 return ret;
111}
diff --git a/drivers/net/wireless/ti/wl18xx/acx.h b/drivers/net/wireless/ti/wl18xx/acx.h
new file mode 100644
index 000000000000..e2609a6b7341
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/acx.h
@@ -0,0 +1,287 @@
1/*
2 * This file is part of wl18xx
3 *
4 * Copyright (C) 2011 Texas Instruments. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#ifndef __WL18XX_ACX_H__
23#define __WL18XX_ACX_H__
24
25#include "../wlcore/wlcore.h"
26#include "../wlcore/acx.h"
27
28enum {
29 ACX_CLEAR_STATISTICS = 0x0047,
30};
31
32/* numbers of bits the length field takes (add 1 for the actual number) */
33#define WL18XX_HOST_IF_LEN_SIZE_FIELD 15
34
35#define WL18XX_ACX_EVENTS_VECTOR (WL1271_ACX_INTR_WATCHDOG | \
36 WL1271_ACX_INTR_INIT_COMPLETE | \
37 WL1271_ACX_INTR_EVENT_A | \
38 WL1271_ACX_INTR_EVENT_B | \
39 WL1271_ACX_INTR_CMD_COMPLETE | \
40 WL1271_ACX_INTR_HW_AVAILABLE | \
41 WL1271_ACX_INTR_DATA | \
42 WL1271_ACX_SW_INTR_WATCHDOG)
43
44#define WL18XX_INTR_MASK (WL1271_ACX_INTR_WATCHDOG | \
45 WL1271_ACX_INTR_EVENT_A | \
46 WL1271_ACX_INTR_EVENT_B | \
47 WL1271_ACX_INTR_HW_AVAILABLE | \
48 WL1271_ACX_INTR_DATA | \
49 WL1271_ACX_SW_INTR_WATCHDOG)
50
51struct wl18xx_acx_host_config_bitmap {
52 struct acx_header header;
53
54 __le32 host_cfg_bitmap;
55
56 __le32 host_sdio_block_size;
57
58 /* extra mem blocks per frame in TX. */
59 __le32 extra_mem_blocks;
60
61 /*
62 * number of bits of the length field in the first TX word
63 * (up to 15 - for using the entire 16 bits).
64 */
65 __le32 length_field_size;
66
67} __packed;
68
69enum {
70 CHECKSUM_OFFLOAD_DISABLED = 0,
71 CHECKSUM_OFFLOAD_ENABLED = 1,
72 CHECKSUM_OFFLOAD_FAKE_RX = 2,
73 CHECKSUM_OFFLOAD_INVALID = 0xFF
74};
75
76struct wl18xx_acx_checksum_state {
77 struct acx_header header;
78
79 /* enum acx_checksum_state */
80 u8 checksum_state;
81 u8 pad[3];
82} __packed;
83
84
85struct wl18xx_acx_error_stats {
86 u32 error_frame;
87 u32 error_null_Frame_tx_start;
88 u32 error_numll_frame_cts_start;
89 u32 error_bar_retry;
90 u32 error_frame_cts_nul_flid;
91} __packed;
92
93struct wl18xx_acx_debug_stats {
94 u32 debug1;
95 u32 debug2;
96 u32 debug3;
97 u32 debug4;
98 u32 debug5;
99 u32 debug6;
100} __packed;
101
102struct wl18xx_acx_ring_stats {
103 u32 prepared_descs;
104 u32 tx_cmplt;
105} __packed;
106
107struct wl18xx_acx_tx_stats {
108 u32 tx_prepared_descs;
109 u32 tx_cmplt;
110 u32 tx_template_prepared;
111 u32 tx_data_prepared;
112 u32 tx_template_programmed;
113 u32 tx_data_programmed;
114 u32 tx_burst_programmed;
115 u32 tx_starts;
116 u32 tx_imm_resp;
117 u32 tx_start_templates;
118 u32 tx_start_int_templates;
119 u32 tx_start_fw_gen;
120 u32 tx_start_data;
121 u32 tx_start_null_frame;
122 u32 tx_exch;
123 u32 tx_retry_template;
124 u32 tx_retry_data;
125 u32 tx_exch_pending;
126 u32 tx_exch_expiry;
127 u32 tx_done_template;
128 u32 tx_done_data;
129 u32 tx_done_int_template;
130 u32 tx_frame_checksum;
131 u32 tx_checksum_result;
132 u32 frag_called;
133 u32 frag_mpdu_alloc_failed;
134 u32 frag_init_called;
135 u32 frag_in_process_called;
136 u32 frag_tkip_called;
137 u32 frag_key_not_found;
138 u32 frag_need_fragmentation;
139 u32 frag_bad_mblk_num;
140 u32 frag_failed;
141 u32 frag_cache_hit;
142 u32 frag_cache_miss;
143} __packed;
144
145struct wl18xx_acx_rx_stats {
146 u32 rx_beacon_early_term;
147 u32 rx_out_of_mpdu_nodes;
148 u32 rx_hdr_overflow;
149 u32 rx_dropped_frame;
150 u32 rx_done_stage;
151 u32 rx_done;
152 u32 rx_defrag;
153 u32 rx_defrag_end;
154 u32 rx_cmplt;
155 u32 rx_pre_complt;
156 u32 rx_cmplt_task;
157 u32 rx_phy_hdr;
158 u32 rx_timeout;
159 u32 rx_timeout_wa;
160 u32 rx_wa_density_dropped_frame;
161 u32 rx_wa_ba_not_expected;
162 u32 rx_frame_checksum;
163 u32 rx_checksum_result;
164 u32 defrag_called;
165 u32 defrag_init_called;
166 u32 defrag_in_process_called;
167 u32 defrag_tkip_called;
168 u32 defrag_need_defrag;
169 u32 defrag_decrypt_failed;
170 u32 decrypt_key_not_found;
171 u32 defrag_need_decrypt;
172 u32 rx_tkip_replays;
173} __packed;
174
175struct wl18xx_acx_isr_stats {
176 u32 irqs;
177} __packed;
178
179#define PWR_STAT_MAX_CONT_MISSED_BCNS_SPREAD 10
180
181struct wl18xx_acx_pwr_stats {
182 u32 missing_bcns_cnt;
183 u32 rcvd_bcns_cnt;
184 u32 connection_out_of_sync;
185 u32 cont_miss_bcns_spread[PWR_STAT_MAX_CONT_MISSED_BCNS_SPREAD];
186 u32 rcvd_awake_bcns_cnt;
187} __packed;
188
189struct wl18xx_acx_event_stats {
190 u32 calibration;
191 u32 rx_mismatch;
192 u32 rx_mem_empty;
193} __packed;
194
195struct wl18xx_acx_ps_poll_stats {
196 u32 ps_poll_timeouts;
197 u32 upsd_timeouts;
198 u32 upsd_max_ap_turn;
199 u32 ps_poll_max_ap_turn;
200 u32 ps_poll_utilization;
201 u32 upsd_utilization;
202} __packed;
203
204struct wl18xx_acx_rx_filter_stats {
205 u32 beacon_filter;
206 u32 arp_filter;
207 u32 mc_filter;
208 u32 dup_filter;
209 u32 data_filter;
210 u32 ibss_filter;
211 u32 protection_filter;
212 u32 accum_arp_pend_requests;
213 u32 max_arp_queue_dep;
214} __packed;
215
216struct wl18xx_acx_rx_rate_stats {
217 u32 rx_frames_per_rates[50];
218} __packed;
219
220#define AGGR_STATS_TX_AGG 16
221#define AGGR_STATS_TX_RATE 16
222#define AGGR_STATS_RX_SIZE_LEN 16
223
224struct wl18xx_acx_aggr_stats {
225 u32 tx_agg_vs_rate[AGGR_STATS_TX_AGG * AGGR_STATS_TX_RATE];
226 u32 rx_size[AGGR_STATS_RX_SIZE_LEN];
227} __packed;
228
229#define PIPE_STATS_HW_FIFO 11
230
231struct wl18xx_acx_pipeline_stats {
232 u32 hs_tx_stat_fifo_int;
233 u32 hs_rx_stat_fifo_int;
234 u32 tcp_tx_stat_fifo_int;
235 u32 tcp_rx_stat_fifo_int;
236 u32 enc_tx_stat_fifo_int;
237 u32 enc_rx_stat_fifo_int;
238 u32 rx_complete_stat_fifo_int;
239 u32 pre_proc_swi;
240 u32 post_proc_swi;
241 u32 sec_frag_swi;
242 u32 pre_to_defrag_swi;
243 u32 defrag_to_csum_swi;
244 u32 csum_to_rx_xfer_swi;
245 u32 dec_packet_in;
246 u32 dec_packet_in_fifo_full;
247 u32 dec_packet_out;
248 u32 cs_rx_packet_in;
249 u32 cs_rx_packet_out;
250 u16 pipeline_fifo_full[PIPE_STATS_HW_FIFO];
251} __packed;
252
253struct wl18xx_acx_mem_stats {
254 u32 rx_free_mem_blks;
255 u32 tx_free_mem_blks;
256 u32 fwlog_free_mem_blks;
257 u32 fw_gen_free_mem_blks;
258} __packed;
259
260struct wl18xx_acx_statistics {
261 struct acx_header header;
262
263 struct wl18xx_acx_error_stats error;
264 struct wl18xx_acx_debug_stats debug;
265 struct wl18xx_acx_tx_stats tx;
266 struct wl18xx_acx_rx_stats rx;
267 struct wl18xx_acx_isr_stats isr;
268 struct wl18xx_acx_pwr_stats pwr;
269 struct wl18xx_acx_ps_poll_stats ps_poll;
270 struct wl18xx_acx_rx_filter_stats rx_filter;
271 struct wl18xx_acx_rx_rate_stats rx_rate;
272 struct wl18xx_acx_aggr_stats aggr_size;
273 struct wl18xx_acx_pipeline_stats pipeline;
274 struct wl18xx_acx_mem_stats mem;
275} __packed;
276
277struct wl18xx_acx_clear_statistics {
278 struct acx_header header;
279};
280
281int wl18xx_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap,
282 u32 sdio_blk_size, u32 extra_mem_blks,
283 u32 len_field_size);
284int wl18xx_acx_set_checksum_state(struct wl1271 *wl);
285int wl18xx_acx_clear_statistics(struct wl1271 *wl);
286
287#endif /* __WL18XX_ACX_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/conf.h b/drivers/net/wireless/ti/wl18xx/conf.h
new file mode 100644
index 000000000000..fac0b7e87e75
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/conf.h
@@ -0,0 +1,92 @@
1/*
2 * This file is part of wl18xx
3 *
4 * Copyright (C) 2011 Texas Instruments Inc.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#ifndef __WL18XX_CONF_H__
23#define __WL18XX_CONF_H__
24
25#define WL18XX_CONF_MAGIC 0x10e100ca
26#define WL18XX_CONF_VERSION (WLCORE_CONF_VERSION | 0x0002)
27#define WL18XX_CONF_MASK 0x0000ffff
28#define WL18XX_CONF_SIZE (WLCORE_CONF_SIZE + \
29 sizeof(struct wl18xx_priv_conf))
30
31#define NUM_OF_CHANNELS_11_ABG 150
32#define NUM_OF_CHANNELS_11_P 7
33#define WL18XX_NUM_OF_SUB_BANDS 9
34#define SRF_TABLE_LEN 16
35#define PIN_MUXING_SIZE 2
36
37struct wl18xx_mac_and_phy_params {
38 u8 phy_standalone;
39 u8 rdl;
40 u8 enable_clpc;
41 u8 enable_tx_low_pwr_on_siso_rdl;
42 u8 auto_detect;
43 u8 dedicated_fem;
44
45 u8 low_band_component;
46
47 /* Bit 0: One Hot, Bit 1: Control Enable, Bit 2: 1.8V, Bit 3: 3V */
48 u8 low_band_component_type;
49
50 u8 high_band_component;
51
52 /* Bit 0: One Hot, Bit 1: Control Enable, Bit 2: 1.8V, Bit 3: 3V */
53 u8 high_band_component_type;
54 u8 number_of_assembled_ant2_4;
55 u8 number_of_assembled_ant5;
56 u8 pin_muxing_platform_options[PIN_MUXING_SIZE];
57 u8 external_pa_dc2dc;
58 u8 tcxo_ldo_voltage;
59 u8 xtal_itrim_val;
60 u8 srf_state;
61 u8 srf1[SRF_TABLE_LEN];
62 u8 srf2[SRF_TABLE_LEN];
63 u8 srf3[SRF_TABLE_LEN];
64 u8 io_configuration;
65 u8 sdio_configuration;
66 u8 settings;
67 u8 rx_profile;
68 u8 per_chan_pwr_limit_arr_11abg[NUM_OF_CHANNELS_11_ABG];
69 u8 pwr_limit_reference_11_abg;
70 u8 per_chan_pwr_limit_arr_11p[NUM_OF_CHANNELS_11_P];
71 u8 pwr_limit_reference_11p;
72 u8 per_sub_band_tx_trace_loss[WL18XX_NUM_OF_SUB_BANDS];
73 u8 per_sub_band_rx_trace_loss[WL18XX_NUM_OF_SUB_BANDS];
74 u8 primary_clock_setting_time;
75 u8 clock_valid_on_wake_up;
76 u8 secondary_clock_setting_time;
77 u8 board_type;
78 /* enable point saturation */
79 u8 psat;
80 /* low/medium/high Tx power in dBm */
81 s8 low_power_val;
82 s8 med_power_val;
83 s8 high_power_val;
84 u8 padding[1];
85} __packed;
86
87struct wl18xx_priv_conf {
88 /* this structure is copied wholesale to FW */
89 struct wl18xx_mac_and_phy_params phy;
90} __packed;
91
92#endif /* __WL18XX_CONF_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c
new file mode 100644
index 000000000000..3ce6f1039af3
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/debugfs.c
@@ -0,0 +1,403 @@
1/*
2 * This file is part of wl18xx
3 *
4 * Copyright (C) 2009 Nokia Corporation
5 * Copyright (C) 2011-2012 Texas Instruments
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
19 * 02110-1301 USA
20 *
21 */
22
23#include "../wlcore/debugfs.h"
24#include "../wlcore/wlcore.h"
25
26#include "wl18xx.h"
27#include "acx.h"
28#include "debugfs.h"
29
30#define WL18XX_DEBUGFS_FWSTATS_FILE(a, b, c) \
31 DEBUGFS_FWSTATS_FILE(a, b, c, wl18xx_acx_statistics)
32#define WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(a, b, c) \
33 DEBUGFS_FWSTATS_FILE_ARRAY(a, b, c, wl18xx_acx_statistics)
34
35
36WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug1, "%u");
37WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug2, "%u");
38WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug3, "%u");
39WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug4, "%u");
40WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug5, "%u");
41WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug6, "%u");
42
43WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame, "%u");
44WL18XX_DEBUGFS_FWSTATS_FILE(error, error_null_Frame_tx_start, "%u");
45WL18XX_DEBUGFS_FWSTATS_FILE(error, error_numll_frame_cts_start, "%u");
46WL18XX_DEBUGFS_FWSTATS_FILE(error, error_bar_retry, "%u");
47WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_cts_nul_flid, "%u");
48
49WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_prepared_descs, "%u");
50WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_cmplt, "%u");
51WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_template_prepared, "%u");
52WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_data_prepared, "%u");
53WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_template_programmed, "%u");
54WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_data_programmed, "%u");
55WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_burst_programmed, "%u");
56WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_starts, "%u");
57WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_imm_resp, "%u");
58WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_templates, "%u");
59WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_int_templates, "%u");
60WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_fw_gen, "%u");
61WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_data, "%u");
62WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_null_frame, "%u");
63WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch, "%u");
64WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_retry_template, "%u");
65WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_retry_data, "%u");
66WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch_pending, "%u");
67WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch_expiry, "%u");
68WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_template, "%u");
69WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_data, "%u");
70WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_int_template, "%u");
71WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_frame_checksum, "%u");
72WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_checksum_result, "%u");
73WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_called, "%u");
74WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_mpdu_alloc_failed, "%u");
75WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_init_called, "%u");
76WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_in_process_called, "%u");
77WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_tkip_called, "%u");
78WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_key_not_found, "%u");
79WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_need_fragmentation, "%u");
80WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_bad_mblk_num, "%u");
81WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_failed, "%u");
82WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_cache_hit, "%u");
83WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_cache_miss, "%u");
84
85WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_beacon_early_term, "%u");
86WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_out_of_mpdu_nodes, "%u");
87WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_hdr_overflow, "%u");
88WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_dropped_frame, "%u");
89WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_done, "%u");
90WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_defrag, "%u");
91WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_defrag_end, "%u");
92WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_cmplt, "%u");
93WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_pre_complt, "%u");
94WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_cmplt_task, "%u");
95WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_phy_hdr, "%u");
96WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_timeout, "%u");
97WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_timeout_wa, "%u");
98WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_wa_density_dropped_frame, "%u");
99WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_wa_ba_not_expected, "%u");
100WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_frame_checksum, "%u");
101WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_checksum_result, "%u");
102WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_called, "%u");
103WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_init_called, "%u");
104WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_in_process_called, "%u");
105WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_tkip_called, "%u");
106WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_need_defrag, "%u");
107WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_decrypt_failed, "%u");
108WL18XX_DEBUGFS_FWSTATS_FILE(rx, decrypt_key_not_found, "%u");
109WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_need_decrypt, "%u");
110WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_tkip_replays, "%u");
111
112WL18XX_DEBUGFS_FWSTATS_FILE(isr, irqs, "%u");
113
114WL18XX_DEBUGFS_FWSTATS_FILE(pwr, missing_bcns_cnt, "%u");
115WL18XX_DEBUGFS_FWSTATS_FILE(pwr, rcvd_bcns_cnt, "%u");
116WL18XX_DEBUGFS_FWSTATS_FILE(pwr, connection_out_of_sync, "%u");
117WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(pwr, cont_miss_bcns_spread,
118 PWR_STAT_MAX_CONT_MISSED_BCNS_SPREAD);
119WL18XX_DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_bcns_cnt, "%u");
120
121
122WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_timeouts, "%u");
123WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_timeouts, "%u");
124WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_max_ap_turn, "%u");
125WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_max_ap_turn, "%u");
126WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_utilization, "%u");
127WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_utilization, "%u");
128
129WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, beacon_filter, "%u");
130WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, arp_filter, "%u");
131WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, mc_filter, "%u");
132WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, dup_filter, "%u");
133WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, data_filter, "%u");
134WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, ibss_filter, "%u");
135WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, protection_filter, "%u");
136WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, accum_arp_pend_requests, "%u");
137WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, max_arp_queue_dep, "%u");
138
139WL18XX_DEBUGFS_FWSTATS_FILE(rx_rate, rx_frames_per_rates, "%u");
140
141WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_vs_rate,
142 AGGR_STATS_TX_AGG*AGGR_STATS_TX_RATE);
143WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, rx_size,
144 AGGR_STATS_RX_SIZE_LEN);
145
146WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, hs_tx_stat_fifo_int, "%u");
147WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, tcp_tx_stat_fifo_int, "%u");
148WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, tcp_rx_stat_fifo_int, "%u");
149WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, enc_tx_stat_fifo_int, "%u");
150WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, enc_rx_stat_fifo_int, "%u");
151WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, rx_complete_stat_fifo_int, "%u");
152WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, pre_proc_swi, "%u");
153WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, post_proc_swi, "%u");
154WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, sec_frag_swi, "%u");
155WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, pre_to_defrag_swi, "%u");
156WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, defrag_to_csum_swi, "%u");
157WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, csum_to_rx_xfer_swi, "%u");
158WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_in, "%u");
159WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_in_fifo_full, "%u");
160WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_out, "%u");
161WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, cs_rx_packet_in, "%u");
162WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, cs_rx_packet_out, "%u");
163
164WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(pipeline, pipeline_fifo_full,
165 PIPE_STATS_HW_FIFO);
166
167WL18XX_DEBUGFS_FWSTATS_FILE(mem, rx_free_mem_blks, "%u");
168WL18XX_DEBUGFS_FWSTATS_FILE(mem, tx_free_mem_blks, "%u");
169WL18XX_DEBUGFS_FWSTATS_FILE(mem, fwlog_free_mem_blks, "%u");
170WL18XX_DEBUGFS_FWSTATS_FILE(mem, fw_gen_free_mem_blks, "%u");
171
172static ssize_t conf_read(struct file *file, char __user *user_buf,
173 size_t count, loff_t *ppos)
174{
175 struct wl1271 *wl = file->private_data;
176 struct wl18xx_priv *priv = wl->priv;
177 struct wlcore_conf_header header;
178 char *buf, *pos;
179 size_t len;
180 int ret;
181
182 len = WL18XX_CONF_SIZE;
183 buf = kmalloc(len, GFP_KERNEL);
184 if (!buf)
185 return -ENOMEM;
186
187 header.magic = cpu_to_le32(WL18XX_CONF_MAGIC);
188 header.version = cpu_to_le32(WL18XX_CONF_VERSION);
189 header.checksum = 0;
190
191 mutex_lock(&wl->mutex);
192
193 pos = buf;
194 memcpy(pos, &header, sizeof(header));
195 pos += sizeof(header);
196 memcpy(pos, &wl->conf, sizeof(wl->conf));
197 pos += sizeof(wl->conf);
198 memcpy(pos, &priv->conf, sizeof(priv->conf));
199
200 mutex_unlock(&wl->mutex);
201
202 ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
203
204 kfree(buf);
205 return ret;
206}
207
208static const struct file_operations conf_ops = {
209 .read = conf_read,
210 .open = simple_open,
211 .llseek = default_llseek,
212};
213
214static ssize_t clear_fw_stats_write(struct file *file,
215 const char __user *user_buf,
216 size_t count, loff_t *ppos)
217{
218 struct wl1271 *wl = file->private_data;
219 int ret;
220
221 mutex_lock(&wl->mutex);
222
223 if (wl->state == WL1271_STATE_OFF)
224 goto out;
225
226 ret = wl18xx_acx_clear_statistics(wl);
227 if (ret < 0) {
228 count = ret;
229 goto out;
230 }
231out:
232 mutex_unlock(&wl->mutex);
233 return count;
234}
235
236static const struct file_operations clear_fw_stats_ops = {
237 .write = clear_fw_stats_write,
238 .open = simple_open,
239 .llseek = default_llseek,
240};
241
242int wl18xx_debugfs_add_files(struct wl1271 *wl,
243 struct dentry *rootdir)
244{
245 int ret = 0;
246 struct dentry *entry, *stats, *moddir;
247
248 moddir = debugfs_create_dir(KBUILD_MODNAME, rootdir);
249 if (!moddir || IS_ERR(moddir)) {
250 entry = moddir;
251 goto err;
252 }
253
254 stats = debugfs_create_dir("fw_stats", moddir);
255 if (!stats || IS_ERR(stats)) {
256 entry = stats;
257 goto err;
258 }
259
260 DEBUGFS_ADD(clear_fw_stats, stats);
261
262 DEBUGFS_FWSTATS_ADD(debug, debug1);
263 DEBUGFS_FWSTATS_ADD(debug, debug2);
264 DEBUGFS_FWSTATS_ADD(debug, debug3);
265 DEBUGFS_FWSTATS_ADD(debug, debug4);
266 DEBUGFS_FWSTATS_ADD(debug, debug5);
267 DEBUGFS_FWSTATS_ADD(debug, debug6);
268
269 DEBUGFS_FWSTATS_ADD(error, error_frame);
270 DEBUGFS_FWSTATS_ADD(error, error_null_Frame_tx_start);
271 DEBUGFS_FWSTATS_ADD(error, error_numll_frame_cts_start);
272 DEBUGFS_FWSTATS_ADD(error, error_bar_retry);
273 DEBUGFS_FWSTATS_ADD(error, error_frame_cts_nul_flid);
274
275 DEBUGFS_FWSTATS_ADD(tx, tx_prepared_descs);
276 DEBUGFS_FWSTATS_ADD(tx, tx_cmplt);
277 DEBUGFS_FWSTATS_ADD(tx, tx_template_prepared);
278 DEBUGFS_FWSTATS_ADD(tx, tx_data_prepared);
279 DEBUGFS_FWSTATS_ADD(tx, tx_template_programmed);
280 DEBUGFS_FWSTATS_ADD(tx, tx_data_programmed);
281 DEBUGFS_FWSTATS_ADD(tx, tx_burst_programmed);
282 DEBUGFS_FWSTATS_ADD(tx, tx_starts);
283 DEBUGFS_FWSTATS_ADD(tx, tx_imm_resp);
284 DEBUGFS_FWSTATS_ADD(tx, tx_start_templates);
285 DEBUGFS_FWSTATS_ADD(tx, tx_start_int_templates);
286 DEBUGFS_FWSTATS_ADD(tx, tx_start_fw_gen);
287 DEBUGFS_FWSTATS_ADD(tx, tx_start_data);
288 DEBUGFS_FWSTATS_ADD(tx, tx_start_null_frame);
289 DEBUGFS_FWSTATS_ADD(tx, tx_exch);
290 DEBUGFS_FWSTATS_ADD(tx, tx_retry_template);
291 DEBUGFS_FWSTATS_ADD(tx, tx_retry_data);
292 DEBUGFS_FWSTATS_ADD(tx, tx_exch_pending);
293 DEBUGFS_FWSTATS_ADD(tx, tx_exch_expiry);
294 DEBUGFS_FWSTATS_ADD(tx, tx_done_template);
295 DEBUGFS_FWSTATS_ADD(tx, tx_done_data);
296 DEBUGFS_FWSTATS_ADD(tx, tx_done_int_template);
297 DEBUGFS_FWSTATS_ADD(tx, tx_frame_checksum);
298 DEBUGFS_FWSTATS_ADD(tx, tx_checksum_result);
299 DEBUGFS_FWSTATS_ADD(tx, frag_called);
300 DEBUGFS_FWSTATS_ADD(tx, frag_mpdu_alloc_failed);
301 DEBUGFS_FWSTATS_ADD(tx, frag_init_called);
302 DEBUGFS_FWSTATS_ADD(tx, frag_in_process_called);
303 DEBUGFS_FWSTATS_ADD(tx, frag_tkip_called);
304 DEBUGFS_FWSTATS_ADD(tx, frag_key_not_found);
305 DEBUGFS_FWSTATS_ADD(tx, frag_need_fragmentation);
306 DEBUGFS_FWSTATS_ADD(tx, frag_bad_mblk_num);
307 DEBUGFS_FWSTATS_ADD(tx, frag_failed);
308 DEBUGFS_FWSTATS_ADD(tx, frag_cache_hit);
309 DEBUGFS_FWSTATS_ADD(tx, frag_cache_miss);
310
311 DEBUGFS_FWSTATS_ADD(rx, rx_beacon_early_term);
312 DEBUGFS_FWSTATS_ADD(rx, rx_out_of_mpdu_nodes);
313 DEBUGFS_FWSTATS_ADD(rx, rx_hdr_overflow);
314 DEBUGFS_FWSTATS_ADD(rx, rx_dropped_frame);
315 DEBUGFS_FWSTATS_ADD(rx, rx_done);
316 DEBUGFS_FWSTATS_ADD(rx, rx_defrag);
317 DEBUGFS_FWSTATS_ADD(rx, rx_defrag_end);
318 DEBUGFS_FWSTATS_ADD(rx, rx_cmplt);
319 DEBUGFS_FWSTATS_ADD(rx, rx_pre_complt);
320 DEBUGFS_FWSTATS_ADD(rx, rx_cmplt_task);
321 DEBUGFS_FWSTATS_ADD(rx, rx_phy_hdr);
322 DEBUGFS_FWSTATS_ADD(rx, rx_timeout);
323 DEBUGFS_FWSTATS_ADD(rx, rx_timeout_wa);
324 DEBUGFS_FWSTATS_ADD(rx, rx_wa_density_dropped_frame);
325 DEBUGFS_FWSTATS_ADD(rx, rx_wa_ba_not_expected);
326 DEBUGFS_FWSTATS_ADD(rx, rx_frame_checksum);
327 DEBUGFS_FWSTATS_ADD(rx, rx_checksum_result);
328 DEBUGFS_FWSTATS_ADD(rx, defrag_called);
329 DEBUGFS_FWSTATS_ADD(rx, defrag_init_called);
330 DEBUGFS_FWSTATS_ADD(rx, defrag_in_process_called);
331 DEBUGFS_FWSTATS_ADD(rx, defrag_tkip_called);
332 DEBUGFS_FWSTATS_ADD(rx, defrag_need_defrag);
333 DEBUGFS_FWSTATS_ADD(rx, defrag_decrypt_failed);
334 DEBUGFS_FWSTATS_ADD(rx, decrypt_key_not_found);
335 DEBUGFS_FWSTATS_ADD(rx, defrag_need_decrypt);
336 DEBUGFS_FWSTATS_ADD(rx, rx_tkip_replays);
337
338 DEBUGFS_FWSTATS_ADD(isr, irqs);
339
340 DEBUGFS_FWSTATS_ADD(pwr, missing_bcns_cnt);
341 DEBUGFS_FWSTATS_ADD(pwr, rcvd_bcns_cnt);
342 DEBUGFS_FWSTATS_ADD(pwr, connection_out_of_sync);
343 DEBUGFS_FWSTATS_ADD(pwr, cont_miss_bcns_spread);
344 DEBUGFS_FWSTATS_ADD(pwr, rcvd_awake_bcns_cnt);
345
346 DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_timeouts);
347 DEBUGFS_FWSTATS_ADD(ps_poll, upsd_timeouts);
348 DEBUGFS_FWSTATS_ADD(ps_poll, upsd_max_ap_turn);
349 DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_max_ap_turn);
350 DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_utilization);
351 DEBUGFS_FWSTATS_ADD(ps_poll, upsd_utilization);
352
353 DEBUGFS_FWSTATS_ADD(rx_filter, beacon_filter);
354 DEBUGFS_FWSTATS_ADD(rx_filter, arp_filter);
355 DEBUGFS_FWSTATS_ADD(rx_filter, mc_filter);
356 DEBUGFS_FWSTATS_ADD(rx_filter, dup_filter);
357 DEBUGFS_FWSTATS_ADD(rx_filter, data_filter);
358 DEBUGFS_FWSTATS_ADD(rx_filter, ibss_filter);
359 DEBUGFS_FWSTATS_ADD(rx_filter, protection_filter);
360 DEBUGFS_FWSTATS_ADD(rx_filter, accum_arp_pend_requests);
361 DEBUGFS_FWSTATS_ADD(rx_filter, max_arp_queue_dep);
362
363 DEBUGFS_FWSTATS_ADD(rx_rate, rx_frames_per_rates);
364
365 DEBUGFS_FWSTATS_ADD(aggr_size, tx_agg_vs_rate);
366 DEBUGFS_FWSTATS_ADD(aggr_size, rx_size);
367
368 DEBUGFS_FWSTATS_ADD(pipeline, hs_tx_stat_fifo_int);
369 DEBUGFS_FWSTATS_ADD(pipeline, tcp_tx_stat_fifo_int);
370 DEBUGFS_FWSTATS_ADD(pipeline, tcp_rx_stat_fifo_int);
371 DEBUGFS_FWSTATS_ADD(pipeline, enc_tx_stat_fifo_int);
372 DEBUGFS_FWSTATS_ADD(pipeline, enc_rx_stat_fifo_int);
373 DEBUGFS_FWSTATS_ADD(pipeline, rx_complete_stat_fifo_int);
374 DEBUGFS_FWSTATS_ADD(pipeline, pre_proc_swi);
375 DEBUGFS_FWSTATS_ADD(pipeline, post_proc_swi);
376 DEBUGFS_FWSTATS_ADD(pipeline, sec_frag_swi);
377 DEBUGFS_FWSTATS_ADD(pipeline, pre_to_defrag_swi);
378 DEBUGFS_FWSTATS_ADD(pipeline, defrag_to_csum_swi);
379 DEBUGFS_FWSTATS_ADD(pipeline, csum_to_rx_xfer_swi);
380 DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_in);
381 DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_in_fifo_full);
382 DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_out);
383 DEBUGFS_FWSTATS_ADD(pipeline, cs_rx_packet_in);
384 DEBUGFS_FWSTATS_ADD(pipeline, cs_rx_packet_out);
385 DEBUGFS_FWSTATS_ADD(pipeline, pipeline_fifo_full);
386
387 DEBUGFS_FWSTATS_ADD(mem, rx_free_mem_blks);
388 DEBUGFS_FWSTATS_ADD(mem, tx_free_mem_blks);
389 DEBUGFS_FWSTATS_ADD(mem, fwlog_free_mem_blks);
390 DEBUGFS_FWSTATS_ADD(mem, fw_gen_free_mem_blks);
391
392 DEBUGFS_ADD(conf, moddir);
393
394 return 0;
395
396err:
397 if (IS_ERR(entry))
398 ret = PTR_ERR(entry);
399 else
400 ret = -ENOMEM;
401
402 return ret;
403}
diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.h b/drivers/net/wireless/ti/wl18xx/debugfs.h
new file mode 100644
index 000000000000..ed679bebf620
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/debugfs.h
@@ -0,0 +1,28 @@
1/*
2 * This file is part of wl18xx
3 *
4 * Copyright (C) 2012 Texas Instruments. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#ifndef __WL18XX_DEBUGFS_H__
23#define __WL18XX_DEBUGFS_H__
24
25int wl18xx_debugfs_add_files(struct wl1271 *wl,
26 struct dentry *rootdir);
27
28#endif /* __WL18XX_DEBUGFS_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/io.c b/drivers/net/wireless/ti/wl18xx/io.c
new file mode 100644
index 000000000000..0c06ccfd1b8c
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/io.c
@@ -0,0 +1,75 @@
1/*
2 * This file is part of wl18xx
3 *
4 * Copyright (C) 2011 Texas Instruments
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#include "../wlcore/wlcore.h"
23#include "../wlcore/io.h"
24
25#include "io.h"
26
27int wl18xx_top_reg_write(struct wl1271 *wl, int addr, u16 val)
28{
29 u32 tmp;
30 int ret;
31
32 if (WARN_ON(addr % 2))
33 return -EINVAL;
34
35 if ((addr % 4) == 0) {
36 ret = wlcore_read32(wl, addr, &tmp);
37 if (ret < 0)
38 goto out;
39
40 tmp = (tmp & 0xffff0000) | val;
41 ret = wlcore_write32(wl, addr, tmp);
42 } else {
43 ret = wlcore_read32(wl, addr - 2, &tmp);
44 if (ret < 0)
45 goto out;
46
47 tmp = (tmp & 0xffff) | (val << 16);
48 ret = wlcore_write32(wl, addr - 2, tmp);
49 }
50
51out:
52 return ret;
53}
54
55int wl18xx_top_reg_read(struct wl1271 *wl, int addr, u16 *out)
56{
57 u32 val;
58 int ret;
59
60 if (WARN_ON(addr % 2))
61 return -EINVAL;
62
63 if ((addr % 4) == 0) {
64 /* address is 4-bytes aligned */
65 ret = wlcore_read32(wl, addr, &val);
66 if (ret >= 0 && out)
67 *out = val & 0xffff;
68 } else {
69 ret = wlcore_read32(wl, addr - 2, &val);
70 if (ret >= 0 && out)
71 *out = (val & 0xffff0000) >> 16;
72 }
73
74 return ret;
75}
diff --git a/drivers/net/wireless/ti/wl18xx/io.h b/drivers/net/wireless/ti/wl18xx/io.h
new file mode 100644
index 000000000000..c32ae30277df
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/io.h
@@ -0,0 +1,28 @@
1/*
2 * This file is part of wl18xx
3 *
4 * Copyright (C) 2011 Texas Instruments
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
#ifndef __WL18XX_IO_H__
#define __WL18XX_IO_H__

/*
 * 16-bit accessors for wl18xx TOP registers, implemented on top of the
 * 32-bit wlcore bus primitives.  Both return 0 or a negative error and
 * must have their result checked.
 */
int __must_check wl18xx_top_reg_write(struct wl1271 *wl, int addr, u16 val);
int __must_check wl18xx_top_reg_read(struct wl1271 *wl, int addr, u16 *out);

#endif /* __WL18XX_IO_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
new file mode 100644
index 000000000000..b378b34c4a6a
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -0,0 +1,1557 @@
1/*
2 * This file is part of wl18xx
3 *
4 * Copyright (C) 2011 Texas Instruments
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#include <linux/module.h>
23#include <linux/platform_device.h>
24#include <linux/ip.h>
25#include <linux/firmware.h>
26
27#include "../wlcore/wlcore.h"
28#include "../wlcore/debug.h"
29#include "../wlcore/io.h"
30#include "../wlcore/acx.h"
31#include "../wlcore/tx.h"
32#include "../wlcore/rx.h"
33#include "../wlcore/io.h"
34#include "../wlcore/boot.h"
35
36#include "reg.h"
37#include "conf.h"
38#include "acx.h"
39#include "tx.h"
40#include "wl18xx.h"
41#include "io.h"
42#include "debugfs.h"
43
/* Bit in the Rx descriptor status indicating a FW-validated checksum */
#define WL18XX_RX_CHECKSUM_MASK      0x40

/*
 * Module parameters.  The integer parameters default to -1, which
 * presumably means "keep the value from wl18xx_default_priv_conf";
 * the override path is outside this file -- TODO confirm.
 */
static char *ht_mode_param = "default";
static char *board_type_param = "hdk";
static bool checksum_param = false;	/* enable HW checksum offload */
static bool enable_11a_param = true;
static int num_rx_desc_param = -1;

/* phy parameters */
static int dc2dc_param = -1;
static int n_antennas_2_param = -1;
static int n_antennas_5_param = -1;
static int low_band_component_param = -1;
static int low_band_component_type_param = -1;
static int high_band_component_param = -1;
static int high_band_component_type_param = -1;
static int pwr_limit_reference_11_abg_param = -1;
61
/*
 * Map wl18xx HW rate indices (enum wl18xx_hw_rates order) to mac80211
 * rate indices for the 2.4GHz band.  Entries marked
 * CONF_HW_RXTX_RATE_UNSUPPORTED have no mac80211 equivalent.
 */
static const u8 wl18xx_rate_to_idx_2ghz[] = {
	/* MCS rates are used only with 11n */
	15,                            /* WL18XX_CONF_HW_RXTX_RATE_MCS15 */
	14,                            /* WL18XX_CONF_HW_RXTX_RATE_MCS14 */
	13,                            /* WL18XX_CONF_HW_RXTX_RATE_MCS13 */
	12,                            /* WL18XX_CONF_HW_RXTX_RATE_MCS12 */
	11,                            /* WL18XX_CONF_HW_RXTX_RATE_MCS11 */
	10,                            /* WL18XX_CONF_HW_RXTX_RATE_MCS10 */
	9,                             /* WL18XX_CONF_HW_RXTX_RATE_MCS9 */
	8,                             /* WL18XX_CONF_HW_RXTX_RATE_MCS8 */
	7,                             /* WL18XX_CONF_HW_RXTX_RATE_MCS7 */
	6,                             /* WL18XX_CONF_HW_RXTX_RATE_MCS6 */
	5,                             /* WL18XX_CONF_HW_RXTX_RATE_MCS5 */
	4,                             /* WL18XX_CONF_HW_RXTX_RATE_MCS4 */
	3,                             /* WL18XX_CONF_HW_RXTX_RATE_MCS3 */
	2,                             /* WL18XX_CONF_HW_RXTX_RATE_MCS2 */
	1,                             /* WL18XX_CONF_HW_RXTX_RATE_MCS1 */
	0,                             /* WL18XX_CONF_HW_RXTX_RATE_MCS0 */

	11,                            /* WL18XX_CONF_HW_RXTX_RATE_54   */
	10,                            /* WL18XX_CONF_HW_RXTX_RATE_48   */
	9,                             /* WL18XX_CONF_HW_RXTX_RATE_36   */
	8,                             /* WL18XX_CONF_HW_RXTX_RATE_24   */

	/* TI-specific rate */
	CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL18XX_CONF_HW_RXTX_RATE_22   */

	7,                             /* WL18XX_CONF_HW_RXTX_RATE_18   */
	6,                             /* WL18XX_CONF_HW_RXTX_RATE_12   */
	3,                             /* WL18XX_CONF_HW_RXTX_RATE_11   */
	5,                             /* WL18XX_CONF_HW_RXTX_RATE_9    */
	4,                             /* WL18XX_CONF_HW_RXTX_RATE_6    */
	2,                             /* WL18XX_CONF_HW_RXTX_RATE_5_5  */
	1,                             /* WL18XX_CONF_HW_RXTX_RATE_2    */
	0                              /* WL18XX_CONF_HW_RXTX_RATE_1    */
};
98
/*
 * Map wl18xx HW rate indices to mac80211 rate indices for the 5GHz
 * band.  CCK rates (1/2/5.5/11) do not exist on 5GHz and are marked
 * CONF_HW_RXTX_RATE_UNSUPPORTED.
 */
static const u8 wl18xx_rate_to_idx_5ghz[] = {
	/* MCS rates are used only with 11n */
	15,                            /* WL18XX_CONF_HW_RXTX_RATE_MCS15 */
	14,                            /* WL18XX_CONF_HW_RXTX_RATE_MCS14 */
	13,                            /* WL18XX_CONF_HW_RXTX_RATE_MCS13 */
	12,                            /* WL18XX_CONF_HW_RXTX_RATE_MCS12 */
	11,                            /* WL18XX_CONF_HW_RXTX_RATE_MCS11 */
	10,                            /* WL18XX_CONF_HW_RXTX_RATE_MCS10 */
	9,                             /* WL18XX_CONF_HW_RXTX_RATE_MCS9 */
	8,                             /* WL18XX_CONF_HW_RXTX_RATE_MCS8 */
	7,                             /* WL18XX_CONF_HW_RXTX_RATE_MCS7 */
	6,                             /* WL18XX_CONF_HW_RXTX_RATE_MCS6 */
	5,                             /* WL18XX_CONF_HW_RXTX_RATE_MCS5 */
	4,                             /* WL18XX_CONF_HW_RXTX_RATE_MCS4 */
	3,                             /* WL18XX_CONF_HW_RXTX_RATE_MCS3 */
	2,                             /* WL18XX_CONF_HW_RXTX_RATE_MCS2 */
	1,                             /* WL18XX_CONF_HW_RXTX_RATE_MCS1 */
	0,                             /* WL18XX_CONF_HW_RXTX_RATE_MCS0 */

	7,                             /* WL18XX_CONF_HW_RXTX_RATE_54   */
	6,                             /* WL18XX_CONF_HW_RXTX_RATE_48   */
	5,                             /* WL18XX_CONF_HW_RXTX_RATE_36   */
	4,                             /* WL18XX_CONF_HW_RXTX_RATE_24   */

	/* TI-specific rate */
	CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL18XX_CONF_HW_RXTX_RATE_22   */

	3,                             /* WL18XX_CONF_HW_RXTX_RATE_18   */
	2,                             /* WL18XX_CONF_HW_RXTX_RATE_12   */
	CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL18XX_CONF_HW_RXTX_RATE_11   */
	1,                             /* WL18XX_CONF_HW_RXTX_RATE_9    */
	0,                             /* WL18XX_CONF_HW_RXTX_RATE_6    */
	CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL18XX_CONF_HW_RXTX_RATE_5_5  */
	CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL18XX_CONF_HW_RXTX_RATE_2    */
	CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL18XX_CONF_HW_RXTX_RATE_1    */
};
135
/* Per-band selection of the HW-rate -> mac80211-rate tables above */
static const u8 *wl18xx_band_rate_to_idx[] = {
	[IEEE80211_BAND_2GHZ] = wl18xx_rate_to_idx_2ghz,
	[IEEE80211_BAND_5GHZ] = wl18xx_rate_to_idx_5ghz
};
140
/*
 * HW rate indices as used by the wl18xx firmware.  The order must
 * match the wl18xx_rate_to_idx_* tables above, which are indexed by
 * these values.
 */
enum wl18xx_hw_rates {
	WL18XX_CONF_HW_RXTX_RATE_MCS15 = 0,
	WL18XX_CONF_HW_RXTX_RATE_MCS14,
	WL18XX_CONF_HW_RXTX_RATE_MCS13,
	WL18XX_CONF_HW_RXTX_RATE_MCS12,
	WL18XX_CONF_HW_RXTX_RATE_MCS11,
	WL18XX_CONF_HW_RXTX_RATE_MCS10,
	WL18XX_CONF_HW_RXTX_RATE_MCS9,
	WL18XX_CONF_HW_RXTX_RATE_MCS8,
	WL18XX_CONF_HW_RXTX_RATE_MCS7,
	WL18XX_CONF_HW_RXTX_RATE_MCS6,
	WL18XX_CONF_HW_RXTX_RATE_MCS5,
	WL18XX_CONF_HW_RXTX_RATE_MCS4,
	WL18XX_CONF_HW_RXTX_RATE_MCS3,
	WL18XX_CONF_HW_RXTX_RATE_MCS2,
	WL18XX_CONF_HW_RXTX_RATE_MCS1,
	WL18XX_CONF_HW_RXTX_RATE_MCS0,
	WL18XX_CONF_HW_RXTX_RATE_54,
	WL18XX_CONF_HW_RXTX_RATE_48,
	WL18XX_CONF_HW_RXTX_RATE_36,
	WL18XX_CONF_HW_RXTX_RATE_24,
	WL18XX_CONF_HW_RXTX_RATE_22,
	WL18XX_CONF_HW_RXTX_RATE_18,
	WL18XX_CONF_HW_RXTX_RATE_12,
	WL18XX_CONF_HW_RXTX_RATE_11,
	WL18XX_CONF_HW_RXTX_RATE_9,
	WL18XX_CONF_HW_RXTX_RATE_6,
	WL18XX_CONF_HW_RXTX_RATE_5_5,
	WL18XX_CONF_HW_RXTX_RATE_2,
	WL18XX_CONF_HW_RXTX_RATE_1,
	WL18XX_CONF_HW_RXTX_RATE_MAX,
};
173
/*
 * Default wlcore configuration for wl18xx.  These are firmware/tuning
 * defaults handed to the wlcore core; per-field semantics are defined
 * by the wlcore conf structures (wlcore/conf.h).  NOTE(review): the
 * values themselves are board/firmware tuning constants -- confirm
 * against the firmware release notes before changing any of them.
 */
static struct wlcore_conf wl18xx_conf = {
	.sg = {
		.params = {
			[CONF_SG_ACL_BT_MASTER_MIN_BR] = 10,
			[CONF_SG_ACL_BT_MASTER_MAX_BR] = 180,
			[CONF_SG_ACL_BT_SLAVE_MIN_BR] = 10,
			[CONF_SG_ACL_BT_SLAVE_MAX_BR] = 180,
			[CONF_SG_ACL_BT_MASTER_MIN_EDR] = 10,
			[CONF_SG_ACL_BT_MASTER_MAX_EDR] = 80,
			[CONF_SG_ACL_BT_SLAVE_MIN_EDR] = 10,
			[CONF_SG_ACL_BT_SLAVE_MAX_EDR] = 80,
			[CONF_SG_ACL_WLAN_PS_MASTER_BR] = 8,
			[CONF_SG_ACL_WLAN_PS_SLAVE_BR] = 8,
			[CONF_SG_ACL_WLAN_PS_MASTER_EDR] = 20,
			[CONF_SG_ACL_WLAN_PS_SLAVE_EDR] = 20,
			[CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_BR] = 20,
			[CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_BR] = 35,
			[CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_BR] = 16,
			[CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_BR] = 35,
			[CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_EDR] = 32,
			[CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_EDR] = 50,
			[CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_EDR] = 28,
			[CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_EDR] = 50,
			[CONF_SG_ACL_ACTIVE_SCAN_WLAN_BR] = 10,
			[CONF_SG_ACL_ACTIVE_SCAN_WLAN_EDR] = 20,
			[CONF_SG_ACL_PASSIVE_SCAN_BT_BR] = 75,
			[CONF_SG_ACL_PASSIVE_SCAN_WLAN_BR] = 15,
			[CONF_SG_ACL_PASSIVE_SCAN_BT_EDR] = 27,
			[CONF_SG_ACL_PASSIVE_SCAN_WLAN_EDR] = 17,
			/* active scan params */
			[CONF_SG_AUTO_SCAN_PROBE_REQ] = 170,
			[CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50,
			[CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100,
			/* passive scan params */
			[CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_BR] = 800,
			[CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_EDR] = 200,
			[CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3] = 200,
			/* passive scan in dual antenna params */
			[CONF_SG_CONSECUTIVE_HV3_IN_PASSIVE_SCAN] = 0,
			[CONF_SG_BCN_HV3_COLLISION_THRESH_IN_PASSIVE_SCAN] = 0,
			[CONF_SG_TX_RX_PROTECTION_BWIDTH_IN_PASSIVE_SCAN] = 0,
			/* general params */
			[CONF_SG_STA_FORCE_PS_IN_BT_SCO] = 1,
			[CONF_SG_ANTENNA_CONFIGURATION] = 0,
			[CONF_SG_BEACON_MISS_PERCENT] = 60,
			[CONF_SG_DHCP_TIME] = 5000,
			[CONF_SG_RXT] = 1200,
			[CONF_SG_TXT] = 1000,
			[CONF_SG_ADAPTIVE_RXT_TXT] = 1,
			[CONF_SG_GENERAL_USAGE_BIT_MAP] = 3,
			[CONF_SG_HV3_MAX_SERVED] = 6,
			[CONF_SG_PS_POLL_TIMEOUT] = 10,
			[CONF_SG_UPSD_TIMEOUT] = 10,
			[CONF_SG_CONSECUTIVE_CTS_THRESHOLD] = 2,
			[CONF_SG_STA_RX_WINDOW_AFTER_DTIM] = 5,
			[CONF_SG_STA_CONNECTION_PROTECTION_TIME] = 30,
			/* AP params */
			[CONF_AP_BEACON_MISS_TX] = 3,
			[CONF_AP_RX_WINDOW_AFTER_BEACON] = 10,
			[CONF_AP_BEACON_WINDOW_INTERVAL] = 2,
			[CONF_AP_CONNECTION_PROTECTION_TIME] = 0,
			[CONF_AP_BT_ACL_VAL_BT_SERVE_TIME] = 25,
			[CONF_AP_BT_ACL_VAL_WL_SERVE_TIME] = 25,
			/* CTS Diluting params */
			[CONF_SG_CTS_DILUTED_BAD_RX_PACKETS_TH] = 0,
			[CONF_SG_CTS_CHOP_IN_DUAL_ANT_SCO_MASTER] = 0,
		},
		.state = CONF_SG_PROTECTIVE,
	},
	.rx = {
		.rx_msdu_life_time = 512000,
		.packet_detection_threshold = 0,
		.ps_poll_timeout = 15,
		.upsd_timeout = 15,
		.rts_threshold = IEEE80211_MAX_RTS_THRESHOLD,
		.rx_cca_threshold = 0,
		.irq_blk_threshold = 0xFFFF,
		.irq_pkt_threshold = 0,
		.irq_timeout = 600,
		.queue_type = CONF_RX_QUEUE_TYPE_LOW_PRIORITY,
	},
	.tx = {
		.tx_energy_detection = 0,
		.sta_rc_conf = {
			.enabled_rates = 0,
			.short_retry_limit = 10,
			.long_retry_limit = 10,
			.aflags = 0,
		},
		.ac_conf_count = 4,
		.ac_conf = {
			[CONF_TX_AC_BE] = {
				.ac = CONF_TX_AC_BE,
				.cw_min = 15,
				.cw_max = 63,
				.aifsn = 3,
				.tx_op_limit = 0,
			},
			[CONF_TX_AC_BK] = {
				.ac = CONF_TX_AC_BK,
				.cw_min = 15,
				.cw_max = 63,
				.aifsn = 7,
				.tx_op_limit = 0,
			},
			[CONF_TX_AC_VI] = {
				.ac = CONF_TX_AC_VI,
				.cw_min = 15,
				.cw_max = 63,
				.aifsn = CONF_TX_AIFS_PIFS,
				.tx_op_limit = 3008,
			},
			[CONF_TX_AC_VO] = {
				.ac = CONF_TX_AC_VO,
				.cw_min = 15,
				.cw_max = 63,
				.aifsn = CONF_TX_AIFS_PIFS,
				.tx_op_limit = 1504,
			},
		},
		.max_tx_retries = 100,
		.ap_aging_period = 300,
		.tid_conf_count = 4,
		.tid_conf = {
			[CONF_TX_AC_BE] = {
				.queue_id = CONF_TX_AC_BE,
				.channel_type = CONF_CHANNEL_TYPE_EDCF,
				.tsid = CONF_TX_AC_BE,
				.ps_scheme = CONF_PS_SCHEME_LEGACY,
				.ack_policy = CONF_ACK_POLICY_LEGACY,
				.apsd_conf = {0, 0},
			},
			[CONF_TX_AC_BK] = {
				.queue_id = CONF_TX_AC_BK,
				.channel_type = CONF_CHANNEL_TYPE_EDCF,
				.tsid = CONF_TX_AC_BK,
				.ps_scheme = CONF_PS_SCHEME_LEGACY,
				.ack_policy = CONF_ACK_POLICY_LEGACY,
				.apsd_conf = {0, 0},
			},
			[CONF_TX_AC_VI] = {
				.queue_id = CONF_TX_AC_VI,
				.channel_type = CONF_CHANNEL_TYPE_EDCF,
				.tsid = CONF_TX_AC_VI,
				.ps_scheme = CONF_PS_SCHEME_LEGACY,
				.ack_policy = CONF_ACK_POLICY_LEGACY,
				.apsd_conf = {0, 0},
			},
			[CONF_TX_AC_VO] = {
				.queue_id = CONF_TX_AC_VO,
				.channel_type = CONF_CHANNEL_TYPE_EDCF,
				.tsid = CONF_TX_AC_VO,
				.ps_scheme = CONF_PS_SCHEME_LEGACY,
				.ack_policy = CONF_ACK_POLICY_LEGACY,
				.apsd_conf = {0, 0},
			},
		},
		.frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD,
		.tx_compl_timeout = 350,
		.tx_compl_threshold = 10,
		.basic_rate = CONF_HW_BIT_RATE_1MBPS,
		.basic_rate_5 = CONF_HW_BIT_RATE_6MBPS,
		.tmpl_short_retry_limit = 10,
		.tmpl_long_retry_limit = 10,
		.tx_watchdog_timeout = 5000,
	},
	.conn = {
		.wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
		.listen_interval = 1,
		.suspend_wake_up_event = CONF_WAKE_UP_EVENT_N_DTIM,
		.suspend_listen_interval = 3,
		.bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED,
		.bcn_filt_ie_count = 3,
		.bcn_filt_ie = {
			[0] = {
				.ie = WLAN_EID_CHANNEL_SWITCH,
				.rule = CONF_BCN_RULE_PASS_ON_APPEARANCE,
			},
			[1] = {
				.ie = WLAN_EID_HT_OPERATION,
				.rule = CONF_BCN_RULE_PASS_ON_CHANGE,
			},
			[2] = {
				.ie = WLAN_EID_ERP_INFO,
				.rule = CONF_BCN_RULE_PASS_ON_CHANGE,
			},
		},
		.synch_fail_thold = 12,
		.bss_lose_timeout = 400,
		.beacon_rx_timeout = 10000,
		.broadcast_timeout = 20000,
		.rx_broadcast_in_ps = 1,
		.ps_poll_threshold = 10,
		.bet_enable = CONF_BET_MODE_ENABLE,
		.bet_max_consecutive = 50,
		.psm_entry_retries = 8,
		.psm_exit_retries = 16,
		.psm_entry_nullfunc_retries = 3,
		.dynamic_ps_timeout = 1500,
		.forced_ps = false,
		.keep_alive_interval = 55000,
		.max_listen_interval = 20,
		.sta_sleep_auth = WL1271_PSM_ILLEGAL,
	},
	.itrim = {
		.enable = false,
		.timeout = 50000,
	},
	.pm_config = {
		.host_clk_settling_time = 5000,
		.host_fast_wakeup_support = CONF_FAST_WAKEUP_DISABLE,
	},
	.roam_trigger = {
		.trigger_pacing = 1,
		.avg_weight_rssi_beacon = 20,
		.avg_weight_rssi_data = 10,
		.avg_weight_snr_beacon = 20,
		.avg_weight_snr_data = 10,
	},
	.scan = {
		.min_dwell_time_active = 7500,
		.max_dwell_time_active = 30000,
		.min_dwell_time_passive = 100000,
		.max_dwell_time_passive = 100000,
		.num_probe_reqs = 2,
		.split_scan_timeout = 50000,
	},
	.sched_scan = {
		/*
		 * Values are in TU/1000 but since sched scan FW command
		 * params are in TUs rounding up may occur.
		 */
		.base_dwell_time = 7500,
		.max_dwell_time_delta = 22500,
		/* based on 250bits per probe @1Mbps */
		.dwell_time_delta_per_probe = 2000,
		/* based on 250bits per probe @6Mbps (plus a bit more) */
		.dwell_time_delta_per_probe_5 = 350,
		.dwell_time_passive = 100000,
		.dwell_time_dfs = 150000,
		.num_probe_reqs = 2,
		.rssi_threshold = -90,
		.snr_threshold = 0,
	},
	.ht = {
		.rx_ba_win_size = 10,
		.tx_ba_win_size = 64,
		.inactivity_timeout = 10000,
		.tx_ba_tid_bitmap = CONF_TX_BA_ENABLED_TID_BITMAP,
	},
	.mem = {
		.num_stations = 1,
		.ssid_profiles = 1,
		.rx_block_num = 40,
		.tx_min_block_num = 40,
		.dynamic_memory = 1,
		.min_req_tx_blocks = 45,
		.min_req_rx_blocks = 22,
		.tx_min = 27,
	},
	.fm_coex = {
		.enable = true,
		.swallow_period = 5,
		.n_divider_fref_set_1 = 0xff, /* default */
		.n_divider_fref_set_2 = 12,
		.m_divider_fref_set_1 = 0xffff,
		.m_divider_fref_set_2 = 148, /* default */
		.coex_pll_stabilization_time = 0xffffffff, /* default */
		.ldo_stabilization_time = 0xffff, /* default */
		.fm_disturbed_band_margin = 0xff, /* default */
		.swallow_clk_diff = 0xff, /* default */
	},
	.rx_streaming = {
		.duration = 150,
		.queues = 0x1,
		.interval = 20,
		.always = 0,
	},
	.fwlog = {
		.mode = WL12XX_FWLOG_ON_DEMAND,
		.mem_blocks = 2,
		.severity = 0,
		.timestamp = WL12XX_FWLOG_TIMESTAMP_DISABLED,
		.output = WL12XX_FWLOG_OUTPUT_HOST,
		.threshold = 0,
	},
	.rate = {
		.rate_retry_score = 32000,
		.per_add = 8192,
		.per_th1 = 2048,
		.per_th2 = 4096,
		.max_per = 8100,
		.inverse_curiosity_factor = 5,
		.tx_fail_low_th = 4,
		.tx_fail_high_th = 10,
		.per_alpha_shift = 4,
		.per_add_shift = 13,
		.per_beta1_shift = 10,
		.per_beta2_shift = 8,
		.rate_check_up = 2,
		.rate_check_down = 12,
		.rate_retry_policy = {
			0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00,
		},
	},
	.hangover = {
		.recover_time = 0,
		.hangover_period = 20,
		.dynamic_mode = 1,
		.early_termination_mode = 1,
		.max_period = 20,
		.min_period = 1,
		.increase_delta = 1,
		.decrease_delta = 2,
		.quiet_time = 4,
		.increase_time = 1,
		.window_size = 16,
	},
};
495
/*
 * Default wl18xx-private PHY configuration.  Matching *_param module
 * parameters are declared above; presumably they override these fields
 * when set (!= -1) -- the override path is outside this view, TODO
 * confirm.  This block is written verbatim to the chip's PHY init
 * memory area (see wl18xx_set_mac_and_phy).
 */
static struct wl18xx_priv_conf wl18xx_default_priv_conf = {
	.phy = {
		.phy_standalone = 0x00,
		.primary_clock_setting_time = 0x05,
		.clock_valid_on_wake_up = 0x00,
		.secondary_clock_setting_time = 0x05,
		.rdl = 0x01,
		.auto_detect = 0x00,
		.dedicated_fem = FEM_NONE,
		.low_band_component = COMPONENT_2_WAY_SWITCH,
		.low_band_component_type = 0x05,
		.high_band_component = COMPONENT_2_WAY_SWITCH,
		.high_band_component_type = 0x09,
		.tcxo_ldo_voltage = 0x00,
		.xtal_itrim_val = 0x04,
		.srf_state = 0x00,
		.io_configuration = 0x01,
		.sdio_configuration = 0x00,
		.settings = 0x00,
		.enable_clpc = 0x00,
		.enable_tx_low_pwr_on_siso_rdl = 0x00,
		.rx_profile = 0x00,
		.pwr_limit_reference_11_abg = 0xc8,
		.psat = 0,
		.low_power_val = 0x00,
		.med_power_val = 0x0a,
		.high_power_val = 0x1e,
		.external_pa_dc2dc = 0,
		.number_of_assembled_ant2_4 = 1,
		.number_of_assembled_ant5 = 1,
	},
};
528
/*
 * Memory partition configurations.  Each entry maps up to four chip
 * address windows (mem/reg/mem2/mem3) into the host's linear access
 * space; zero-sized windows are unused.  Selected at runtime via
 * wlcore_set_partition(wl, &wl->ptable[...]).
 */
static const struct wlcore_partition_set wl18xx_ptable[PART_TABLE_LEN] = {
	[PART_TOP_PRCM_ELP_SOC] = {
		.mem  = { .start = 0x00A02000, .size  = 0x00010000 },
		.reg  = { .start = 0x00807000, .size  = 0x00005000 },
		.mem2 = { .start = 0x00800000, .size  = 0x0000B000 },
		.mem3 = { .start = 0x00000000, .size  = 0x00000000 },
	},
	[PART_DOWN] = {
		.mem  = { .start = 0x00000000, .size  = 0x00014000 },
		.reg  = { .start = 0x00810000, .size  = 0x0000BFFF },
		.mem2 = { .start = 0x00000000, .size  = 0x00000000 },
		.mem3 = { .start = 0x00000000, .size  = 0x00000000 },
	},
	[PART_BOOT] = {
		.mem  = { .start = 0x00700000, .size  = 0x0000030c },
		.reg  = { .start = 0x00802000, .size  = 0x00014578 },
		.mem2 = { .start = 0x00B00404, .size  = 0x00001000 },
		.mem3 = { .start = 0x00C00000, .size  = 0x00000400 },
	},
	[PART_WORK] = {
		.mem  = { .start = 0x00800000, .size  = 0x000050FC },
		.reg  = { .start = 0x00B00404, .size  = 0x00001000 },
		.mem2 = { .start = 0x00C00000, .size  = 0x00000400 },
		.mem3 = { .start = 0x00000000, .size  = 0x00000000 },
	},
	[PART_PHY_INIT] = {
		.mem  = { .start = 0x80926000,
			  .size  = sizeof(struct wl18xx_mac_and_phy_params) },
		.reg  = { .start = 0x00000000, .size = 0x00000000 },
		.mem2 = { .start = 0x00000000, .size = 0x00000000 },
		.mem3 = { .start = 0x00000000, .size = 0x00000000 },
	},
};
562
/* Translation of generic wlcore register ids to wl18xx addresses */
static const int wl18xx_rtable[REG_TABLE_LEN] = {
	[REG_ECPU_CONTROL]		= WL18XX_REG_ECPU_CONTROL,
	[REG_INTERRUPT_NO_CLEAR]	= WL18XX_REG_INTERRUPT_NO_CLEAR,
	[REG_INTERRUPT_ACK]		= WL18XX_REG_INTERRUPT_ACK,
	[REG_COMMAND_MAILBOX_PTR]	= WL18XX_REG_COMMAND_MAILBOX_PTR,
	[REG_EVENT_MAILBOX_PTR]		= WL18XX_REG_EVENT_MAILBOX_PTR,
	[REG_INTERRUPT_TRIG]		= WL18XX_REG_INTERRUPT_TRIG_H,
	[REG_INTERRUPT_MASK]		= WL18XX_REG_INTERRUPT_MASK,
	[REG_PC_ON_RECOVERY]		= WL18XX_SCR_PAD4,
	[REG_CHIP_ID_B]			= WL18XX_REG_CHIP_ID_B,
	[REG_CMD_MBOX_ADDRESS]		= WL18XX_CMD_MBOX_ADDRESS,

	/* data access memory addresses, used with partition translation */
	[REG_SLV_MEM_DATA]		= WL18XX_SLV_MEM_DATA,
	[REG_SLV_REG_DATA]		= WL18XX_SLV_REG_DATA,

	/* raw data access memory addresses */
	[REG_RAW_FW_STATUS_ADDR]	= WL18XX_FW_STATUS_ADDR,
};
582
/*
 * PLL settings per reference-clock frequency, indexed by the value the
 * chip reports in PRIMARY_CLK_DETECT (see wl18xx_set_clk).  NOTE:
 * field order follows struct wl18xx_clk_cfg (declared elsewhere) --
 * confirm field meaning there before editing values.
 */
static const struct wl18xx_clk_cfg wl18xx_clk_table[NUM_CLOCK_CONFIGS] = {
	[CLOCK_CONFIG_16_2_M]	= { 7,  104,  801, 4,  true },
	[CLOCK_CONFIG_16_368_M]	= { 9,  132, 3751, 4,  true },
	[CLOCK_CONFIG_16_8_M]	= { 7,  100,    0, 0, false },
	[CLOCK_CONFIG_19_2_M]	= { 8,  100,    0, 0, false },
	[CLOCK_CONFIG_26_M]	= { 13, 120,    0, 0, false },
	[CLOCK_CONFIG_32_736_M]	= { 9,  132, 3751, 4,  true },
	[CLOCK_CONFIG_33_6_M]	= { 7,  100,    0, 0, false },
	[CLOCK_CONFIG_38_468_M]	= { 8,  100,    0, 0, false },
	[CLOCK_CONFIG_52_M]	= { 13, 120,    0, 0, false },
};
594
595/* TODO: maybe move to a new header file? */
596#define WL18XX_FW_NAME "ti-connectivity/wl18xx-fw.bin"
597
598static int wl18xx_identify_chip(struct wl1271 *wl)
599{
600 int ret = 0;
601
602 switch (wl->chip.id) {
603 case CHIP_ID_185x_PG20:
604 wl1271_debug(DEBUG_BOOT, "chip id 0x%x (185x PG20)",
605 wl->chip.id);
606 wl->sr_fw_name = WL18XX_FW_NAME;
607 /* wl18xx uses the same firmware for PLT */
608 wl->plt_fw_name = WL18XX_FW_NAME;
609 wl->quirks |= WLCORE_QUIRK_NO_ELP |
610 WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN |
611 WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN |
612 WLCORE_QUIRK_NO_SCHED_SCAN_WHILE_CONN |
613 WLCORE_QUIRK_TX_PAD_LAST_FRAME;
614
615 wlcore_set_min_fw_ver(wl, WL18XX_CHIP_VER, WL18XX_IFTYPE_VER,
616 WL18XX_MAJOR_VER, WL18XX_SUBTYPE_VER,
617 WL18XX_MINOR_VER);
618 break;
619 case CHIP_ID_185x_PG10:
620 wl1271_warning("chip id 0x%x (185x PG10) is deprecated",
621 wl->chip.id);
622 ret = -ENODEV;
623 goto out;
624
625 default:
626 wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
627 ret = -ENODEV;
628 goto out;
629 }
630
631out:
632 return ret;
633}
634
635static int wl18xx_set_clk(struct wl1271 *wl)
636{
637 u16 clk_freq;
638 int ret;
639
640 ret = wlcore_set_partition(wl, &wl->ptable[PART_TOP_PRCM_ELP_SOC]);
641 if (ret < 0)
642 goto out;
643
644 /* TODO: PG2: apparently we need to read the clk type */
645
646 ret = wl18xx_top_reg_read(wl, PRIMARY_CLK_DETECT, &clk_freq);
647 if (ret < 0)
648 goto out;
649
650 wl1271_debug(DEBUG_BOOT, "clock freq %d (%d, %d, %d, %d, %s)", clk_freq,
651 wl18xx_clk_table[clk_freq].n, wl18xx_clk_table[clk_freq].m,
652 wl18xx_clk_table[clk_freq].p, wl18xx_clk_table[clk_freq].q,
653 wl18xx_clk_table[clk_freq].swallow ? "swallow" : "spit");
654
655 ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_N,
656 wl18xx_clk_table[clk_freq].n);
657 if (ret < 0)
658 goto out;
659
660 ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_M,
661 wl18xx_clk_table[clk_freq].m);
662 if (ret < 0)
663 goto out;
664
665 if (wl18xx_clk_table[clk_freq].swallow) {
666 /* first the 16 lower bits */
667 ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_Q_FACTOR_CFG_1,
668 wl18xx_clk_table[clk_freq].q &
669 PLLSH_WCS_PLL_Q_FACTOR_CFG_1_MASK);
670 if (ret < 0)
671 goto out;
672
673 /* then the 16 higher bits, masked out */
674 ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_Q_FACTOR_CFG_2,
675 (wl18xx_clk_table[clk_freq].q >> 16) &
676 PLLSH_WCS_PLL_Q_FACTOR_CFG_2_MASK);
677 if (ret < 0)
678 goto out;
679
680 /* first the 16 lower bits */
681 ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_P_FACTOR_CFG_1,
682 wl18xx_clk_table[clk_freq].p &
683 PLLSH_WCS_PLL_P_FACTOR_CFG_1_MASK);
684 if (ret < 0)
685 goto out;
686
687 /* then the 16 higher bits, masked out */
688 ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_P_FACTOR_CFG_2,
689 (wl18xx_clk_table[clk_freq].p >> 16) &
690 PLLSH_WCS_PLL_P_FACTOR_CFG_2_MASK);
691 } else {
692 ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_SWALLOW_EN,
693 PLLSH_WCS_PLL_SWALLOW_EN_VAL2);
694 }
695
696out:
697 return ret;
698}
699
700static int wl18xx_boot_soft_reset(struct wl1271 *wl)
701{
702 int ret;
703
704 /* disable Rx/Tx */
705 ret = wlcore_write32(wl, WL18XX_ENABLE, 0x0);
706 if (ret < 0)
707 goto out;
708
709 /* disable auto calibration on start*/
710 ret = wlcore_write32(wl, WL18XX_SPARE_A2, 0xffff);
711
712out:
713 return ret;
714}
715
/*
 * Early boot sequence: program the PLL, finish the ELP wake-up,
 * select the BOOT partition, mask interrupts and soft-reset the
 * datapath.  The ordering of these steps is hardware-mandated.
 * Returns 0 on success or a negative error code.
 */
static int wl18xx_pre_boot(struct wl1271 *wl)
{
	int ret;

	ret = wl18xx_set_clk(wl);
	if (ret < 0)
		goto out;

	/* Continue the ELP wake up sequence */
	ret = wlcore_write32(wl, WL18XX_WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
	if (ret < 0)
		goto out;

	/* NOTE(review): fixed settle delay after the WELP command --
	 * presumably required by the wake-up sequence; confirm with TRM */
	udelay(500);

	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
	if (ret < 0)
		goto out;

	/* Disable interrupts */
	ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
	if (ret < 0)
		goto out;

	ret = wl18xx_boot_soft_reset(wl);

out:
	return ret;
}
745
/*
 * Prepare the chip for firmware upload: select the BOOT partition,
 * flag eepromless operation and log the chip id.  Returns 0 on
 * success or a negative error code.
 */
static int wl18xx_pre_upload(struct wl1271 *wl)
{
	u32 tmp;
	int ret;

	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
	if (ret < 0)
		goto out;

	/* TODO: check if this is all needed */
	ret = wlcore_write32(wl, WL18XX_EEPROMLESS_IND, WL18XX_EEPROMLESS_IND);
	if (ret < 0)
		goto out;

	ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &tmp);
	if (ret < 0)
		goto out;

	wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp);

	/* NOTE(review): the value read is discarded -- possibly the read
	 * itself has a side effect; confirm before removing */
	ret = wlcore_read32(wl, WL18XX_SCR_PAD2, &tmp);

out:
	return ret;
}
771
/*
 * Write the PHY/MAC configuration block (priv->conf.phy) into the
 * chip's PHY_INIT memory window.  Returns 0 or a negative error.
 */
static int wl18xx_set_mac_and_phy(struct wl1271 *wl)
{
	struct wl18xx_priv *priv = wl->priv;
	int ret;

	ret = wlcore_set_partition(wl, &wl->ptable[PART_PHY_INIT]);
	if (ret < 0)
		goto out;

	ret = wlcore_write(wl, WL18XX_PHY_INIT_MEM_ADDR, (u8 *)&priv->conf.phy,
			   sizeof(struct wl18xx_mac_and_phy_params), false);

out:
	return ret;
}
787
/*
 * Program the firmware interrupt masks and enable host-side interrupt
 * handling.  The event vector is written first, then host handling is
 * enabled, then the chip interrupt mask is opened.
 */
static int wl18xx_enable_interrupts(struct wl1271 *wl)
{
	u32 event_mask, intr_mask;
	int ret;

	event_mask = WL18XX_ACX_EVENTS_VECTOR;
	intr_mask = WL18XX_INTR_MASK;

	ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK, event_mask);
	if (ret < 0)
		goto out;

	wlcore_enable_interrupts(wl);

	/* unmask only the interrupts wl18xx actually handles */
	ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK,
			       WL1271_ACX_INTR_ALL & ~intr_mask);

out:
	return ret;
}
808
/*
 * Full chip boot: clock/reset setup, firmware upload, PHY/MAC
 * configuration, firmware start and interrupt enabling, in that
 * order.  Returns 0 on success or the first failing step's error.
 */
static int wl18xx_boot(struct wl1271 *wl)
{
	int ret;

	ret = wl18xx_pre_boot(wl);
	if (ret < 0)
		goto out;

	ret = wl18xx_pre_upload(wl);
	if (ret < 0)
		goto out;

	ret = wlcore_boot_upload_firmware(wl);
	if (ret < 0)
		goto out;

	ret = wl18xx_set_mac_and_phy(wl);
	if (ret < 0)
		goto out;

	ret = wlcore_boot_run_firmware(wl);
	if (ret < 0)
		goto out;

	ret = wl18xx_enable_interrupts(wl);

out:
	return ret;
}
838
839static int wl18xx_trigger_cmd(struct wl1271 *wl, int cmd_box_addr,
840 void *buf, size_t len)
841{
842 struct wl18xx_priv *priv = wl->priv;
843
844 memcpy(priv->cmd_buf, buf, len);
845 memset(priv->cmd_buf + len, 0, WL18XX_CMD_MAX_SIZE - len);
846
847 return wlcore_write(wl, cmd_box_addr, priv->cmd_buf,
848 WL18XX_CMD_MAX_SIZE, false);
849}
850
/* Acknowledge an event mailbox interrupt towards the firmware */
static int wl18xx_ack_event(struct wl1271 *wl)
{
	return wlcore_write_reg(wl, REG_INTERRUPT_TRIG,
				WL18XX_INTR_TRIG_EVENT_ACK);
}
856
857static u32 wl18xx_calc_tx_blocks(struct wl1271 *wl, u32 len, u32 spare_blks)
858{
859 u32 blk_size = WL18XX_TX_HW_BLOCK_SIZE;
860 return (len + blk_size - 1) / blk_size + spare_blks;
861}
862
/*
 * Record the total block count in the Tx descriptor.  @spare_blks is
 * intentionally unused here: the count passed in already includes the
 * spares (see wl18xx_calc_tx_blocks) -- TODO confirm against the
 * wlcore caller.
 */
static void
wl18xx_set_tx_desc_blocks(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc,
			  u32 blks, u32 spare_blks)
{
	desc->wl18xx_mem.total_mem_blocks = blks;
}
869
/*
 * Fill the Tx descriptor length field and the padding-control flag,
 * then log the descriptor contents.
 */
static void
wl18xx_set_tx_desc_data_len(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc,
			    struct sk_buff *skb)
{
	desc->length = cpu_to_le16(skb->len);

	/* if only the last frame is to be padded, we unset this bit on Tx */
	if (wl->quirks & WLCORE_QUIRK_TX_PAD_LAST_FRAME)
		desc->wl18xx_mem.ctrl = WL18XX_TX_CTRL_NOT_PADDED;
	else
		desc->wl18xx_mem.ctrl = 0;

	wl1271_debug(DEBUG_TX, "tx_fill_hdr: hlid: %d "
		     "len: %d life: %d mem: %d", desc->hlid,
		     le16_to_cpu(desc->length),
		     le16_to_cpu(desc->life_time),
		     desc->wl18xx_mem.total_mem_blocks);
}
888
889static enum wl_rx_buf_align
890wl18xx_get_rx_buf_align(struct wl1271 *wl, u32 rx_desc)
891{
892 if (rx_desc & RX_BUF_PADDED_PAYLOAD)
893 return WLCORE_RX_BUF_PADDED;
894
895 return WLCORE_RX_BUF_ALIGNED;
896}
897
898static u32 wl18xx_get_rx_packet_len(struct wl1271 *wl, void *rx_data,
899 u32 data_len)
900{
901 struct wl1271_rx_descriptor *desc = rx_data;
902
903 /* invalid packet */
904 if (data_len < sizeof(*desc))
905 return 0;
906
907 return data_len - sizeof(*desc);
908}
909
/* wlcore op wrapper: delegate immediate Tx completion to wl18xx Tx code */
static void wl18xx_tx_immediate_completion(struct wl1271 *wl)
{
	wl18xx_tx_immediate_complete(wl);
}
914
915static int wl18xx_set_host_cfg_bitmap(struct wl1271 *wl, u32 extra_mem_blk)
916{
917 int ret;
918 u32 sdio_align_size = 0;
919 u32 host_cfg_bitmap = HOST_IF_CFG_RX_FIFO_ENABLE |
920 HOST_IF_CFG_ADD_RX_ALIGNMENT;
921
922 /* Enable Tx SDIO padding */
923 if (wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN) {
924 host_cfg_bitmap |= HOST_IF_CFG_TX_PAD_TO_SDIO_BLK;
925 sdio_align_size = WL12XX_BUS_BLOCK_SIZE;
926 }
927
928 /* Enable Rx SDIO padding */
929 if (wl->quirks & WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN) {
930 host_cfg_bitmap |= HOST_IF_CFG_RX_PAD_TO_SDIO_BLK;
931 sdio_align_size = WL12XX_BUS_BLOCK_SIZE;
932 }
933
934 ret = wl18xx_acx_host_if_cfg_bitmap(wl, host_cfg_bitmap,
935 sdio_align_size, extra_mem_blk,
936 WL18XX_HOST_IF_LEN_SIZE_FIELD);
937 if (ret < 0)
938 return ret;
939
940 return 0;
941}
942
943static int wl18xx_hw_init(struct wl1271 *wl)
944{
945 int ret;
946 struct wl18xx_priv *priv = wl->priv;
947
948 /* (re)init private structures. Relevant on recovery as well. */
949 priv->last_fw_rls_idx = 0;
950 priv->extra_spare_vif_count = 0;
951
952 /* set the default amount of spare blocks in the bitmap */
953 ret = wl18xx_set_host_cfg_bitmap(wl, WL18XX_TX_HW_BLOCK_SPARE);
954 if (ret < 0)
955 return ret;
956
957 if (checksum_param) {
958 ret = wl18xx_acx_set_checksum_state(wl);
959 if (ret != 0)
960 return ret;
961 }
962
963 return ret;
964}
965
966static void wl18xx_set_tx_desc_csum(struct wl1271 *wl,
967 struct wl1271_tx_hw_descr *desc,
968 struct sk_buff *skb)
969{
970 u32 ip_hdr_offset;
971 struct iphdr *ip_hdr;
972
973 if (!checksum_param) {
974 desc->wl18xx_checksum_data = 0;
975 return;
976 }
977
978 if (skb->ip_summed != CHECKSUM_PARTIAL) {
979 desc->wl18xx_checksum_data = 0;
980 return;
981 }
982
983 ip_hdr_offset = skb_network_header(skb) - skb_mac_header(skb);
984 if (WARN_ON(ip_hdr_offset >= (1<<7))) {
985 desc->wl18xx_checksum_data = 0;
986 return;
987 }
988
989 desc->wl18xx_checksum_data = ip_hdr_offset << 1;
990
991 /* FW is interested only in the LSB of the protocol TCP=0 UDP=1 */
992 ip_hdr = (void *)skb_network_header(skb);
993 desc->wl18xx_checksum_data |= (ip_hdr->protocol & 0x01);
994}
995
996static void wl18xx_set_rx_csum(struct wl1271 *wl,
997 struct wl1271_rx_descriptor *desc,
998 struct sk_buff *skb)
999{
1000 if (desc->status & WL18XX_RX_CHECKSUM_MASK)
1001 skb->ip_summed = CHECKSUM_UNNECESSARY;
1002}
1003
1004/*
1005 * TODO: instead of having these two functions to get the rate mask,
1006 * we should modify the wlvif->rate_set instead
1007 */
1008static u32 wl18xx_sta_get_ap_rate_mask(struct wl1271 *wl,
1009 struct wl12xx_vif *wlvif)
1010{
1011 u32 hw_rate_set = wlvif->rate_set;
1012
1013 if (wlvif->channel_type == NL80211_CHAN_HT40MINUS ||
1014 wlvif->channel_type == NL80211_CHAN_HT40PLUS) {
1015 wl1271_debug(DEBUG_ACX, "using wide channel rate mask");
1016 hw_rate_set |= CONF_TX_RATE_USE_WIDE_CHAN;
1017
1018 /* we don't support MIMO in wide-channel mode */
1019 hw_rate_set &= ~CONF_TX_MIMO_RATES;
1020 }
1021
1022 return hw_rate_set;
1023}
1024
1025static u32 wl18xx_ap_get_mimo_wide_rate_mask(struct wl1271 *wl,
1026 struct wl12xx_vif *wlvif)
1027{
1028 struct wl18xx_priv *priv = wl->priv;
1029
1030 if (wlvif->channel_type == NL80211_CHAN_HT40MINUS ||
1031 wlvif->channel_type == NL80211_CHAN_HT40PLUS) {
1032 wl1271_debug(DEBUG_ACX, "using wide channel rate mask");
1033
1034 /* sanity check - we don't support this */
1035 if (WARN_ON(wlvif->band != IEEE80211_BAND_5GHZ))
1036 return 0;
1037
1038 return CONF_TX_RATE_USE_WIDE_CHAN;
1039 } else if (priv->conf.phy.number_of_assembled_ant2_4 >= 2 &&
1040 wlvif->band == IEEE80211_BAND_2GHZ) {
1041 wl1271_debug(DEBUG_ACX, "using MIMO rate mask");
1042 /*
1043 * we don't care about HT channel here - if a peer doesn't
1044 * support MIMO, we won't enable it in its rates
1045 */
1046 return CONF_TX_MIMO_RATES;
1047 } else {
1048 return 0;
1049 }
1050}
1051
1052static int wl18xx_get_pg_ver(struct wl1271 *wl, s8 *ver)
1053{
1054 u32 fuse;
1055 int ret;
1056
1057 ret = wlcore_set_partition(wl, &wl->ptable[PART_TOP_PRCM_ELP_SOC]);
1058 if (ret < 0)
1059 goto out;
1060
1061 ret = wlcore_read32(wl, WL18XX_REG_FUSE_DATA_1_3, &fuse);
1062 if (ret < 0)
1063 goto out;
1064
1065 if (ver)
1066 *ver = (fuse & WL18XX_PG_VER_MASK) >> WL18XX_PG_VER_OFFSET;
1067
1068 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1069
1070out:
1071 return ret;
1072}
1073
1074#define WL18XX_CONF_FILE_NAME "ti-connectivity/wl18xx-conf.bin"
1075static int wl18xx_conf_init(struct wl1271 *wl, struct device *dev)
1076{
1077 struct wl18xx_priv *priv = wl->priv;
1078 struct wlcore_conf_file *conf_file;
1079 const struct firmware *fw;
1080 int ret;
1081
1082 ret = request_firmware(&fw, WL18XX_CONF_FILE_NAME, dev);
1083 if (ret < 0) {
1084 wl1271_error("could not get configuration binary %s: %d",
1085 WL18XX_CONF_FILE_NAME, ret);
1086 goto out_fallback;
1087 }
1088
1089 if (fw->size != WL18XX_CONF_SIZE) {
1090 wl1271_error("configuration binary file size is wrong, expected %zu got %zu",
1091 WL18XX_CONF_SIZE, fw->size);
1092 ret = -EINVAL;
1093 goto out;
1094 }
1095
1096 conf_file = (struct wlcore_conf_file *) fw->data;
1097
1098 if (conf_file->header.magic != cpu_to_le32(WL18XX_CONF_MAGIC)) {
1099 wl1271_error("configuration binary file magic number mismatch, "
1100 "expected 0x%0x got 0x%0x", WL18XX_CONF_MAGIC,
1101 conf_file->header.magic);
1102 ret = -EINVAL;
1103 goto out;
1104 }
1105
1106 if (conf_file->header.version != cpu_to_le32(WL18XX_CONF_VERSION)) {
1107 wl1271_error("configuration binary file version not supported, "
1108 "expected 0x%08x got 0x%08x",
1109 WL18XX_CONF_VERSION, conf_file->header.version);
1110 ret = -EINVAL;
1111 goto out;
1112 }
1113
1114 memcpy(&wl->conf, &conf_file->core, sizeof(wl18xx_conf));
1115 memcpy(&priv->conf, &conf_file->priv, sizeof(priv->conf));
1116
1117 goto out;
1118
1119out_fallback:
1120 wl1271_warning("falling back to default config");
1121
1122 /* apply driver default configuration */
1123 memcpy(&wl->conf, &wl18xx_conf, sizeof(wl18xx_conf));
1124 /* apply default private configuration */
1125 memcpy(&priv->conf, &wl18xx_default_priv_conf, sizeof(priv->conf));
1126
1127 /* For now we just fallback */
1128 return 0;
1129
1130out:
1131 release_firmware(fw);
1132 return ret;
1133}
1134
/*
 * Boot the firmware in PLT (production line testing) mode: write the
 * PLT magic to scratch-pad 8, then run the regular boot sequence.
 */
static int wl18xx_plt_init(struct wl1271 *wl)
{
	int ret;

	ret = wlcore_write32(wl, WL18XX_SCR_PAD8, WL18XX_SCR_PAD8_PLT);
	if (ret < 0)
		return ret;

	return wl->ops->boot(wl);
}
1145
1146static int wl18xx_get_mac(struct wl1271 *wl)
1147{
1148 u32 mac1, mac2;
1149 int ret;
1150
1151 ret = wlcore_set_partition(wl, &wl->ptable[PART_TOP_PRCM_ELP_SOC]);
1152 if (ret < 0)
1153 goto out;
1154
1155 ret = wlcore_read32(wl, WL18XX_REG_FUSE_BD_ADDR_1, &mac1);
1156 if (ret < 0)
1157 goto out;
1158
1159 ret = wlcore_read32(wl, WL18XX_REG_FUSE_BD_ADDR_2, &mac2);
1160 if (ret < 0)
1161 goto out;
1162
1163 /* these are the two parts of the BD_ADDR */
1164 wl->fuse_oui_addr = ((mac2 & 0xffff) << 8) +
1165 ((mac1 & 0xff000000) >> 24);
1166 wl->fuse_nic_addr = (mac1 & 0xffffff);
1167
1168 ret = wlcore_set_partition(wl, &wl->ptable[PART_DOWN]);
1169
1170out:
1171 return ret;
1172}
1173
/*
 * Log the PHY firmware version carried in the chip-specific part of
 * the static data the FW hands over after boot. Always succeeds.
 */
static int wl18xx_handle_static_data(struct wl1271 *wl,
				     struct wl1271_static_data *static_data)
{
	struct wl18xx_static_data_priv *static_data_priv =
		(struct wl18xx_static_data_priv *) static_data->priv;

	wl1271_info("PHY firmware version: %s", static_data_priv->phy_version);

	return 0;
}
1184
1185static int wl18xx_get_spare_blocks(struct wl1271 *wl, bool is_gem)
1186{
1187 struct wl18xx_priv *priv = wl->priv;
1188
1189 /* If we have VIFs requiring extra spare, indulge them */
1190 if (priv->extra_spare_vif_count)
1191 return WL18XX_TX_HW_EXTRA_BLOCK_SPARE;
1192
1193 return WL18XX_TX_HW_BLOCK_SPARE;
1194}
1195
/*
 * Set or remove a key via wlcore. GEM and TKIP frames require an
 * extra spare Tx memory block, so adding the first such key (or
 * removing the last one) must also reprogram the FW spare-block
 * accounting - which is only safe while no frames are in flight.
 */
static int wl18xx_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
			  struct ieee80211_vif *vif,
			  struct ieee80211_sta *sta,
			  struct ieee80211_key_conf *key_conf)
{
	struct wl18xx_priv *priv = wl->priv;
	bool change_spare = false;
	int ret;

	/*
	 * when adding the first or removing the last GEM/TKIP interface,
	 * we have to adjust the number of spare blocks.
	 */
	change_spare = (key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP) &&
		((priv->extra_spare_vif_count == 0 && cmd == SET_KEY) ||
		 (priv->extra_spare_vif_count == 1 && cmd == DISABLE_KEY));

	/* no need to change spare - just regular set_key */
	if (!change_spare)
		return wlcore_set_key(wl, cmd, vif, sta, key_conf);

	/*
	 * stop the queues and flush to ensure the next packets are
	 * in sync with FW spare block accounting
	 */
	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
	wl1271_tx_flush(wl);

	ret = wlcore_set_key(wl, cmd, vif, sta, key_conf);
	if (ret < 0)
		goto out;

	/* key is now set, change the spare blocks */
	if (cmd == SET_KEY) {
		/* first GEM/TKIP key: grow to the extra spare count */
		ret = wl18xx_set_host_cfg_bitmap(wl,
					WL18XX_TX_HW_EXTRA_BLOCK_SPARE);
		if (ret < 0)
			goto out;

		priv->extra_spare_vif_count++;
	} else {
		/* last GEM/TKIP key removed: shrink back to the default */
		ret = wl18xx_set_host_cfg_bitmap(wl,
					WL18XX_TX_HW_BLOCK_SPARE);
		if (ret < 0)
			goto out;

		priv->extra_spare_vif_count--;
	}

out:
	/* queues were stopped above - always restart them */
	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
	return ret;
}
1250
1251static u32 wl18xx_pre_pkt_send(struct wl1271 *wl,
1252 u32 buf_offset, u32 last_len)
1253{
1254 if (wl->quirks & WLCORE_QUIRK_TX_PAD_LAST_FRAME) {
1255 struct wl1271_tx_hw_descr *last_desc;
1256
1257 /* get the last TX HW descriptor written to the aggr buf */
1258 last_desc = (struct wl1271_tx_hw_descr *)(wl->aggr_buf +
1259 buf_offset - last_len);
1260
1261 /* the last frame is padded up to an SDIO block */
1262 last_desc->wl18xx_mem.ctrl &= ~WL18XX_TX_CTRL_NOT_PADDED;
1263 return ALIGN(buf_offset, WL12XX_BUS_BLOCK_SIZE);
1264 }
1265
1266 /* no modifications */
1267 return buf_offset;
1268}
1269
/*
 * wl18xx implementations of the wlcore driver operations. Probe may
 * NULL out set_rx_csum/init_vif when checksum offload is disabled.
 */
static struct wlcore_ops wl18xx_ops = {
	.identify_chip	= wl18xx_identify_chip,
	.boot		= wl18xx_boot,
	.plt_init	= wl18xx_plt_init,
	.trigger_cmd	= wl18xx_trigger_cmd,
	.ack_event	= wl18xx_ack_event,
	.calc_tx_blocks = wl18xx_calc_tx_blocks,
	.set_tx_desc_blocks = wl18xx_set_tx_desc_blocks,
	.set_tx_desc_data_len = wl18xx_set_tx_desc_data_len,
	.get_rx_buf_align = wl18xx_get_rx_buf_align,
	.get_rx_packet_len = wl18xx_get_rx_packet_len,
	.tx_immediate_compl = wl18xx_tx_immediate_completion,
	.tx_delayed_compl = NULL,
	.hw_init	= wl18xx_hw_init,
	.set_tx_desc_csum = wl18xx_set_tx_desc_csum,
	.get_pg_ver	= wl18xx_get_pg_ver,
	.set_rx_csum = wl18xx_set_rx_csum,
	.sta_get_ap_rate_mask = wl18xx_sta_get_ap_rate_mask,
	.ap_get_mimo_wide_rate_mask = wl18xx_ap_get_mimo_wide_rate_mask,
	.get_mac	= wl18xx_get_mac,
	.debugfs_init	= wl18xx_debugfs_add_files,
	.handle_static_data	= wl18xx_handle_static_data,
	.get_spare_blocks = wl18xx_get_spare_blocks,
	.set_key	= wl18xx_set_key,
	.pre_pkt_send	= wl18xx_pre_pkt_send,
};
1296
/*
 * HT cap appropriate for wide (40MHz) channels in 2.4GHz:
 * single stream (rx_mask[0] = MCS 0-7), short GI in 20/40MHz,
 * DSSS/CCK allowed in 40MHz mode.
 */
static struct ieee80211_sta_ht_cap wl18xx_siso40_ht_cap_2ghz = {
	.cap = IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 |
	       IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_DSSSCCK40,
	.ht_supported = true,
	.ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
	.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
	.mcs = {
		.rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
		/* 150 is presumably Mbps (MCS7 SGI 40MHz) - per mac80211 */
		.rx_highest = cpu_to_le16(150),
		.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
	},
};
1310
/*
 * HT cap appropriate for wide (40MHz) channels in 5GHz: same as the
 * 2.4GHz variant minus the DSSS/CCK-in-40MHz bit (DSSS/CCK is a
 * 2.4GHz-only modulation).
 */
static struct ieee80211_sta_ht_cap wl18xx_siso40_ht_cap_5ghz = {
	.cap = IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 |
	       IEEE80211_HT_CAP_SUP_WIDTH_20_40,
	.ht_supported = true,
	.ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
	.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
	.mcs = {
		.rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
		.rx_highest = cpu_to_le16(150),
		.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
	},
};
1324
/* HT cap appropriate for SISO 20MHz: single stream, short GI in 20MHz only */
static struct ieee80211_sta_ht_cap wl18xx_siso20_ht_cap = {
	.cap = IEEE80211_HT_CAP_SGI_20,
	.ht_supported = true,
	.ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
	.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
	.mcs = {
		.rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
		.rx_highest = cpu_to_le16(72),
		.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
	},
};
1337
/*
 * HT cap appropriate for MIMO rates in a 20MHz channel: two spatial
 * streams (rx_mask[0] and rx_mask[1] set => MCS 0-15).
 */
static struct ieee80211_sta_ht_cap wl18xx_mimo_ht_cap_2ghz = {
	.cap = IEEE80211_HT_CAP_SGI_20,
	.ht_supported = true,
	.ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
	.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
	.mcs = {
		.rx_mask = { 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, },
		.rx_highest = cpu_to_le16(144),
		.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
	},
};
1350
/*
 * Platform-device probe: allocate the wlcore hw with wl18xx private
 * data, wire up the wl18xx ops/tables, load the configuration (binary
 * file or built-in defaults), apply module-parameter overrides, choose
 * the per-band HT capabilities, and hand control to wlcore_probe().
 */
static int __devinit wl18xx_probe(struct platform_device *pdev)
{
	struct wl1271 *wl;
	struct ieee80211_hw *hw;
	struct wl18xx_priv *priv;
	int ret;

	hw = wlcore_alloc_hw(sizeof(*priv));
	if (IS_ERR(hw)) {
		wl1271_error("can't allocate hw");
		ret = PTR_ERR(hw);
		goto out;
	}

	wl = hw->priv;
	priv = wl->priv;
	wl->ops = &wl18xx_ops;
	wl->ptable = wl18xx_ptable;
	wl->rtable = wl18xx_rtable;
	wl->num_tx_desc = 32;
	wl->num_rx_desc = 32;
	wl->band_rate_to_idx = wl18xx_band_rate_to_idx;
	wl->hw_tx_rate_tbl_size = WL18XX_CONF_HW_RXTX_RATE_MAX;
	wl->hw_min_ht_rate = WL18XX_CONF_HW_RXTX_RATE_MCS0;
	wl->fw_status_priv_len = sizeof(struct wl18xx_fw_status_priv);
	wl->stats.fw_stats_len = sizeof(struct wl18xx_acx_statistics);
	wl->static_data_priv_len = sizeof(struct wl18xx_static_data_priv);

	/* the module parameter may override the default Rx desc count */
	if (num_rx_desc_param != -1)
		wl->num_rx_desc = num_rx_desc_param;

	ret = wl18xx_conf_init(wl, &pdev->dev);
	if (ret < 0)
		goto out_free;

	/* board_type selects the PHY board profile (see reg.h enum) */
	if (!strcmp(board_type_param, "fpga")) {
		priv->conf.phy.board_type = BOARD_TYPE_FPGA_18XX;
	} else if (!strcmp(board_type_param, "hdk")) {
		priv->conf.phy.board_type = BOARD_TYPE_HDK_18XX;
		/* HACK! Just for now we hardcode HDK to 0x06 */
		priv->conf.phy.low_band_component_type = 0x06;
	} else if (!strcmp(board_type_param, "dvp")) {
		priv->conf.phy.board_type = BOARD_TYPE_DVP_18XX;
	} else if (!strcmp(board_type_param, "evb")) {
		priv->conf.phy.board_type = BOARD_TYPE_EVB_18XX;
	} else if (!strcmp(board_type_param, "com8")) {
		priv->conf.phy.board_type = BOARD_TYPE_COM8_18XX;
		/* HACK! Just for now we hardcode COM8 to 0x06 */
		priv->conf.phy.low_band_component_type = 0x06;
	} else {
		wl1271_error("invalid board type '%s'", board_type_param);
		ret = -EINVAL;
		goto out_free;
	}

	/* If the module param is set, update it in conf */
	if (low_band_component_param != -1)
		priv->conf.phy.low_band_component = low_band_component_param;
	if (low_band_component_type_param != -1)
		priv->conf.phy.low_band_component_type =
			low_band_component_type_param;
	if (high_band_component_param != -1)
		priv->conf.phy.high_band_component = high_band_component_param;
	if (high_band_component_type_param != -1)
		priv->conf.phy.high_band_component_type =
			high_band_component_type_param;
	if (pwr_limit_reference_11_abg_param != -1)
		priv->conf.phy.pwr_limit_reference_11_abg =
			pwr_limit_reference_11_abg_param;
	if (n_antennas_2_param != -1)
		priv->conf.phy.number_of_assembled_ant2_4 = n_antennas_2_param;
	if (n_antennas_5_param != -1)
		priv->conf.phy.number_of_assembled_ant5 = n_antennas_5_param;
	if (dc2dc_param != -1)
		priv->conf.phy.external_pa_dc2dc = dc2dc_param;

	if (!strcmp(ht_mode_param, "default")) {
		/*
		 * Only support mimo with multiple antennas. Fall back to
		 * siso20.
		 */
		if (priv->conf.phy.number_of_assembled_ant2_4 >= 2)
			wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
					  &wl18xx_mimo_ht_cap_2ghz);
		else
			wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
					  &wl18xx_siso20_ht_cap);

		/* 5Ghz is always wide */
		wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ,
				  &wl18xx_siso40_ht_cap_5ghz);
	} else if (!strcmp(ht_mode_param, "wide")) {
		wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
				  &wl18xx_siso40_ht_cap_2ghz);
		wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ,
				  &wl18xx_siso40_ht_cap_5ghz);
	} else if (!strcmp(ht_mode_param, "siso20")) {
		wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
				  &wl18xx_siso20_ht_cap);
		wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ,
				  &wl18xx_siso20_ht_cap);
	} else {
		wl1271_error("invalid ht_mode '%s'", ht_mode_param);
		ret = -EINVAL;
		goto out_free;
	}

	/* NOTE: this mutates the shared static wl18xx_ops for all devices */
	if (!checksum_param) {
		wl18xx_ops.set_rx_csum = NULL;
		wl18xx_ops.init_vif = NULL;
	}

	wl->enable_11a = enable_11a_param;

	return wlcore_probe(wl, pdev);

out_free:
	wlcore_free_hw(wl);
out:
	return ret;
}
1472
/* platform device ids this driver binds to */
static const struct platform_device_id wl18xx_id_table[] __devinitconst = {
	{ "wl18xx", 0 },
	{  } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(platform, wl18xx_id_table);
1478
/* platform driver glue; remove is handled generically by wlcore */
static struct platform_driver wl18xx_driver = {
	.probe		= wl18xx_probe,
	.remove		= __devexit_p(wlcore_remove),
	.id_table	= wl18xx_id_table,
	.driver = {
		.name	= "wl18xx_driver",
		.owner	= THIS_MODULE,
	}
};
1488
/* module entry point: register the platform driver */
static int __init wl18xx_init(void)
{
	return platform_driver_register(&wl18xx_driver);
}
module_init(wl18xx_init);
1494
/* module exit point: unregister the platform driver */
static void __exit wl18xx_exit(void)
{
	platform_driver_unregister(&wl18xx_driver);
}
module_exit(wl18xx_exit);
1500
/*
 * Module parameters. All are read-only after load (S_IRUSR); the
 * phy-related ones override fields of priv->conf.phy in probe.
 */
module_param_named(ht_mode, ht_mode_param, charp, S_IRUSR);
MODULE_PARM_DESC(ht_mode, "Force HT mode: wide or siso20");

module_param_named(board_type, board_type_param, charp, S_IRUSR);
MODULE_PARM_DESC(board_type, "Board type: fpga, hdk (default), evb, com8 or "
		 "dvp");

module_param_named(checksum, checksum_param, bool, S_IRUSR);
MODULE_PARM_DESC(checksum, "Enable TCP checksum: boolean (defaults to false)");

module_param_named(enable_11a, enable_11a_param, bool, S_IRUSR);
MODULE_PARM_DESC(enable_11a, "Enable 11a (5GHz): boolean (defaults to true)");

module_param_named(dc2dc, dc2dc_param, int, S_IRUSR);
MODULE_PARM_DESC(dc2dc, "External DC2DC: u8 (defaults to 0)");

module_param_named(n_antennas_2, n_antennas_2_param, int, S_IRUSR);
MODULE_PARM_DESC(n_antennas_2,
		 "Number of installed 2.4GHz antennas: 1 (default) or 2");

module_param_named(n_antennas_5, n_antennas_5_param, int, S_IRUSR);
MODULE_PARM_DESC(n_antennas_5,
		 "Number of installed 5GHz antennas: 1 (default) or 2");

module_param_named(low_band_component, low_band_component_param, int,
		   S_IRUSR);
MODULE_PARM_DESC(low_band_component, "Low band component: u8 "
		 "(default is 0x01)");

module_param_named(low_band_component_type, low_band_component_type_param,
		   int, S_IRUSR);
MODULE_PARM_DESC(low_band_component_type, "Low band component type: u8 "
		 "(default is 0x05 or 0x06 depending on the board_type)");

module_param_named(high_band_component, high_band_component_param, int,
		   S_IRUSR);
MODULE_PARM_DESC(high_band_component, "High band component: u8, "
		 "(default is 0x01)");

module_param_named(high_band_component_type, high_band_component_type_param,
		   int, S_IRUSR);
MODULE_PARM_DESC(high_band_component_type, "High band component type: u8 "
		 "(default is 0x09)");

module_param_named(pwr_limit_reference_11_abg,
		   pwr_limit_reference_11_abg_param, int, S_IRUSR);
MODULE_PARM_DESC(pwr_limit_reference_11_abg, "Power limit reference: u8 "
		 "(default is 0xc8)");
1549
1550module_param_named(num_rx_desc,
1551 num_rx_desc_param, int, S_IRUSR);
1552MODULE_PARM_DESC(num_rx_desc_param,
1553 "Number of Rx descriptors: u8 (default is 32)");
1554
1555MODULE_LICENSE("GPL v2");
1556MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
1557MODULE_FIRMWARE(WL18XX_FW_NAME);
diff --git a/drivers/net/wireless/ti/wl18xx/reg.h b/drivers/net/wireless/ti/wl18xx/reg.h
new file mode 100644
index 000000000000..937b71d8783f
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/reg.h
@@ -0,0 +1,191 @@
1/*
2 * This file is part of wlcore
3 *
4 * Copyright (C) 2011 Texas Instruments Inc.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
#ifndef __REG_H__
#define __REG_H__

/* chip address map: base addresses of the major memory regions */
#define WL18XX_REGISTERS_BASE		0x00800000
#define WL18XX_CODE_BASE		0x00000000
#define WL18XX_DATA_BASE		0x00400000
#define WL18XX_DOUBLE_BUFFER_BASE	0x00600000
#define WL18XX_MCU_KEY_SEARCH_BASE	0x00700000
#define WL18XX_PHY_BASE			0x00900000
#define WL18XX_TOP_OCP_BASE		0x00A00000
#define WL18XX_PACKET_RAM_BASE		0x00B00000
#define WL18XX_HOST_BASE		0x00C00000

#define WL18XX_REGISTERS_DOWN_SIZE	0x0000B000

#define WL18XX_REG_BOOT_PART_START	0x00802000
#define WL18XX_REG_BOOT_PART_SIZE	0x00014578

#define WL18XX_PHY_INIT_MEM_ADDR	0x80926000

/* register sub-blocks within the registers region */
#define WL18XX_SDIO_WSPI_BASE		(WL18XX_REGISTERS_BASE)
#define WL18XX_REG_CONFIG_BASE		(WL18XX_REGISTERS_BASE + 0x02000)
#define WL18XX_WGCM_REGS_BASE		(WL18XX_REGISTERS_BASE + 0x03000)
#define WL18XX_ENC_BASE			(WL18XX_REGISTERS_BASE + 0x04000)
#define WL18XX_INTERRUPT_BASE		(WL18XX_REGISTERS_BASE + 0x05000)
#define WL18XX_UART_BASE		(WL18XX_REGISTERS_BASE + 0x06000)
#define WL18XX_WELP_BASE		(WL18XX_REGISTERS_BASE + 0x07000)
#define WL18XX_TCP_CKSM_BASE		(WL18XX_REGISTERS_BASE + 0x08000)
#define WL18XX_FIFO_BASE		(WL18XX_REGISTERS_BASE + 0x09000)
#define WL18XX_OCP_BRIDGE_BASE		(WL18XX_REGISTERS_BASE + 0x0A000)
#define WL18XX_PMAC_RX_BASE		(WL18XX_REGISTERS_BASE + 0x14800)
#define WL18XX_PMAC_ACM_BASE		(WL18XX_REGISTERS_BASE + 0x14C00)
#define WL18XX_PMAC_TX_BASE		(WL18XX_REGISTERS_BASE + 0x15000)
#define WL18XX_PMAC_CSR_BASE		(WL18XX_REGISTERS_BASE + 0x15400)

#define WL18XX_REG_ECPU_CONTROL		(WL18XX_REGISTERS_BASE + 0x02004)
#define WL18XX_REG_INTERRUPT_NO_CLEAR	(WL18XX_REGISTERS_BASE + 0x050E8)
#define WL18XX_REG_INTERRUPT_ACK	(WL18XX_REGISTERS_BASE + 0x050F0)
#define WL18XX_REG_INTERRUPT_TRIG	(WL18XX_REGISTERS_BASE + 0x5074)
#define WL18XX_REG_INTERRUPT_TRIG_H	(WL18XX_REGISTERS_BASE + 0x5078)
#define WL18XX_REG_INTERRUPT_MASK	(WL18XX_REGISTERS_BASE + 0x0050DC)

#define WL18XX_REG_CHIP_ID_B		(WL18XX_REGISTERS_BASE + 0x01542C)

#define WL18XX_SLV_MEM_DATA		(WL18XX_HOST_BASE + 0x0018)
#define WL18XX_SLV_REG_DATA		(WL18XX_HOST_BASE + 0x0008)

/* Scratch pad registers */
#define WL18XX_SCR_PAD0			(WL18XX_REGISTERS_BASE + 0x0154EC)
#define WL18XX_SCR_PAD1			(WL18XX_REGISTERS_BASE + 0x0154F0)
#define WL18XX_SCR_PAD2			(WL18XX_REGISTERS_BASE + 0x0154F4)
#define WL18XX_SCR_PAD3			(WL18XX_REGISTERS_BASE + 0x0154F8)
#define WL18XX_SCR_PAD4			(WL18XX_REGISTERS_BASE + 0x0154FC)
#define WL18XX_SCR_PAD4_SET		(WL18XX_REGISTERS_BASE + 0x015504)
#define WL18XX_SCR_PAD4_CLR		(WL18XX_REGISTERS_BASE + 0x015500)
#define WL18XX_SCR_PAD5			(WL18XX_REGISTERS_BASE + 0x015508)
#define WL18XX_SCR_PAD5_SET		(WL18XX_REGISTERS_BASE + 0x015510)
#define WL18XX_SCR_PAD5_CLR		(WL18XX_REGISTERS_BASE + 0x01550C)
#define WL18XX_SCR_PAD6			(WL18XX_REGISTERS_BASE + 0x015514)
#define WL18XX_SCR_PAD7			(WL18XX_REGISTERS_BASE + 0x015518)
#define WL18XX_SCR_PAD8			(WL18XX_REGISTERS_BASE + 0x01551C)
#define WL18XX_SCR_PAD9			(WL18XX_REGISTERS_BASE + 0x015520)

/* Spare registers */
#define WL18XX_SPARE_A1			(WL18XX_REGISTERS_BASE + 0x002194)
#define WL18XX_SPARE_A2			(WL18XX_REGISTERS_BASE + 0x002198)
#define WL18XX_SPARE_A3			(WL18XX_REGISTERS_BASE + 0x00219C)
#define WL18XX_SPARE_A4			(WL18XX_REGISTERS_BASE + 0x0021A0)
#define WL18XX_SPARE_A5			(WL18XX_REGISTERS_BASE + 0x0021A4)
#define WL18XX_SPARE_A6			(WL18XX_REGISTERS_BASE + 0x0021A8)
#define WL18XX_SPARE_A7			(WL18XX_REGISTERS_BASE + 0x0021AC)
#define WL18XX_SPARE_A8			(WL18XX_REGISTERS_BASE + 0x0021B0)
#define WL18XX_SPARE_B1			(WL18XX_REGISTERS_BASE + 0x015524)
#define WL18XX_SPARE_B2			(WL18XX_REGISTERS_BASE + 0x015528)
#define WL18XX_SPARE_B3			(WL18XX_REGISTERS_BASE + 0x01552C)
#define WL18XX_SPARE_B4			(WL18XX_REGISTERS_BASE + 0x015530)
#define WL18XX_SPARE_B5			(WL18XX_REGISTERS_BASE + 0x015534)
#define WL18XX_SPARE_B6			(WL18XX_REGISTERS_BASE + 0x015538)
#define WL18XX_SPARE_B7			(WL18XX_REGISTERS_BASE + 0x01553C)
#define WL18XX_SPARE_B8			(WL18XX_REGISTERS_BASE + 0x015540)

/* mailbox pointers and EEPROM-less indication live in scratch pads */
#define WL18XX_REG_COMMAND_MAILBOX_PTR	(WL18XX_SCR_PAD0)
#define WL18XX_REG_EVENT_MAILBOX_PTR	(WL18XX_SCR_PAD1)
#define WL18XX_EEPROMLESS_IND		(WL18XX_SCR_PAD4)

#define WL18XX_WELP_ARM_COMMAND		(WL18XX_REGISTERS_BASE + 0x7100)
#define WL18XX_ENABLE			(WL18XX_REGISTERS_BASE + 0x01543C)

/* PRCM registers */
#define PLATFORM_DETECTION		0xA0E3E0
#define OCS_EN				0xA02080
#define PRIMARY_CLK_DETECT		0xA020A6
#define PLLSH_WCS_PLL_N			0xA02362
#define PLLSH_WCS_PLL_M			0xA02360
#define PLLSH_WCS_PLL_Q_FACTOR_CFG_1	0xA02364
#define PLLSH_WCS_PLL_Q_FACTOR_CFG_2	0xA02366
#define PLLSH_WCS_PLL_P_FACTOR_CFG_1	0xA02368
#define PLLSH_WCS_PLL_P_FACTOR_CFG_2	0xA0236A
#define PLLSH_WCS_PLL_SWALLOW_EN	0xA0236C
#define PLLSH_WL_PLL_EN			0xA02392

#define PLLSH_WCS_PLL_Q_FACTOR_CFG_1_MASK	0xFFFF
#define PLLSH_WCS_PLL_Q_FACTOR_CFG_2_MASK	0x007F
#define PLLSH_WCS_PLL_P_FACTOR_CFG_1_MASK	0xFFFF
#define PLLSH_WCS_PLL_P_FACTOR_CFG_2_MASK	0x000F

#define PLLSH_WCS_PLL_SWALLOW_EN_VAL1	0x1
#define PLLSH_WCS_PLL_SWALLOW_EN_VAL2	0x12

/* fuse (OTP) registers holding the PG version and the BD_ADDR */
#define WL18XX_REG_FUSE_DATA_1_3	0xA0260C
#define WL18XX_PG_VER_MASK		0x70
#define WL18XX_PG_VER_OFFSET		4

#define WL18XX_REG_FUSE_BD_ADDR_1	0xA02602
#define WL18XX_REG_FUSE_BD_ADDR_2	0xA02606

#define WL18XX_CMD_MBOX_ADDRESS		0xB007B4

#define WL18XX_FW_STATUS_ADDR		0x50F8

#define CHIP_ID_185x_PG10		(0x06030101)
#define CHIP_ID_185x_PG20		(0x06030111)

/*
 * Host Command Interrupt. Setting this bit masks
 * the interrupt that the host issues to inform
 * the FW that it has sent a command
 * to the Wlan hardware Command Mailbox.
 */
#define WL18XX_INTR_TRIG_CMD		BIT(28)

/*
 * Host Event Acknowlegde Interrupt. The host
 * sets this bit to acknowledge that it received
 * the unsolicited information from the event
 * mailbox.
 */
#define WL18XX_INTR_TRIG_EVENT_ACK	BIT(29)

/*
 * To boot the firmware in PLT mode we need to write this value in
 * SCR_PAD8 before starting.
 */
#define WL18XX_SCR_PAD8_PLT		0xBABABEBE

/*
 * Front-end switch topology values - presumably what the
 * low/high_band_component_type conf fields encode; confirm against
 * the PHY configuration documentation.
 */
enum {
	COMPONENT_NO_SWITCH	= 0x0,
	COMPONENT_2_WAY_SWITCH	= 0x1,
	COMPONENT_3_WAY_SWITCH	= 0x2,
	COMPONENT_MATCHING	= 0x3,
};

/* front-end module (FEM) vendor identifiers */
enum {
	FEM_NONE	= 0x0,
	FEM_VENDOR_1	= 0x1,
	FEM_VENDOR_2	= 0x2,
	FEM_VENDOR_3	= 0x3,
};

/* board types selectable via the "board_type" module parameter */
enum {
	BOARD_TYPE_EVB_18XX	= 0,
	BOARD_TYPE_DVP_18XX	= 1,
	BOARD_TYPE_HDK_18XX	= 2,
	BOARD_TYPE_FPGA_18XX	= 3,
	BOARD_TYPE_COM8_18XX	= 4,

	NUM_BOARD_TYPES,
};

#endif /* __REG_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/tx.c b/drivers/net/wireless/ti/wl18xx/tx.c
new file mode 100644
index 000000000000..5b1fb10d9fd7
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/tx.c
@@ -0,0 +1,127 @@
1/*
2 * This file is part of wl18xx
3 *
4 * Copyright (C) 2011 Texas Instruments Inc.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#include "../wlcore/wlcore.h"
23#include "../wlcore/cmd.h"
24#include "../wlcore/debug.h"
25#include "../wlcore/acx.h"
26#include "../wlcore/tx.h"
27
28#include "wl18xx.h"
29#include "tx.h"
30
/*
 * Handle one FW Tx-status byte: look up the corresponding skb by the
 * descriptor id encoded in the low 7 bits, fill in the mac80211 Tx
 * status, strip driver-private headers and return the frame to the
 * stack via the deferred queue.
 */
static void wl18xx_tx_complete_packet(struct wl1271 *wl, u8 tx_stat_byte)
{
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	/* low 7 bits of the status byte carry the descriptor id */
	int id = tx_stat_byte & WL18XX_TX_STATUS_DESC_ID_MASK;
	bool tx_success;

	/* check for id legality */
	if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) {
		wl1271_warning("illegal id in tx completion: %d", id);
		return;
	}

	/* a zero bit indicates Tx success */
	tx_success = !(tx_stat_byte & BIT(WL18XX_TX_STATUS_STAT_BIT_IDX));

	skb = wl->tx_frames[id];
	info = IEEE80211_SKB_CB(skb);

	/* dummy packets don't belong to mac80211 - just recycle the id */
	if (wl12xx_is_dummy_packet(wl, skb)) {
		wl1271_free_tx_id(wl, id);
		return;
	}

	/* update the TX status info */
	if (tx_success && !(info->flags & IEEE80211_TX_CTL_NO_ACK))
		info->flags |= IEEE80211_TX_STAT_ACK;

	/* no real data about Tx completion */
	info->status.rates[0].idx = -1;
	info->status.rates[0].count = 0;
	info->status.rates[0].flags = 0;
	info->status.ack_signal = -1;

	if (!tx_success)
		wl->stats.retry_count++;

	/*
	 * TODO: update sequence number for encryption? seems to be
	 * unsupported for now. needed for recovery with encryption.
	 */

	/* remove private header from packet */
	skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	/* remove TKIP header space if present */
	if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
	    info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
		int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
		memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data, hdrlen);
		skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
	}

	wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p success %d",
		     id, skb, tx_success);

	/* return the packet to the stack */
	skb_queue_tail(&wl->deferred_tx_queue, skb);
	queue_work(wl->freezable_wq, &wl->netstack_work);
	wl1271_free_tx_id(wl, id);
}
94
/*
 * Process the FW's circular released-descriptor log: complete every
 * entry between the index we handled last time and the FW's current
 * release index, then remember the new index.
 */
void wl18xx_tx_immediate_complete(struct wl1271 *wl)
{
	struct wl18xx_fw_status_priv *status_priv =
		(struct wl18xx_fw_status_priv *)wl->fw_status_2->priv;
	struct wl18xx_priv *priv = wl->priv;
	u8 i;

	/* nothing to do here */
	if (priv->last_fw_rls_idx == status_priv->fw_release_idx)
		return;

	/* freed Tx descriptors */
	wl1271_debug(DEBUG_TX, "last released desc = %d, current idx = %d",
		     priv->last_fw_rls_idx, status_priv->fw_release_idx);

	/* the FW-reported index must stay within the ring */
	if (status_priv->fw_release_idx >= WL18XX_FW_MAX_TX_STATUS_DESC) {
		wl1271_error("invalid desc release index %d",
			     status_priv->fw_release_idx);
		WARN_ON(1);
		return;
	}

	/* walk the ring, wrapping at WL18XX_FW_MAX_TX_STATUS_DESC */
	for (i = priv->last_fw_rls_idx;
	     i != status_priv->fw_release_idx;
	     i = (i + 1) % WL18XX_FW_MAX_TX_STATUS_DESC) {
		wl18xx_tx_complete_packet(wl,
					  status_priv->released_tx_desc[i]);

		wl->tx_results_count++;
	}

	priv->last_fw_rls_idx = status_priv->fw_release_idx;
}
diff --git a/drivers/net/wireless/ti/wl18xx/tx.h b/drivers/net/wireless/ti/wl18xx/tx.h
new file mode 100644
index 000000000000..ccddc548e44a
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/tx.h
@@ -0,0 +1,46 @@
1/*
2 * This file is part of wl18xx
3 *
4 * Copyright (C) 2011 Texas Instruments. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
#ifndef __WL18XX_TX_H__
#define __WL18XX_TX_H__

#include "../wlcore/wlcore.h"

/* default number of spare Tx memory blocks reserved per frame */
#define WL18XX_TX_HW_BLOCK_SPARE        1
/* for special cases - namely, TKIP and GEM */
#define WL18XX_TX_HW_EXTRA_BLOCK_SPARE  2
#define WL18XX_TX_HW_BLOCK_SIZE         268

/* layout of the FW Tx-status byte: bits 0-6 = desc id, bit 7 = status */
#define WL18XX_TX_STATUS_DESC_ID_MASK   0x7F
#define WL18XX_TX_STATUS_STAT_BIT_IDX   7

/* Indicates this TX HW frame is not padded to SDIO block size */
#define WL18XX_TX_CTRL_NOT_PADDED       BIT(7)

/*
 * The FW uses a special bit to indicate a wide channel should be used in
 * the rate policy.
 */
#define CONF_TX_RATE_USE_WIDE_CHAN      BIT(31)

void wl18xx_tx_immediate_complete(struct wl1271 *wl);

#endif /* __WL18XX_TX_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/wl18xx.h b/drivers/net/wireless/ti/wl18xx/wl18xx.h
new file mode 100644
index 000000000000..6452396fa1d4
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/wl18xx.h
@@ -0,0 +1,95 @@
1/*
2 * This file is part of wl18xx
3 *
4 * Copyright (C) 2011 Texas Instruments Inc.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#ifndef __WL18XX_PRIV_H__
23#define __WL18XX_PRIV_H__
24
25#include "conf.h"
26
27/* minimum FW required for driver */
28#define WL18XX_CHIP_VER 8
29#define WL18XX_IFTYPE_VER 2
30#define WL18XX_MAJOR_VER 0
31#define WL18XX_SUBTYPE_VER 0
32#define WL18XX_MINOR_VER 100
33
34#define WL18XX_CMD_MAX_SIZE 740
35
36struct wl18xx_priv {
37 /* buffer for sending commands to FW */
38 u8 cmd_buf[WL18XX_CMD_MAX_SIZE];
39
40 struct wl18xx_priv_conf conf;
41
42 /* Index of last released Tx desc in FW */
43 u8 last_fw_rls_idx;
44
45 /* number of VIFs requiring extra spare mem-blocks */
46 int extra_spare_vif_count;
47};
48
49#define WL18XX_FW_MAX_TX_STATUS_DESC 33
50
51struct wl18xx_fw_status_priv {
52 /*
53 * Index in released_tx_desc for first byte that holds
54 * released tx host desc
55 */
56 u8 fw_release_idx;
57
58 /*
59 * Array of host Tx descriptors, where fw_release_idx
60 * indicated the first released idx.
61 */
62 u8 released_tx_desc[WL18XX_FW_MAX_TX_STATUS_DESC];
63
64 u8 padding[2];
65};
66
67#define WL18XX_PHY_VERSION_MAX_LEN 20
68
69struct wl18xx_static_data_priv {
70 char phy_version[WL18XX_PHY_VERSION_MAX_LEN];
71};
72
73struct wl18xx_clk_cfg {
74 u32 n;
75 u32 m;
76 u32 p;
77 u32 q;
78 bool swallow;
79};
80
81enum {
82 CLOCK_CONFIG_16_2_M = 1,
83 CLOCK_CONFIG_16_368_M,
84 CLOCK_CONFIG_16_8_M,
85 CLOCK_CONFIG_19_2_M,
86 CLOCK_CONFIG_26_M,
87 CLOCK_CONFIG_32_736_M,
88 CLOCK_CONFIG_33_6_M,
89 CLOCK_CONFIG_38_468_M,
90 CLOCK_CONFIG_52_M,
91
92 NUM_CLOCK_CONFIGS,
93};
94
95#endif /* __WL18XX_PRIV_H__ */
diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index f3d6fa508269..ce108a736bd0 100644
--- a/drivers/net/wireless/ti/wlcore/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -70,7 +70,7 @@ int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth)
70 struct acx_sleep_auth *auth; 70 struct acx_sleep_auth *auth;
71 int ret; 71 int ret;
72 72
73 wl1271_debug(DEBUG_ACX, "acx sleep auth"); 73 wl1271_debug(DEBUG_ACX, "acx sleep auth %d", sleep_auth);
74 74
75 auth = kzalloc(sizeof(*auth), GFP_KERNEL); 75 auth = kzalloc(sizeof(*auth), GFP_KERNEL);
76 if (!auth) { 76 if (!auth) {
@@ -81,11 +81,18 @@ int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth)
81 auth->sleep_auth = sleep_auth; 81 auth->sleep_auth = sleep_auth;
82 82
83 ret = wl1271_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth)); 83 ret = wl1271_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth));
84 if (ret < 0) {
85 wl1271_error("could not configure sleep_auth to %d: %d",
86 sleep_auth, ret);
87 goto out;
88 }
84 89
90 wl->sleep_auth = sleep_auth;
85out: 91out:
86 kfree(auth); 92 kfree(auth);
87 return ret; 93 return ret;
88} 94}
95EXPORT_SYMBOL_GPL(wl1271_acx_sleep_auth);
89 96
90int wl1271_acx_tx_power(struct wl1271 *wl, struct wl12xx_vif *wlvif, 97int wl1271_acx_tx_power(struct wl1271 *wl, struct wl12xx_vif *wlvif,
91 int power) 98 int power)
@@ -708,14 +715,14 @@ out:
708 return ret; 715 return ret;
709} 716}
710 717
711int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats) 718int wl1271_acx_statistics(struct wl1271 *wl, void *stats)
712{ 719{
713 int ret; 720 int ret;
714 721
715 wl1271_debug(DEBUG_ACX, "acx statistics"); 722 wl1271_debug(DEBUG_ACX, "acx statistics");
716 723
717 ret = wl1271_cmd_interrogate(wl, ACX_STATISTICS, stats, 724 ret = wl1271_cmd_interrogate(wl, ACX_STATISTICS, stats,
718 sizeof(*stats)); 725 wl->stats.fw_stats_len);
719 if (ret < 0) { 726 if (ret < 0) {
720 wl1271_warning("acx statistics failed: %d", ret); 727 wl1271_warning("acx statistics failed: %d", ret);
721 return -ENOMEM; 728 return -ENOMEM;
@@ -997,6 +1004,7 @@ out:
997 kfree(mem_conf); 1004 kfree(mem_conf);
998 return ret; 1005 return ret;
999} 1006}
1007EXPORT_SYMBOL_GPL(wl12xx_acx_mem_cfg);
1000 1008
1001int wl1271_acx_init_mem_config(struct wl1271 *wl) 1009int wl1271_acx_init_mem_config(struct wl1271 *wl)
1002{ 1010{
@@ -1027,6 +1035,7 @@ int wl1271_acx_init_mem_config(struct wl1271 *wl)
1027 1035
1028 return 0; 1036 return 0;
1029} 1037}
1038EXPORT_SYMBOL_GPL(wl1271_acx_init_mem_config);
1030 1039
1031int wl1271_acx_init_rx_interrupt(struct wl1271 *wl) 1040int wl1271_acx_init_rx_interrupt(struct wl1271 *wl)
1032{ 1041{
@@ -1150,6 +1159,7 @@ out:
1150 kfree(acx); 1159 kfree(acx);
1151 return ret; 1160 return ret;
1152} 1161}
1162EXPORT_SYMBOL_GPL(wl1271_acx_pm_config);
1153 1163
1154int wl1271_acx_keep_alive_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif, 1164int wl1271_acx_keep_alive_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1155 bool enable) 1165 bool enable)
diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h
index e6a74869a5ff..d03215d6b3bd 100644
--- a/drivers/net/wireless/ti/wlcore/acx.h
+++ b/drivers/net/wireless/ti/wlcore/acx.h
@@ -51,21 +51,18 @@
51#define WL1271_ACX_INTR_TRACE_A BIT(7) 51#define WL1271_ACX_INTR_TRACE_A BIT(7)
52/* Trace message on MBOX #B */ 52/* Trace message on MBOX #B */
53#define WL1271_ACX_INTR_TRACE_B BIT(8) 53#define WL1271_ACX_INTR_TRACE_B BIT(8)
54/* SW FW Initiated interrupt Watchdog timer expiration */
55#define WL1271_ACX_SW_INTR_WATCHDOG BIT(9)
54 56
55#define WL1271_ACX_INTR_ALL 0xFFFFFFFF 57#define WL1271_ACX_INTR_ALL 0xFFFFFFFF
56#define WL1271_ACX_ALL_EVENTS_VECTOR (WL1271_ACX_INTR_WATCHDOG | \ 58
57 WL1271_ACX_INTR_INIT_COMPLETE | \ 59/* all possible interrupts - only appropriate ones will be masked in */
58 WL1271_ACX_INTR_EVENT_A | \ 60#define WLCORE_ALL_INTR_MASK (WL1271_ACX_INTR_WATCHDOG | \
59 WL1271_ACX_INTR_EVENT_B | \ 61 WL1271_ACX_INTR_EVENT_A | \
60 WL1271_ACX_INTR_CMD_COMPLETE | \ 62 WL1271_ACX_INTR_EVENT_B | \
61 WL1271_ACX_INTR_HW_AVAILABLE | \ 63 WL1271_ACX_INTR_HW_AVAILABLE | \
62 WL1271_ACX_INTR_DATA) 64 WL1271_ACX_INTR_DATA | \
63 65 WL1271_ACX_SW_INTR_WATCHDOG)
64#define WL1271_INTR_MASK (WL1271_ACX_INTR_WATCHDOG | \
65 WL1271_ACX_INTR_EVENT_A | \
66 WL1271_ACX_INTR_EVENT_B | \
67 WL1271_ACX_INTR_HW_AVAILABLE | \
68 WL1271_ACX_INTR_DATA)
69 66
70/* Target's information element */ 67/* Target's information element */
71struct acx_header { 68struct acx_header {
@@ -121,6 +118,11 @@ enum wl1271_psm_mode {
121 118
122 /* Extreme low power */ 119 /* Extreme low power */
123 WL1271_PSM_ELP = 2, 120 WL1271_PSM_ELP = 2,
121
122 WL1271_PSM_MAX = WL1271_PSM_ELP,
123
124 /* illegal out of band value of PSM mode */
125 WL1271_PSM_ILLEGAL = 0xff
124}; 126};
125 127
126struct acx_sleep_auth { 128struct acx_sleep_auth {
@@ -417,228 +419,6 @@ struct acx_ctsprotect {
417 u8 padding[2]; 419 u8 padding[2];
418} __packed; 420} __packed;
419 421
420struct acx_tx_statistics {
421 __le32 internal_desc_overflow;
422} __packed;
423
424struct acx_rx_statistics {
425 __le32 out_of_mem;
426 __le32 hdr_overflow;
427 __le32 hw_stuck;
428 __le32 dropped;
429 __le32 fcs_err;
430 __le32 xfr_hint_trig;
431 __le32 path_reset;
432 __le32 reset_counter;
433} __packed;
434
435struct acx_dma_statistics {
436 __le32 rx_requested;
437 __le32 rx_errors;
438 __le32 tx_requested;
439 __le32 tx_errors;
440} __packed;
441
442struct acx_isr_statistics {
443 /* host command complete */
444 __le32 cmd_cmplt;
445
446 /* fiqisr() */
447 __le32 fiqs;
448
449 /* (INT_STS_ND & INT_TRIG_RX_HEADER) */
450 __le32 rx_headers;
451
452 /* (INT_STS_ND & INT_TRIG_RX_CMPLT) */
453 __le32 rx_completes;
454
455 /* (INT_STS_ND & INT_TRIG_NO_RX_BUF) */
456 __le32 rx_mem_overflow;
457
458 /* (INT_STS_ND & INT_TRIG_S_RX_RDY) */
459 __le32 rx_rdys;
460
461 /* irqisr() */
462 __le32 irqs;
463
464 /* (INT_STS_ND & INT_TRIG_TX_PROC) */
465 __le32 tx_procs;
466
467 /* (INT_STS_ND & INT_TRIG_DECRYPT_DONE) */
468 __le32 decrypt_done;
469
470 /* (INT_STS_ND & INT_TRIG_DMA0) */
471 __le32 dma0_done;
472
473 /* (INT_STS_ND & INT_TRIG_DMA1) */
474 __le32 dma1_done;
475
476 /* (INT_STS_ND & INT_TRIG_TX_EXC_CMPLT) */
477 __le32 tx_exch_complete;
478
479 /* (INT_STS_ND & INT_TRIG_COMMAND) */
480 __le32 commands;
481
482 /* (INT_STS_ND & INT_TRIG_RX_PROC) */
483 __le32 rx_procs;
484
485 /* (INT_STS_ND & INT_TRIG_PM_802) */
486 __le32 hw_pm_mode_changes;
487
488 /* (INT_STS_ND & INT_TRIG_ACKNOWLEDGE) */
489 __le32 host_acknowledges;
490
491 /* (INT_STS_ND & INT_TRIG_PM_PCI) */
492 __le32 pci_pm;
493
494 /* (INT_STS_ND & INT_TRIG_ACM_WAKEUP) */
495 __le32 wakeups;
496
497 /* (INT_STS_ND & INT_TRIG_LOW_RSSI) */
498 __le32 low_rssi;
499} __packed;
500
501struct acx_wep_statistics {
502 /* WEP address keys configured */
503 __le32 addr_key_count;
504
505 /* default keys configured */
506 __le32 default_key_count;
507
508 __le32 reserved;
509
510 /* number of times that WEP key not found on lookup */
511 __le32 key_not_found;
512
513 /* number of times that WEP key decryption failed */
514 __le32 decrypt_fail;
515
516 /* WEP packets decrypted */
517 __le32 packets;
518
519 /* WEP decrypt interrupts */
520 __le32 interrupt;
521} __packed;
522
523#define ACX_MISSED_BEACONS_SPREAD 10
524
525struct acx_pwr_statistics {
526 /* the amount of enters into power save mode (both PD & ELP) */
527 __le32 ps_enter;
528
529 /* the amount of enters into ELP mode */
530 __le32 elp_enter;
531
532 /* the amount of missing beacon interrupts to the host */
533 __le32 missing_bcns;
534
535 /* the amount of wake on host-access times */
536 __le32 wake_on_host;
537
538 /* the amount of wake on timer-expire */
539 __le32 wake_on_timer_exp;
540
541 /* the number of packets that were transmitted with PS bit set */
542 __le32 tx_with_ps;
543
544 /* the number of packets that were transmitted with PS bit clear */
545 __le32 tx_without_ps;
546
547 /* the number of received beacons */
548 __le32 rcvd_beacons;
549
550 /* the number of entering into PowerOn (power save off) */
551 __le32 power_save_off;
552
553 /* the number of entries into power save mode */
554 __le16 enable_ps;
555
556 /*
557 * the number of exits from power save, not including failed PS
558 * transitions
559 */
560 __le16 disable_ps;
561
562 /*
563 * the number of times the TSF counter was adjusted because
564 * of drift
565 */
566 __le32 fix_tsf_ps;
567
568 /* Gives statistics about the spread continuous missed beacons.
569 * The 16 LSB are dedicated for the PS mode.
570 * The 16 MSB are dedicated for the PS mode.
571 * cont_miss_bcns_spread[0] - single missed beacon.
572 * cont_miss_bcns_spread[1] - two continuous missed beacons.
573 * cont_miss_bcns_spread[2] - three continuous missed beacons.
574 * ...
575 * cont_miss_bcns_spread[9] - ten and more continuous missed beacons.
576 */
577 __le32 cont_miss_bcns_spread[ACX_MISSED_BEACONS_SPREAD];
578
579 /* the number of beacons in awake mode */
580 __le32 rcvd_awake_beacons;
581} __packed;
582
583struct acx_mic_statistics {
584 __le32 rx_pkts;
585 __le32 calc_failure;
586} __packed;
587
588struct acx_aes_statistics {
589 __le32 encrypt_fail;
590 __le32 decrypt_fail;
591 __le32 encrypt_packets;
592 __le32 decrypt_packets;
593 __le32 encrypt_interrupt;
594 __le32 decrypt_interrupt;
595} __packed;
596
597struct acx_event_statistics {
598 __le32 heart_beat;
599 __le32 calibration;
600 __le32 rx_mismatch;
601 __le32 rx_mem_empty;
602 __le32 rx_pool;
603 __le32 oom_late;
604 __le32 phy_transmit_error;
605 __le32 tx_stuck;
606} __packed;
607
608struct acx_ps_statistics {
609 __le32 pspoll_timeouts;
610 __le32 upsd_timeouts;
611 __le32 upsd_max_sptime;
612 __le32 upsd_max_apturn;
613 __le32 pspoll_max_apturn;
614 __le32 pspoll_utilization;
615 __le32 upsd_utilization;
616} __packed;
617
618struct acx_rxpipe_statistics {
619 __le32 rx_prep_beacon_drop;
620 __le32 descr_host_int_trig_rx_data;
621 __le32 beacon_buffer_thres_host_int_trig_rx_data;
622 __le32 missed_beacon_host_int_trig_rx_data;
623 __le32 tx_xfr_host_int_trig_rx_data;
624} __packed;
625
626struct acx_statistics {
627 struct acx_header header;
628
629 struct acx_tx_statistics tx;
630 struct acx_rx_statistics rx;
631 struct acx_dma_statistics dma;
632 struct acx_isr_statistics isr;
633 struct acx_wep_statistics wep;
634 struct acx_pwr_statistics pwr;
635 struct acx_aes_statistics aes;
636 struct acx_mic_statistics mic;
637 struct acx_event_statistics event;
638 struct acx_ps_statistics ps;
639 struct acx_rxpipe_statistics rxpipe;
640} __packed;
641
642struct acx_rate_class { 422struct acx_rate_class {
643 __le32 enabled_rates; 423 __le32 enabled_rates;
644 u8 short_retry_limit; 424 u8 short_retry_limit;
@@ -828,6 +608,8 @@ struct wl1271_acx_keep_alive_config {
828#define HOST_IF_CFG_RX_FIFO_ENABLE BIT(0) 608#define HOST_IF_CFG_RX_FIFO_ENABLE BIT(0)
829#define HOST_IF_CFG_TX_EXTRA_BLKS_SWAP BIT(1) 609#define HOST_IF_CFG_TX_EXTRA_BLKS_SWAP BIT(1)
830#define HOST_IF_CFG_TX_PAD_TO_SDIO_BLK BIT(3) 610#define HOST_IF_CFG_TX_PAD_TO_SDIO_BLK BIT(3)
611#define HOST_IF_CFG_RX_PAD_TO_SDIO_BLK BIT(4)
612#define HOST_IF_CFG_ADD_RX_ALIGNMENT BIT(6)
831 613
832enum { 614enum {
833 WL1271_ACX_TRIG_TYPE_LEVEL = 0, 615 WL1271_ACX_TRIG_TYPE_LEVEL = 0,
@@ -946,7 +728,7 @@ struct wl1271_acx_ht_information {
946 u8 padding[2]; 728 u8 padding[2];
947} __packed; 729} __packed;
948 730
949#define RX_BA_MAX_SESSIONS 2 731#define RX_BA_MAX_SESSIONS 3
950 732
951struct wl1271_acx_ba_initiator_policy { 733struct wl1271_acx_ba_initiator_policy {
952 struct acx_header header; 734 struct acx_header header;
@@ -1243,6 +1025,7 @@ enum {
1243 ACX_CONFIG_HANGOVER = 0x0042, 1025 ACX_CONFIG_HANGOVER = 0x0042,
1244 ACX_FEATURE_CFG = 0x0043, 1026 ACX_FEATURE_CFG = 0x0043,
1245 ACX_PROTECTION_CFG = 0x0044, 1027 ACX_PROTECTION_CFG = 0x0044,
1028 ACX_CHECKSUM_CONFIG = 0x0045,
1246}; 1029};
1247 1030
1248 1031
@@ -1281,7 +1064,7 @@ int wl1271_acx_set_preamble(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1281 enum acx_preamble_type preamble); 1064 enum acx_preamble_type preamble);
1282int wl1271_acx_cts_protect(struct wl1271 *wl, struct wl12xx_vif *wlvif, 1065int wl1271_acx_cts_protect(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1283 enum acx_ctsprotect_type ctsprotect); 1066 enum acx_ctsprotect_type ctsprotect);
1284int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats); 1067int wl1271_acx_statistics(struct wl1271 *wl, void *stats);
1285int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif); 1068int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif);
1286int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c, 1069int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c,
1287 u8 idx); 1070 u8 idx);
diff --git a/drivers/net/wireless/ti/wlcore/boot.c b/drivers/net/wireless/ti/wlcore/boot.c
index 9b98230f84ce..375ea574eafb 100644
--- a/drivers/net/wireless/ti/wlcore/boot.c
+++ b/drivers/net/wireless/ti/wlcore/boot.c
@@ -33,22 +33,35 @@
33#include "rx.h" 33#include "rx.h"
34#include "hw_ops.h" 34#include "hw_ops.h"
35 35
36static void wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag) 36static int wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag)
37{ 37{
38 u32 cpu_ctrl; 38 u32 cpu_ctrl;
39 int ret;
39 40
40 /* 10.5.0 run the firmware (I) */ 41 /* 10.5.0 run the firmware (I) */
41 cpu_ctrl = wlcore_read_reg(wl, REG_ECPU_CONTROL); 42 ret = wlcore_read_reg(wl, REG_ECPU_CONTROL, &cpu_ctrl);
43 if (ret < 0)
44 goto out;
42 45
43 /* 10.5.1 run the firmware (II) */ 46 /* 10.5.1 run the firmware (II) */
44 cpu_ctrl |= flag; 47 cpu_ctrl |= flag;
45 wlcore_write_reg(wl, REG_ECPU_CONTROL, cpu_ctrl); 48 ret = wlcore_write_reg(wl, REG_ECPU_CONTROL, cpu_ctrl);
49
50out:
51 return ret;
46} 52}
47 53
48static int wlcore_parse_fw_ver(struct wl1271 *wl) 54static int wlcore_boot_parse_fw_ver(struct wl1271 *wl,
55 struct wl1271_static_data *static_data)
49{ 56{
50 int ret; 57 int ret;
51 58
59 strncpy(wl->chip.fw_ver_str, static_data->fw_version,
60 sizeof(wl->chip.fw_ver_str));
61
62 /* make sure the string is NULL-terminated */
63 wl->chip.fw_ver_str[sizeof(wl->chip.fw_ver_str) - 1] = '\0';
64
52 ret = sscanf(wl->chip.fw_ver_str + 4, "%u.%u.%u.%u.%u", 65 ret = sscanf(wl->chip.fw_ver_str + 4, "%u.%u.%u.%u.%u",
53 &wl->chip.fw_ver[0], &wl->chip.fw_ver[1], 66 &wl->chip.fw_ver[0], &wl->chip.fw_ver[1],
54 &wl->chip.fw_ver[2], &wl->chip.fw_ver[3], 67 &wl->chip.fw_ver[2], &wl->chip.fw_ver[3],
@@ -57,43 +70,96 @@ static int wlcore_parse_fw_ver(struct wl1271 *wl)
57 if (ret != 5) { 70 if (ret != 5) {
58 wl1271_warning("fw version incorrect value"); 71 wl1271_warning("fw version incorrect value");
59 memset(wl->chip.fw_ver, 0, sizeof(wl->chip.fw_ver)); 72 memset(wl->chip.fw_ver, 0, sizeof(wl->chip.fw_ver));
60 return -EINVAL; 73 ret = -EINVAL;
74 goto out;
61 } 75 }
62 76
63 ret = wlcore_identify_fw(wl); 77 ret = wlcore_identify_fw(wl);
64 if (ret < 0) 78 if (ret < 0)
65 return ret; 79 goto out;
80out:
81 return ret;
82}
83
84static int wlcore_validate_fw_ver(struct wl1271 *wl)
85{
86 unsigned int *fw_ver = wl->chip.fw_ver;
87 unsigned int *min_ver = wl->min_fw_ver;
66 88
89 /* the chip must be exactly equal */
90 if (min_ver[FW_VER_CHIP] != fw_ver[FW_VER_CHIP])
91 goto fail;
92
93 /* always check the next digit if all previous ones are equal */
94
95 if (min_ver[FW_VER_IF_TYPE] < fw_ver[FW_VER_IF_TYPE])
96 goto out;
97 else if (min_ver[FW_VER_IF_TYPE] > fw_ver[FW_VER_IF_TYPE])
98 goto fail;
99
100 if (min_ver[FW_VER_MAJOR] < fw_ver[FW_VER_MAJOR])
101 goto out;
102 else if (min_ver[FW_VER_MAJOR] > fw_ver[FW_VER_MAJOR])
103 goto fail;
104
105 if (min_ver[FW_VER_SUBTYPE] < fw_ver[FW_VER_SUBTYPE])
106 goto out;
107 else if (min_ver[FW_VER_SUBTYPE] > fw_ver[FW_VER_SUBTYPE])
108 goto fail;
109
110 if (min_ver[FW_VER_MINOR] < fw_ver[FW_VER_MINOR])
111 goto out;
112 else if (min_ver[FW_VER_MINOR] > fw_ver[FW_VER_MINOR])
113 goto fail;
114
115out:
67 return 0; 116 return 0;
117
118fail:
119 wl1271_error("Your WiFi FW version (%u.%u.%u.%u.%u) is outdated.\n"
120 "Please use at least FW %u.%u.%u.%u.%u.\n"
121 "You can get more information at:\n"
122 "http://wireless.kernel.org/en/users/Drivers/wl12xx",
123 fw_ver[FW_VER_CHIP], fw_ver[FW_VER_IF_TYPE],
124 fw_ver[FW_VER_MAJOR], fw_ver[FW_VER_SUBTYPE],
125 fw_ver[FW_VER_MINOR], min_ver[FW_VER_CHIP],
126 min_ver[FW_VER_IF_TYPE], min_ver[FW_VER_MAJOR],
127 min_ver[FW_VER_SUBTYPE], min_ver[FW_VER_MINOR]);
128 return -EINVAL;
68} 129}
69 130
70static int wlcore_boot_fw_version(struct wl1271 *wl) 131static int wlcore_boot_static_data(struct wl1271 *wl)
71{ 132{
72 struct wl1271_static_data *static_data; 133 struct wl1271_static_data *static_data;
134 size_t len = sizeof(*static_data) + wl->static_data_priv_len;
73 int ret; 135 int ret;
74 136
75 static_data = kmalloc(sizeof(*static_data), GFP_KERNEL | GFP_DMA); 137 static_data = kmalloc(len, GFP_KERNEL);
76 if (!static_data) { 138 if (!static_data) {
77 wl1271_error("Couldn't allocate memory for static data!"); 139 ret = -ENOMEM;
78 return -ENOMEM; 140 goto out;
79 } 141 }
80 142
81 wl1271_read(wl, wl->cmd_box_addr, static_data, sizeof(*static_data), 143 ret = wlcore_read(wl, wl->cmd_box_addr, static_data, len, false);
82 false); 144 if (ret < 0)
83 145 goto out_free;
84 strncpy(wl->chip.fw_ver_str, static_data->fw_version,
85 sizeof(wl->chip.fw_ver_str));
86 146
87 kfree(static_data); 147 ret = wlcore_boot_parse_fw_ver(wl, static_data);
148 if (ret < 0)
149 goto out_free;
88 150
89 /* make sure the string is NULL-terminated */ 151 ret = wlcore_validate_fw_ver(wl);
90 wl->chip.fw_ver_str[sizeof(wl->chip.fw_ver_str) - 1] = '\0'; 152 if (ret < 0)
153 goto out_free;
91 154
92 ret = wlcore_parse_fw_ver(wl); 155 ret = wlcore_handle_static_data(wl, static_data);
93 if (ret < 0) 156 if (ret < 0)
94 return ret; 157 goto out_free;
95 158
96 return 0; 159out_free:
160 kfree(static_data);
161out:
162 return ret;
97} 163}
98 164
99static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf, 165static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
@@ -102,6 +168,7 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
102 struct wlcore_partition_set partition; 168 struct wlcore_partition_set partition;
103 int addr, chunk_num, partition_limit; 169 int addr, chunk_num, partition_limit;
104 u8 *p, *chunk; 170 u8 *p, *chunk;
171 int ret;
105 172
106 /* whal_FwCtrl_LoadFwImageSm() */ 173 /* whal_FwCtrl_LoadFwImageSm() */
107 174
@@ -123,7 +190,9 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
123 190
124 memcpy(&partition, &wl->ptable[PART_DOWN], sizeof(partition)); 191 memcpy(&partition, &wl->ptable[PART_DOWN], sizeof(partition));
125 partition.mem.start = dest; 192 partition.mem.start = dest;
126 wlcore_set_partition(wl, &partition); 193 ret = wlcore_set_partition(wl, &partition);
194 if (ret < 0)
195 goto out;
127 196
128 /* 10.1 set partition limit and chunk num */ 197 /* 10.1 set partition limit and chunk num */
129 chunk_num = 0; 198 chunk_num = 0;
@@ -137,7 +206,9 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
137 partition_limit = chunk_num * CHUNK_SIZE + 206 partition_limit = chunk_num * CHUNK_SIZE +
138 wl->ptable[PART_DOWN].mem.size; 207 wl->ptable[PART_DOWN].mem.size;
139 partition.mem.start = addr; 208 partition.mem.start = addr;
140 wlcore_set_partition(wl, &partition); 209 ret = wlcore_set_partition(wl, &partition);
210 if (ret < 0)
211 goto out;
141 } 212 }
142 213
143 /* 10.3 upload the chunk */ 214 /* 10.3 upload the chunk */
@@ -146,7 +217,9 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
146 memcpy(chunk, p, CHUNK_SIZE); 217 memcpy(chunk, p, CHUNK_SIZE);
147 wl1271_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x", 218 wl1271_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x",
148 p, addr); 219 p, addr);
149 wl1271_write(wl, addr, chunk, CHUNK_SIZE, false); 220 ret = wlcore_write(wl, addr, chunk, CHUNK_SIZE, false);
221 if (ret < 0)
222 goto out;
150 223
151 chunk_num++; 224 chunk_num++;
152 } 225 }
@@ -157,10 +230,11 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
157 memcpy(chunk, p, fw_data_len % CHUNK_SIZE); 230 memcpy(chunk, p, fw_data_len % CHUNK_SIZE);
158 wl1271_debug(DEBUG_BOOT, "uploading fw last chunk (%zd B) 0x%p to 0x%x", 231 wl1271_debug(DEBUG_BOOT, "uploading fw last chunk (%zd B) 0x%p to 0x%x",
159 fw_data_len % CHUNK_SIZE, p, addr); 232 fw_data_len % CHUNK_SIZE, p, addr);
160 wl1271_write(wl, addr, chunk, fw_data_len % CHUNK_SIZE, false); 233 ret = wlcore_write(wl, addr, chunk, fw_data_len % CHUNK_SIZE, false);
161 234
235out:
162 kfree(chunk); 236 kfree(chunk);
163 return 0; 237 return ret;
164} 238}
165 239
166int wlcore_boot_upload_firmware(struct wl1271 *wl) 240int wlcore_boot_upload_firmware(struct wl1271 *wl)
@@ -203,9 +277,12 @@ int wlcore_boot_upload_nvs(struct wl1271 *wl)
203 int i; 277 int i;
204 u32 dest_addr, val; 278 u32 dest_addr, val;
205 u8 *nvs_ptr, *nvs_aligned; 279 u8 *nvs_ptr, *nvs_aligned;
280 int ret;
206 281
207 if (wl->nvs == NULL) 282 if (wl->nvs == NULL) {
283 wl1271_error("NVS file is needed during boot");
208 return -ENODEV; 284 return -ENODEV;
285 }
209 286
210 if (wl->quirks & WLCORE_QUIRK_LEGACY_NVS) { 287 if (wl->quirks & WLCORE_QUIRK_LEGACY_NVS) {
211 struct wl1271_nvs_file *nvs = 288 struct wl1271_nvs_file *nvs =
@@ -298,7 +375,9 @@ int wlcore_boot_upload_nvs(struct wl1271 *wl)
298 wl1271_debug(DEBUG_BOOT, 375 wl1271_debug(DEBUG_BOOT,
299 "nvs burst write 0x%x: 0x%x", 376 "nvs burst write 0x%x: 0x%x",
300 dest_addr, val); 377 dest_addr, val);
301 wl1271_write32(wl, dest_addr, val); 378 ret = wlcore_write32(wl, dest_addr, val);
379 if (ret < 0)
380 return ret;
302 381
303 nvs_ptr += 4; 382 nvs_ptr += 4;
304 dest_addr += 4; 383 dest_addr += 4;
@@ -324,7 +403,9 @@ int wlcore_boot_upload_nvs(struct wl1271 *wl)
324 nvs_len -= nvs_ptr - (u8 *)wl->nvs; 403 nvs_len -= nvs_ptr - (u8 *)wl->nvs;
325 404
326 /* Now we must set the partition correctly */ 405 /* Now we must set the partition correctly */
327 wlcore_set_partition(wl, &wl->ptable[PART_WORK]); 406 ret = wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
407 if (ret < 0)
408 return ret;
328 409
329 /* Copy the NVS tables to a new block to ensure alignment */ 410 /* Copy the NVS tables to a new block to ensure alignment */
330 nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL); 411 nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL);
@@ -332,11 +413,11 @@ int wlcore_boot_upload_nvs(struct wl1271 *wl)
332 return -ENOMEM; 413 return -ENOMEM;
333 414
334 /* And finally we upload the NVS tables */ 415 /* And finally we upload the NVS tables */
335 wlcore_write_data(wl, REG_CMD_MBOX_ADDRESS, 416 ret = wlcore_write_data(wl, REG_CMD_MBOX_ADDRESS, nvs_aligned, nvs_len,
336 nvs_aligned, nvs_len, false); 417 false);
337 418
338 kfree(nvs_aligned); 419 kfree(nvs_aligned);
339 return 0; 420 return ret;
340 421
341out_badnvs: 422out_badnvs:
342 wl1271_error("nvs data is malformed"); 423 wl1271_error("nvs data is malformed");
@@ -350,11 +431,17 @@ int wlcore_boot_run_firmware(struct wl1271 *wl)
350 u32 chip_id, intr; 431 u32 chip_id, intr;
351 432
352 /* Make sure we have the boot partition */ 433 /* Make sure we have the boot partition */
353 wlcore_set_partition(wl, &wl->ptable[PART_BOOT]); 434 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
435 if (ret < 0)
436 return ret;
354 437
355 wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT); 438 ret = wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT);
439 if (ret < 0)
440 return ret;
356 441
357 chip_id = wlcore_read_reg(wl, REG_CHIP_ID_B); 442 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &chip_id);
443 if (ret < 0)
444 return ret;
358 445
359 wl1271_debug(DEBUG_BOOT, "chip id after firmware boot: 0x%x", chip_id); 446 wl1271_debug(DEBUG_BOOT, "chip id after firmware boot: 0x%x", chip_id);
360 447
@@ -367,7 +454,9 @@ int wlcore_boot_run_firmware(struct wl1271 *wl)
367 loop = 0; 454 loop = 0;
368 while (loop++ < INIT_LOOP) { 455 while (loop++ < INIT_LOOP) {
369 udelay(INIT_LOOP_DELAY); 456 udelay(INIT_LOOP_DELAY);
370 intr = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR); 457 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &intr);
458 if (ret < 0)
459 return ret;
371 460
372 if (intr == 0xffffffff) { 461 if (intr == 0xffffffff) {
373 wl1271_error("error reading hardware complete " 462 wl1271_error("error reading hardware complete "
@@ -376,8 +465,10 @@ int wlcore_boot_run_firmware(struct wl1271 *wl)
376 } 465 }
377 /* check that ACX_INTR_INIT_COMPLETE is enabled */ 466 /* check that ACX_INTR_INIT_COMPLETE is enabled */
378 else if (intr & WL1271_ACX_INTR_INIT_COMPLETE) { 467 else if (intr & WL1271_ACX_INTR_INIT_COMPLETE) {
379 wlcore_write_reg(wl, REG_INTERRUPT_ACK, 468 ret = wlcore_write_reg(wl, REG_INTERRUPT_ACK,
380 WL1271_ACX_INTR_INIT_COMPLETE); 469 WL1271_ACX_INTR_INIT_COMPLETE);
470 if (ret < 0)
471 return ret;
381 break; 472 break;
382 } 473 }
383 } 474 }
@@ -389,20 +480,25 @@ int wlcore_boot_run_firmware(struct wl1271 *wl)
389 } 480 }
390 481
391 /* get hardware config command mail box */ 482 /* get hardware config command mail box */
392 wl->cmd_box_addr = wlcore_read_reg(wl, REG_COMMAND_MAILBOX_PTR); 483 ret = wlcore_read_reg(wl, REG_COMMAND_MAILBOX_PTR, &wl->cmd_box_addr);
484 if (ret < 0)
485 return ret;
393 486
394 wl1271_debug(DEBUG_MAILBOX, "cmd_box_addr 0x%x", wl->cmd_box_addr); 487 wl1271_debug(DEBUG_MAILBOX, "cmd_box_addr 0x%x", wl->cmd_box_addr);
395 488
396 /* get hardware config event mail box */ 489 /* get hardware config event mail box */
397 wl->mbox_ptr[0] = wlcore_read_reg(wl, REG_EVENT_MAILBOX_PTR); 490 ret = wlcore_read_reg(wl, REG_EVENT_MAILBOX_PTR, &wl->mbox_ptr[0]);
491 if (ret < 0)
492 return ret;
493
398 wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox); 494 wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox);
399 495
400 wl1271_debug(DEBUG_MAILBOX, "MBOX ptrs: 0x%x 0x%x", 496 wl1271_debug(DEBUG_MAILBOX, "MBOX ptrs: 0x%x 0x%x",
401 wl->mbox_ptr[0], wl->mbox_ptr[1]); 497 wl->mbox_ptr[0], wl->mbox_ptr[1]);
402 498
403 ret = wlcore_boot_fw_version(wl); 499 ret = wlcore_boot_static_data(wl);
404 if (ret < 0) { 500 if (ret < 0) {
405 wl1271_error("couldn't boot firmware"); 501 wl1271_error("error getting static data");
406 return ret; 502 return ret;
407 } 503 }
408 504
@@ -436,9 +532,9 @@ int wlcore_boot_run_firmware(struct wl1271 *wl)
436 } 532 }
437 533
438 /* set the working partition to its "running" mode offset */ 534 /* set the working partition to its "running" mode offset */
439 wlcore_set_partition(wl, &wl->ptable[PART_WORK]); 535 ret = wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
440 536
441 /* firmware startup completed */ 537 /* firmware startup completed */
442 return 0; 538 return ret;
443} 539}
444EXPORT_SYMBOL_GPL(wlcore_boot_run_firmware); 540EXPORT_SYMBOL_GPL(wlcore_boot_run_firmware);
diff --git a/drivers/net/wireless/ti/wlcore/boot.h b/drivers/net/wireless/ti/wlcore/boot.h
index 094981dd2227..a525225f990c 100644
--- a/drivers/net/wireless/ti/wlcore/boot.h
+++ b/drivers/net/wireless/ti/wlcore/boot.h
@@ -40,6 +40,7 @@ struct wl1271_static_data {
40 u8 fw_version[WL1271_FW_VERSION_MAX_LEN]; 40 u8 fw_version[WL1271_FW_VERSION_MAX_LEN];
41 u32 hw_version; 41 u32 hw_version;
42 u8 tx_power_table[WL1271_NO_SUBBANDS][WL1271_NO_POWER_LEVELS]; 42 u8 tx_power_table[WL1271_NO_SUBBANDS][WL1271_NO_POWER_LEVELS];
43 u8 priv[0];
43}; 44};
44 45
45/* number of times we try to read the INIT interrupt */ 46/* number of times we try to read the INIT interrupt */
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 5b128a971449..a23949cdaebc 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -36,8 +36,10 @@
36#include "cmd.h" 36#include "cmd.h"
37#include "event.h" 37#include "event.h"
38#include "tx.h" 38#include "tx.h"
39#include "hw_ops.h"
39 40
40#define WL1271_CMD_FAST_POLL_COUNT 50 41#define WL1271_CMD_FAST_POLL_COUNT 50
42#define WL1271_WAIT_EVENT_FAST_POLL_COUNT 20
41 43
42/* 44/*
43 * send command to firmware 45 * send command to firmware
@@ -64,17 +66,24 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
64 WARN_ON(len % 4 != 0); 66 WARN_ON(len % 4 != 0);
65 WARN_ON(test_bit(WL1271_FLAG_IN_ELP, &wl->flags)); 67 WARN_ON(test_bit(WL1271_FLAG_IN_ELP, &wl->flags));
66 68
67 wl1271_write(wl, wl->cmd_box_addr, buf, len, false); 69 ret = wlcore_write(wl, wl->cmd_box_addr, buf, len, false);
70 if (ret < 0)
71 goto fail;
68 72
69 /* 73 /*
70 * TODO: we just need this because one bit is in a different 74 * TODO: we just need this because one bit is in a different
71 * place. Is there any better way? 75 * place. Is there any better way?
72 */ 76 */
73 wl->ops->trigger_cmd(wl, wl->cmd_box_addr, buf, len); 77 ret = wl->ops->trigger_cmd(wl, wl->cmd_box_addr, buf, len);
78 if (ret < 0)
79 goto fail;
74 80
75 timeout = jiffies + msecs_to_jiffies(WL1271_COMMAND_TIMEOUT); 81 timeout = jiffies + msecs_to_jiffies(WL1271_COMMAND_TIMEOUT);
76 82
77 intr = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR); 83 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &intr);
84 if (ret < 0)
85 goto fail;
86
78 while (!(intr & WL1271_ACX_INTR_CMD_COMPLETE)) { 87 while (!(intr & WL1271_ACX_INTR_CMD_COMPLETE)) {
79 if (time_after(jiffies, timeout)) { 88 if (time_after(jiffies, timeout)) {
80 wl1271_error("command complete timeout"); 89 wl1271_error("command complete timeout");
@@ -88,13 +97,18 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
88 else 97 else
89 msleep(1); 98 msleep(1);
90 99
91 intr = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR); 100 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &intr);
101 if (ret < 0)
102 goto fail;
92 } 103 }
93 104
94 /* read back the status code of the command */ 105 /* read back the status code of the command */
95 if (res_len == 0) 106 if (res_len == 0)
96 res_len = sizeof(struct wl1271_cmd_header); 107 res_len = sizeof(struct wl1271_cmd_header);
97 wl1271_read(wl, wl->cmd_box_addr, cmd, res_len, false); 108
109 ret = wlcore_read(wl, wl->cmd_box_addr, cmd, res_len, false);
110 if (ret < 0)
111 goto fail;
98 112
99 status = le16_to_cpu(cmd->status); 113 status = le16_to_cpu(cmd->status);
100 if (status != CMD_STATUS_SUCCESS) { 114 if (status != CMD_STATUS_SUCCESS) {
@@ -103,11 +117,14 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
103 goto fail; 117 goto fail;
104 } 118 }
105 119
106 wlcore_write_reg(wl, REG_INTERRUPT_ACK, WL1271_ACX_INTR_CMD_COMPLETE); 120 ret = wlcore_write_reg(wl, REG_INTERRUPT_ACK,
121 WL1271_ACX_INTR_CMD_COMPLETE);
122 if (ret < 0)
123 goto fail;
124
107 return 0; 125 return 0;
108 126
109fail: 127fail:
110 WARN_ON(1);
111 wl12xx_queue_recovery_work(wl); 128 wl12xx_queue_recovery_work(wl);
112 return ret; 129 return ret;
113} 130}
@@ -116,35 +133,50 @@ fail:
116 * Poll the mailbox event field until any of the bits in the mask is set or a 133 * Poll the mailbox event field until any of the bits in the mask is set or a
117 * timeout occurs (WL1271_EVENT_TIMEOUT in msecs) 134 * timeout occurs (WL1271_EVENT_TIMEOUT in msecs)
118 */ 135 */
119static int wl1271_cmd_wait_for_event_or_timeout(struct wl1271 *wl, u32 mask) 136static int wl1271_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
137 u32 mask, bool *timeout)
120{ 138{
121 u32 *events_vector; 139 u32 *events_vector;
122 u32 event; 140 u32 event;
123 unsigned long timeout; 141 unsigned long timeout_time;
142 u16 poll_count = 0;
124 int ret = 0; 143 int ret = 0;
125 144
145 *timeout = false;
146
126 events_vector = kmalloc(sizeof(*events_vector), GFP_KERNEL | GFP_DMA); 147 events_vector = kmalloc(sizeof(*events_vector), GFP_KERNEL | GFP_DMA);
127 if (!events_vector) 148 if (!events_vector)
128 return -ENOMEM; 149 return -ENOMEM;
129 150
130 timeout = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT); 151 timeout_time = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);
131 152
132 do { 153 do {
133 if (time_after(jiffies, timeout)) { 154 if (time_after(jiffies, timeout_time)) {
134 wl1271_debug(DEBUG_CMD, "timeout waiting for event %d", 155 wl1271_debug(DEBUG_CMD, "timeout waiting for event %d",
135 (int)mask); 156 (int)mask);
136 ret = -ETIMEDOUT; 157 *timeout = true;
137 goto out; 158 goto out;
138 } 159 }
139 160
140 msleep(1); 161 poll_count++;
162 if (poll_count < WL1271_WAIT_EVENT_FAST_POLL_COUNT)
163 usleep_range(50, 51);
164 else
165 usleep_range(1000, 5000);
141 166
142 /* read from both event fields */ 167 /* read from both event fields */
143 wl1271_read(wl, wl->mbox_ptr[0], events_vector, 168 ret = wlcore_read(wl, wl->mbox_ptr[0], events_vector,
144 sizeof(*events_vector), false); 169 sizeof(*events_vector), false);
170 if (ret < 0)
171 goto out;
172
145 event = *events_vector & mask; 173 event = *events_vector & mask;
146 wl1271_read(wl, wl->mbox_ptr[1], events_vector, 174
147 sizeof(*events_vector), false); 175 ret = wlcore_read(wl, wl->mbox_ptr[1], events_vector,
176 sizeof(*events_vector), false);
177 if (ret < 0)
178 goto out;
179
148 event |= *events_vector & mask; 180 event |= *events_vector & mask;
149 } while (!event); 181 } while (!event);
150 182
@@ -156,9 +188,10 @@ out:
156static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask) 188static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
157{ 189{
158 int ret; 190 int ret;
191 bool timeout = false;
159 192
160 ret = wl1271_cmd_wait_for_event_or_timeout(wl, mask); 193 ret = wl1271_cmd_wait_for_event_or_timeout(wl, mask, &timeout);
161 if (ret != 0) { 194 if (ret != 0 || timeout) {
162 wl12xx_queue_recovery_work(wl); 195 wl12xx_queue_recovery_work(wl);
163 return ret; 196 return ret;
164 } 197 }
@@ -291,6 +324,23 @@ static int wl12xx_get_new_session_id(struct wl1271 *wl,
291 return wlvif->session_counter; 324 return wlvif->session_counter;
292} 325}
293 326
327static u8 wlcore_get_native_channel_type(u8 nl_channel_type)
328{
329 switch (nl_channel_type) {
330 case NL80211_CHAN_NO_HT:
331 return WLCORE_CHAN_NO_HT;
332 case NL80211_CHAN_HT20:
333 return WLCORE_CHAN_HT20;
334 case NL80211_CHAN_HT40MINUS:
335 return WLCORE_CHAN_HT40MINUS;
336 case NL80211_CHAN_HT40PLUS:
337 return WLCORE_CHAN_HT40PLUS;
338 default:
339 WARN_ON(1);
340 return WLCORE_CHAN_NO_HT;
341 }
342}
343
294static int wl12xx_cmd_role_start_dev(struct wl1271 *wl, 344static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
295 struct wl12xx_vif *wlvif) 345 struct wl12xx_vif *wlvif)
296{ 346{
@@ -407,6 +457,7 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
407 memcpy(cmd->sta.ssid, wlvif->ssid, wlvif->ssid_len); 457 memcpy(cmd->sta.ssid, wlvif->ssid, wlvif->ssid_len);
408 memcpy(cmd->sta.bssid, vif->bss_conf.bssid, ETH_ALEN); 458 memcpy(cmd->sta.bssid, vif->bss_conf.bssid, ETH_ALEN);
409 cmd->sta.local_rates = cpu_to_le32(wlvif->rate_set); 459 cmd->sta.local_rates = cpu_to_le32(wlvif->rate_set);
460 cmd->channel_type = wlcore_get_native_channel_type(wlvif->channel_type);
410 461
411 if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) { 462 if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) {
412 ret = wl12xx_allocate_link(wl, wlvif, &wlvif->sta.hlid); 463 ret = wl12xx_allocate_link(wl, wlvif, &wlvif->sta.hlid);
@@ -482,6 +533,7 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
482 struct wl12xx_cmd_role_start *cmd; 533 struct wl12xx_cmd_role_start *cmd;
483 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); 534 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
484 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; 535 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
536 u32 supported_rates;
485 int ret; 537 int ret;
486 538
487 wl1271_debug(DEBUG_CMD, "cmd role start ap %d", wlvif->role_id); 539 wl1271_debug(DEBUG_CMD, "cmd role start ap %d", wlvif->role_id);
@@ -519,6 +571,7 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
519 /* FIXME: Change when adding DFS */ 571 /* FIXME: Change when adding DFS */
520 cmd->ap.reset_tsf = 1; /* By default reset AP TSF */ 572 cmd->ap.reset_tsf = 1; /* By default reset AP TSF */
521 cmd->channel = wlvif->channel; 573 cmd->channel = wlvif->channel;
574 cmd->channel_type = wlcore_get_native_channel_type(wlvif->channel_type);
522 575
523 if (!bss_conf->hidden_ssid) { 576 if (!bss_conf->hidden_ssid) {
524 /* take the SSID from the beacon for backward compatibility */ 577 /* take the SSID from the beacon for backward compatibility */
@@ -531,7 +584,13 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
531 memcpy(cmd->ap.ssid, bss_conf->ssid, bss_conf->ssid_len); 584 memcpy(cmd->ap.ssid, bss_conf->ssid, bss_conf->ssid_len);
532 } 585 }
533 586
534 cmd->ap.local_rates = cpu_to_le32(0xffffffff); 587 supported_rates = CONF_TX_AP_ENABLED_RATES | CONF_TX_MCS_RATES |
588 wlcore_hw_ap_get_mimo_wide_rate_mask(wl, wlvif);
589
590 wl1271_debug(DEBUG_CMD, "cmd role start ap with supported_rates 0x%08x",
591 supported_rates);
592
593 cmd->ap.local_rates = cpu_to_le32(supported_rates);
535 594
536 switch (wlvif->band) { 595 switch (wlvif->band) {
537 case IEEE80211_BAND_2GHZ: 596 case IEEE80211_BAND_2GHZ:
@@ -797,6 +856,7 @@ out:
797 kfree(cmd); 856 kfree(cmd);
798 return ret; 857 return ret;
799} 858}
859EXPORT_SYMBOL_GPL(wl1271_cmd_data_path);
800 860
801int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif, 861int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
802 u8 ps_mode, u16 auto_ps_timeout) 862 u8 ps_mode, u16 auto_ps_timeout)
@@ -953,12 +1013,14 @@ out:
953int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif, 1013int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
954 u8 role_id, u8 band, 1014 u8 role_id, u8 band,
955 const u8 *ssid, size_t ssid_len, 1015 const u8 *ssid, size_t ssid_len,
956 const u8 *ie, size_t ie_len) 1016 const u8 *ie, size_t ie_len, bool sched_scan)
957{ 1017{
958 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); 1018 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
959 struct sk_buff *skb; 1019 struct sk_buff *skb;
960 int ret; 1020 int ret;
961 u32 rate; 1021 u32 rate;
1022 u16 template_id_2_4 = CMD_TEMPL_CFG_PROBE_REQ_2_4;
1023 u16 template_id_5 = CMD_TEMPL_CFG_PROBE_REQ_5;
962 1024
963 skb = ieee80211_probereq_get(wl->hw, vif, ssid, ssid_len, 1025 skb = ieee80211_probereq_get(wl->hw, vif, ssid, ssid_len,
964 ie, ie_len); 1026 ie, ie_len);
@@ -969,14 +1031,20 @@ int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
969 1031
970 wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len); 1032 wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len);
971 1033
1034 if (!sched_scan &&
1035 (wl->quirks & WLCORE_QUIRK_DUAL_PROBE_TMPL)) {
1036 template_id_2_4 = CMD_TEMPL_APP_PROBE_REQ_2_4;
1037 template_id_5 = CMD_TEMPL_APP_PROBE_REQ_5;
1038 }
1039
972 rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]); 1040 rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
973 if (band == IEEE80211_BAND_2GHZ) 1041 if (band == IEEE80211_BAND_2GHZ)
974 ret = wl1271_cmd_template_set(wl, role_id, 1042 ret = wl1271_cmd_template_set(wl, role_id,
975 CMD_TEMPL_CFG_PROBE_REQ_2_4, 1043 template_id_2_4,
976 skb->data, skb->len, 0, rate); 1044 skb->data, skb->len, 0, rate);
977 else 1045 else
978 ret = wl1271_cmd_template_set(wl, role_id, 1046 ret = wl1271_cmd_template_set(wl, role_id,
979 CMD_TEMPL_CFG_PROBE_REQ_5, 1047 template_id_5,
980 skb->data, skb->len, 0, rate); 1048 skb->data, skb->len, 0, rate);
981 1049
982out: 1050out:
@@ -1018,7 +1086,7 @@ out:
1018 1086
1019int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif) 1087int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1020{ 1088{
1021 int ret, extra; 1089 int ret, extra = 0;
1022 u16 fc; 1090 u16 fc;
1023 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); 1091 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
1024 struct sk_buff *skb; 1092 struct sk_buff *skb;
@@ -1057,7 +1125,8 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1057 /* encryption space */ 1125 /* encryption space */
1058 switch (wlvif->encryption_type) { 1126 switch (wlvif->encryption_type) {
1059 case KEY_TKIP: 1127 case KEY_TKIP:
1060 extra = WL1271_EXTRA_SPACE_TKIP; 1128 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
1129 extra = WL1271_EXTRA_SPACE_TKIP;
1061 break; 1130 break;
1062 case KEY_AES: 1131 case KEY_AES:
1063 extra = WL1271_EXTRA_SPACE_AES; 1132 extra = WL1271_EXTRA_SPACE_AES;
@@ -1346,13 +1415,18 @@ int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1346 1415
1347 for (i = 0; i < NUM_ACCESS_CATEGORIES_COPY; i++) 1416 for (i = 0; i < NUM_ACCESS_CATEGORIES_COPY; i++)
1348 if (sta->wme && (sta->uapsd_queues & BIT(i))) 1417 if (sta->wme && (sta->uapsd_queues & BIT(i)))
1349 cmd->psd_type[i] = WL1271_PSD_UPSD_TRIGGER; 1418 cmd->psd_type[NUM_ACCESS_CATEGORIES_COPY-1-i] =
1419 WL1271_PSD_UPSD_TRIGGER;
1350 else 1420 else
1351 cmd->psd_type[i] = WL1271_PSD_LEGACY; 1421 cmd->psd_type[NUM_ACCESS_CATEGORIES_COPY-1-i] =
1422 WL1271_PSD_LEGACY;
1423
1352 1424
1353 sta_rates = sta->supp_rates[wlvif->band]; 1425 sta_rates = sta->supp_rates[wlvif->band];
1354 if (sta->ht_cap.ht_supported) 1426 if (sta->ht_cap.ht_supported)
1355 sta_rates |= sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET; 1427 sta_rates |=
1428 (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET) |
1429 (sta->ht_cap.mcs.rx_mask[1] << HW_MIMO_RATES_OFFSET);
1356 1430
1357 cmd->supported_rates = 1431 cmd->supported_rates =
1358 cpu_to_le32(wl1271_tx_enabled_rates_get(wl, sta_rates, 1432 cpu_to_le32(wl1271_tx_enabled_rates_get(wl, sta_rates,
@@ -1378,6 +1452,7 @@ int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid)
1378{ 1452{
1379 struct wl12xx_cmd_remove_peer *cmd; 1453 struct wl12xx_cmd_remove_peer *cmd;
1380 int ret; 1454 int ret;
1455 bool timeout = false;
1381 1456
1382 wl1271_debug(DEBUG_CMD, "cmd remove peer %d", (int)hlid); 1457 wl1271_debug(DEBUG_CMD, "cmd remove peer %d", (int)hlid);
1383 1458
@@ -1398,12 +1473,16 @@ int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid)
1398 goto out_free; 1473 goto out_free;
1399 } 1474 }
1400 1475
1476 ret = wl1271_cmd_wait_for_event_or_timeout(wl,
1477 PEER_REMOVE_COMPLETE_EVENT_ID,
1478 &timeout);
1401 /* 1479 /*
1402 * We are ok with a timeout here. The event is sometimes not sent 1480 * We are ok with a timeout here. The event is sometimes not sent
1403 * due to a firmware bug. 1481 * due to a firmware bug. In case of another error (like SDIO timeout)
1482 * queue a recovery.
1404 */ 1483 */
1405 wl1271_cmd_wait_for_event_or_timeout(wl, 1484 if (ret)
1406 PEER_REMOVE_COMPLETE_EVENT_ID); 1485 wl12xx_queue_recovery_work(wl);
1407 1486
1408out_free: 1487out_free:
1409 kfree(cmd); 1488 kfree(cmd);
@@ -1573,19 +1652,25 @@ out:
1573int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id) 1652int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id)
1574{ 1653{
1575 int ret = 0; 1654 int ret = 0;
1655 bool is_first_roc;
1576 1656
1577 if (WARN_ON(test_bit(role_id, wl->roc_map))) 1657 if (WARN_ON(test_bit(role_id, wl->roc_map)))
1578 return 0; 1658 return 0;
1579 1659
1660 is_first_roc = (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) >=
1661 WL12XX_MAX_ROLES);
1662
1580 ret = wl12xx_cmd_roc(wl, wlvif, role_id); 1663 ret = wl12xx_cmd_roc(wl, wlvif, role_id);
1581 if (ret < 0) 1664 if (ret < 0)
1582 goto out; 1665 goto out;
1583 1666
1584 ret = wl1271_cmd_wait_for_event(wl, 1667 if (is_first_roc) {
1585 REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID); 1668 ret = wl1271_cmd_wait_for_event(wl,
1586 if (ret < 0) { 1669 REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID);
1587 wl1271_error("cmd roc event completion error"); 1670 if (ret < 0) {
1588 goto out; 1671 wl1271_error("cmd roc event completion error");
1672 goto out;
1673 }
1589 } 1674 }
1590 1675
1591 __set_bit(role_id, wl->roc_map); 1676 __set_bit(role_id, wl->roc_map);
@@ -1714,7 +1799,9 @@ int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1714 return -EINVAL; 1799 return -EINVAL;
1715 1800
1716 /* flush all pending packets */ 1801 /* flush all pending packets */
1717 wl1271_tx_work_locked(wl); 1802 ret = wlcore_tx_work_locked(wl);
1803 if (ret < 0)
1804 goto out;
1718 1805
1719 if (test_bit(wlvif->dev_role_id, wl->roc_map)) { 1806 if (test_bit(wlvif->dev_role_id, wl->roc_map)) {
1720 ret = wl12xx_croc(wl, wlvif->dev_role_id); 1807 ret = wl12xx_croc(wl, wlvif->dev_role_id);
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index a46ae07cb77e..d7d9f801e506 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -58,7 +58,7 @@ int wl1271_cmd_build_ps_poll(struct wl1271 *wl, struct wl12xx_vif *wlvif,
58int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif, 58int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
59 u8 role_id, u8 band, 59 u8 role_id, u8 band,
60 const u8 *ssid, size_t ssid_len, 60 const u8 *ssid, size_t ssid_len,
61 const u8 *ie, size_t ie_len); 61 const u8 *ie, size_t ie_len, bool sched_scan);
62struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl, 62struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
63 struct wl12xx_vif *wlvif, 63 struct wl12xx_vif *wlvif,
64 struct sk_buff *skb); 64 struct sk_buff *skb);
@@ -172,8 +172,8 @@ enum cmd_templ {
172 CMD_TEMPL_PS_POLL, 172 CMD_TEMPL_PS_POLL,
173 CMD_TEMPL_KLV, 173 CMD_TEMPL_KLV,
174 CMD_TEMPL_DISCONNECT, 174 CMD_TEMPL_DISCONNECT,
175 CMD_TEMPL_PROBE_REQ_2_4, /* for firmware internal use only */ 175 CMD_TEMPL_APP_PROBE_REQ_2_4,
176 CMD_TEMPL_PROBE_REQ_5, /* for firmware internal use only */ 176 CMD_TEMPL_APP_PROBE_REQ_5,
177 CMD_TEMPL_BAR, /* for firmware internal use only */ 177 CMD_TEMPL_BAR, /* for firmware internal use only */
178 CMD_TEMPL_CTS, /* 178 CMD_TEMPL_CTS, /*
179 * For CTS-to-self (FastCTS) mechanism 179 * For CTS-to-self (FastCTS) mechanism
@@ -192,7 +192,7 @@ enum cmd_templ {
192#define WL1271_COMMAND_TIMEOUT 2000 192#define WL1271_COMMAND_TIMEOUT 2000
193#define WL1271_CMD_TEMPL_DFLT_SIZE 252 193#define WL1271_CMD_TEMPL_DFLT_SIZE 252
194#define WL1271_CMD_TEMPL_MAX_SIZE 512 194#define WL1271_CMD_TEMPL_MAX_SIZE 512
195#define WL1271_EVENT_TIMEOUT 750 195#define WL1271_EVENT_TIMEOUT 1000
196 196
197struct wl1271_cmd_header { 197struct wl1271_cmd_header {
198 __le16 id; 198 __le16 id;
@@ -266,13 +266,22 @@ enum wlcore_band {
266 WLCORE_BAND_MAX_RADIO = 0x7F, 266 WLCORE_BAND_MAX_RADIO = 0x7F,
267}; 267};
268 268
269enum wlcore_channel_type {
270 WLCORE_CHAN_NO_HT,
271 WLCORE_CHAN_HT20,
272 WLCORE_CHAN_HT40MINUS,
273 WLCORE_CHAN_HT40PLUS
274};
275
269struct wl12xx_cmd_role_start { 276struct wl12xx_cmd_role_start {
270 struct wl1271_cmd_header header; 277 struct wl1271_cmd_header header;
271 278
272 u8 role_id; 279 u8 role_id;
273 u8 band; 280 u8 band;
274 u8 channel; 281 u8 channel;
275 u8 padding; 282
283 /* enum wlcore_channel_type */
284 u8 channel_type;
276 285
277 union { 286 union {
278 struct { 287 struct {
@@ -643,4 +652,25 @@ struct wl12xx_cmd_stop_channel_switch {
643 struct wl1271_cmd_header header; 652 struct wl1271_cmd_header header;
644} __packed; 653} __packed;
645 654
655/* Used to check radio status after calibration */
656#define MAX_TLV_LENGTH 500
657#define TEST_CMD_P2G_CAL 2 /* TX BiP */
658
659struct wl1271_cmd_cal_p2g {
660 struct wl1271_cmd_header header;
661
662 struct wl1271_cmd_test_header test;
663
664 __le32 ver;
665 __le16 len;
666 u8 buf[MAX_TLV_LENGTH];
667 u8 type;
668 u8 padding;
669
670 __le16 radio_status;
671
672 u8 sub_band_mask;
673 u8 padding2;
674} __packed;
675
646#endif /* __WL1271_CMD_H__ */ 676#endif /* __WL1271_CMD_H__ */
diff --git a/drivers/net/wireless/ti/wlcore/conf.h b/drivers/net/wireless/ti/wlcore/conf.h
index fef0db4213bc..d77224f2ac6b 100644
--- a/drivers/net/wireless/ti/wlcore/conf.h
+++ b/drivers/net/wireless/ti/wlcore/conf.h
@@ -45,7 +45,15 @@ enum {
45 CONF_HW_BIT_RATE_MCS_4 = BIT(17), 45 CONF_HW_BIT_RATE_MCS_4 = BIT(17),
46 CONF_HW_BIT_RATE_MCS_5 = BIT(18), 46 CONF_HW_BIT_RATE_MCS_5 = BIT(18),
47 CONF_HW_BIT_RATE_MCS_6 = BIT(19), 47 CONF_HW_BIT_RATE_MCS_6 = BIT(19),
48 CONF_HW_BIT_RATE_MCS_7 = BIT(20) 48 CONF_HW_BIT_RATE_MCS_7 = BIT(20),
49 CONF_HW_BIT_RATE_MCS_8 = BIT(21),
50 CONF_HW_BIT_RATE_MCS_9 = BIT(22),
51 CONF_HW_BIT_RATE_MCS_10 = BIT(23),
52 CONF_HW_BIT_RATE_MCS_11 = BIT(24),
53 CONF_HW_BIT_RATE_MCS_12 = BIT(25),
54 CONF_HW_BIT_RATE_MCS_13 = BIT(26),
55 CONF_HW_BIT_RATE_MCS_14 = BIT(27),
56 CONF_HW_BIT_RATE_MCS_15 = BIT(28),
49}; 57};
50 58
51enum { 59enum {
@@ -310,7 +318,7 @@ enum {
310struct conf_sg_settings { 318struct conf_sg_settings {
311 u32 params[CONF_SG_PARAMS_MAX]; 319 u32 params[CONF_SG_PARAMS_MAX];
312 u8 state; 320 u8 state;
313}; 321} __packed;
314 322
315enum conf_rx_queue_type { 323enum conf_rx_queue_type {
316 CONF_RX_QUEUE_TYPE_LOW_PRIORITY, /* All except the high priority */ 324 CONF_RX_QUEUE_TYPE_LOW_PRIORITY, /* All except the high priority */
@@ -394,7 +402,7 @@ struct conf_rx_settings {
394 * Range: RX_QUEUE_TYPE_RX_LOW_PRIORITY, RX_QUEUE_TYPE_RX_HIGH_PRIORITY, 402 * Range: RX_QUEUE_TYPE_RX_LOW_PRIORITY, RX_QUEUE_TYPE_RX_HIGH_PRIORITY,
395 */ 403 */
396 u8 queue_type; 404 u8 queue_type;
397}; 405} __packed;
398 406
399#define CONF_TX_MAX_RATE_CLASSES 10 407#define CONF_TX_MAX_RATE_CLASSES 10
400 408
@@ -435,6 +443,12 @@ struct conf_rx_settings {
435 CONF_HW_BIT_RATE_MCS_5 | CONF_HW_BIT_RATE_MCS_6 | \ 443 CONF_HW_BIT_RATE_MCS_5 | CONF_HW_BIT_RATE_MCS_6 | \
436 CONF_HW_BIT_RATE_MCS_7) 444 CONF_HW_BIT_RATE_MCS_7)
437 445
446#define CONF_TX_MIMO_RATES (CONF_HW_BIT_RATE_MCS_8 | \
447 CONF_HW_BIT_RATE_MCS_9 | CONF_HW_BIT_RATE_MCS_10 | \
448 CONF_HW_BIT_RATE_MCS_11 | CONF_HW_BIT_RATE_MCS_12 | \
449 CONF_HW_BIT_RATE_MCS_13 | CONF_HW_BIT_RATE_MCS_14 | \
450 CONF_HW_BIT_RATE_MCS_15)
451
438/* 452/*
439 * Default rates for management traffic when operating in AP mode. This 453 * Default rates for management traffic when operating in AP mode. This
440 * should be configured according to the basic rate set of the AP 454 * should be configured according to the basic rate set of the AP
@@ -487,7 +501,7 @@ struct conf_tx_rate_class {
487 * the policy (0 - long preamble, 1 - short preamble. 501 * the policy (0 - long preamble, 1 - short preamble.
488 */ 502 */
489 u8 aflags; 503 u8 aflags;
490}; 504} __packed;
491 505
492#define CONF_TX_MAX_AC_COUNT 4 506#define CONF_TX_MAX_AC_COUNT 4
493 507
@@ -504,7 +518,7 @@ enum conf_tx_ac {
504 CONF_TX_AC_VI = 2, /* video */ 518 CONF_TX_AC_VI = 2, /* video */
505 CONF_TX_AC_VO = 3, /* voice */ 519 CONF_TX_AC_VO = 3, /* voice */
506 CONF_TX_AC_CTS2SELF = 4, /* fictitious AC, follows AC_VO */ 520 CONF_TX_AC_CTS2SELF = 4, /* fictitious AC, follows AC_VO */
507 CONF_TX_AC_ANY_TID = 0x1f 521 CONF_TX_AC_ANY_TID = 0xff
508}; 522};
509 523
510struct conf_tx_ac_category { 524struct conf_tx_ac_category {
@@ -544,7 +558,7 @@ struct conf_tx_ac_category {
544 * Range: u16 558 * Range: u16
545 */ 559 */
546 u16 tx_op_limit; 560 u16 tx_op_limit;
547}; 561} __packed;
548 562
549#define CONF_TX_MAX_TID_COUNT 8 563#define CONF_TX_MAX_TID_COUNT 8
550 564
@@ -578,7 +592,7 @@ struct conf_tx_tid {
578 u8 ps_scheme; 592 u8 ps_scheme;
579 u8 ack_policy; 593 u8 ack_policy;
580 u32 apsd_conf[2]; 594 u32 apsd_conf[2];
581}; 595} __packed;
582 596
583struct conf_tx_settings { 597struct conf_tx_settings {
584 /* 598 /*
@@ -664,7 +678,7 @@ struct conf_tx_settings {
664 678
665 /* Time in ms for Tx watchdog timer to expire */ 679 /* Time in ms for Tx watchdog timer to expire */
666 u32 tx_watchdog_timeout; 680 u32 tx_watchdog_timeout;
667}; 681} __packed;
668 682
669enum { 683enum {
670 CONF_WAKE_UP_EVENT_BEACON = 0x01, /* Wake on every Beacon*/ 684 CONF_WAKE_UP_EVENT_BEACON = 0x01, /* Wake on every Beacon*/
@@ -711,7 +725,7 @@ struct conf_bcn_filt_rule {
711 * Version for the vendor specifie IE (221) 725 * Version for the vendor specifie IE (221)
712 */ 726 */
713 u8 version[CONF_BCN_IE_VER_LEN]; 727 u8 version[CONF_BCN_IE_VER_LEN];
714}; 728} __packed;
715 729
716#define CONF_MAX_RSSI_SNR_TRIGGERS 8 730#define CONF_MAX_RSSI_SNR_TRIGGERS 8
717 731
@@ -762,7 +776,7 @@ struct conf_sig_weights {
762 * Range: u8 776 * Range: u8
763 */ 777 */
764 u8 snr_pkt_avg_weight; 778 u8 snr_pkt_avg_weight;
765}; 779} __packed;
766 780
767enum conf_bcn_filt_mode { 781enum conf_bcn_filt_mode {
768 CONF_BCN_FILT_MODE_DISABLED = 0, 782 CONF_BCN_FILT_MODE_DISABLED = 0,
@@ -810,7 +824,7 @@ struct conf_conn_settings {
810 * 824 *
811 * Range: CONF_BCN_FILT_MODE_* 825 * Range: CONF_BCN_FILT_MODE_*
812 */ 826 */
813 enum conf_bcn_filt_mode bcn_filt_mode; 827 u8 bcn_filt_mode;
814 828
815 /* 829 /*
816 * Configure Beacon filter pass-thru rules. 830 * Configure Beacon filter pass-thru rules.
@@ -937,7 +951,13 @@ struct conf_conn_settings {
937 * Range: u16 951 * Range: u16
938 */ 952 */
939 u8 max_listen_interval; 953 u8 max_listen_interval;
940}; 954
955 /*
956 * Default sleep authorization for a new STA interface. This determines
957 * whether we can go to ELP.
958 */
959 u8 sta_sleep_auth;
960} __packed;
941 961
942enum { 962enum {
943 CONF_REF_CLK_19_2_E, 963 CONF_REF_CLK_19_2_E,
@@ -965,6 +985,11 @@ struct conf_itrim_settings {
965 985
966 /* moderation timeout in microsecs from the last TX */ 986 /* moderation timeout in microsecs from the last TX */
967 u32 timeout; 987 u32 timeout;
988} __packed;
989
990enum conf_fast_wakeup {
991 CONF_FAST_WAKEUP_ENABLE,
992 CONF_FAST_WAKEUP_DISABLE,
968}; 993};
969 994
970struct conf_pm_config_settings { 995struct conf_pm_config_settings {
@@ -978,10 +1003,10 @@ struct conf_pm_config_settings {
978 /* 1003 /*
979 * Host fast wakeup support 1004 * Host fast wakeup support
980 * 1005 *
981 * Range: true, false 1006 * Range: enum conf_fast_wakeup
982 */ 1007 */
983 bool host_fast_wakeup_support; 1008 u8 host_fast_wakeup_support;
984}; 1009} __packed;
985 1010
986struct conf_roam_trigger_settings { 1011struct conf_roam_trigger_settings {
987 /* 1012 /*
@@ -1018,7 +1043,7 @@ struct conf_roam_trigger_settings {
1018 * Range: 0 - 255 1043 * Range: 0 - 255
1019 */ 1044 */
1020 u8 avg_weight_snr_data; 1045 u8 avg_weight_snr_data;
1021}; 1046} __packed;
1022 1047
1023struct conf_scan_settings { 1048struct conf_scan_settings {
1024 /* 1049 /*
@@ -1064,7 +1089,7 @@ struct conf_scan_settings {
1064 * Range: u32 Microsecs 1089 * Range: u32 Microsecs
1065 */ 1090 */
1066 u32 split_scan_timeout; 1091 u32 split_scan_timeout;
1067}; 1092} __packed;
1068 1093
1069struct conf_sched_scan_settings { 1094struct conf_sched_scan_settings {
1070 /* 1095 /*
@@ -1102,7 +1127,7 @@ struct conf_sched_scan_settings {
1102 1127
1103 /* SNR threshold to be used for filtering */ 1128 /* SNR threshold to be used for filtering */
1104 s8 snr_threshold; 1129 s8 snr_threshold;
1105}; 1130} __packed;
1106 1131
1107struct conf_ht_setting { 1132struct conf_ht_setting {
1108 u8 rx_ba_win_size; 1133 u8 rx_ba_win_size;
@@ -1111,7 +1136,7 @@ struct conf_ht_setting {
1111 1136
1112 /* bitmap of enabled TIDs for TX BA sessions */ 1137 /* bitmap of enabled TIDs for TX BA sessions */
1113 u8 tx_ba_tid_bitmap; 1138 u8 tx_ba_tid_bitmap;
1114}; 1139} __packed;
1115 1140
1116struct conf_memory_settings { 1141struct conf_memory_settings {
1117 /* Number of stations supported in IBSS mode */ 1142 /* Number of stations supported in IBSS mode */
@@ -1151,7 +1176,7 @@ struct conf_memory_settings {
1151 * Range: 0-120 1176 * Range: 0-120
1152 */ 1177 */
1153 u8 tx_min; 1178 u8 tx_min;
1154}; 1179} __packed;
1155 1180
1156struct conf_fm_coex { 1181struct conf_fm_coex {
1157 u8 enable; 1182 u8 enable;
@@ -1164,7 +1189,7 @@ struct conf_fm_coex {
1164 u16 ldo_stabilization_time; 1189 u16 ldo_stabilization_time;
1165 u8 fm_disturbed_band_margin; 1190 u8 fm_disturbed_band_margin;
1166 u8 swallow_clk_diff; 1191 u8 swallow_clk_diff;
1167}; 1192} __packed;
1168 1193
1169struct conf_rx_streaming_settings { 1194struct conf_rx_streaming_settings {
1170 /* 1195 /*
@@ -1193,7 +1218,7 @@ struct conf_rx_streaming_settings {
1193 * enable rx streaming also when there is no coex activity 1218 * enable rx streaming also when there is no coex activity
1194 */ 1219 */
1195 u8 always; 1220 u8 always;
1196}; 1221} __packed;
1197 1222
1198struct conf_fwlog { 1223struct conf_fwlog {
1199 /* Continuous or on-demand */ 1224 /* Continuous or on-demand */
@@ -1217,7 +1242,7 @@ struct conf_fwlog {
1217 1242
1218 /* Regulates the frequency of log messages */ 1243 /* Regulates the frequency of log messages */
1219 u8 threshold; 1244 u8 threshold;
1220}; 1245} __packed;
1221 1246
1222#define ACX_RATE_MGMT_NUM_OF_RATES 13 1247#define ACX_RATE_MGMT_NUM_OF_RATES 13
1223struct conf_rate_policy_settings { 1248struct conf_rate_policy_settings {
@@ -1236,7 +1261,7 @@ struct conf_rate_policy_settings {
1236 u8 rate_check_up; 1261 u8 rate_check_up;
1237 u8 rate_check_down; 1262 u8 rate_check_down;
1238 u8 rate_retry_policy[ACX_RATE_MGMT_NUM_OF_RATES]; 1263 u8 rate_retry_policy[ACX_RATE_MGMT_NUM_OF_RATES];
1239}; 1264} __packed;
1240 1265
1241struct conf_hangover_settings { 1266struct conf_hangover_settings {
1242 u32 recover_time; 1267 u32 recover_time;
@@ -1250,7 +1275,23 @@ struct conf_hangover_settings {
1250 u8 quiet_time; 1275 u8 quiet_time;
1251 u8 increase_time; 1276 u8 increase_time;
1252 u8 window_size; 1277 u8 window_size;
1253}; 1278} __packed;
1279
1280/*
1281 * The conf version consists of 4 bytes. The two MSB are the wlcore
1282 * version, the two LSB are the lower driver's private conf
1283 * version.
1284 */
1285#define WLCORE_CONF_VERSION (0x0002 << 16)
1286#define WLCORE_CONF_MASK 0xffff0000
1287#define WLCORE_CONF_SIZE (sizeof(struct wlcore_conf_header) + \
1288 sizeof(struct wlcore_conf))
1289
1290struct wlcore_conf_header {
1291 __le32 magic;
1292 __le32 version;
1293 __le32 checksum;
1294} __packed;
1254 1295
1255struct wlcore_conf { 1296struct wlcore_conf {
1256 struct conf_sg_settings sg; 1297 struct conf_sg_settings sg;
@@ -1269,6 +1310,12 @@ struct wlcore_conf {
1269 struct conf_fwlog fwlog; 1310 struct conf_fwlog fwlog;
1270 struct conf_rate_policy_settings rate; 1311 struct conf_rate_policy_settings rate;
1271 struct conf_hangover_settings hangover; 1312 struct conf_hangover_settings hangover;
1272}; 1313} __packed;
1314
1315struct wlcore_conf_file {
1316 struct wlcore_conf_header header;
1317 struct wlcore_conf core;
1318 u8 priv[0];
1319} __packed;
1273 1320
1274#endif 1321#endif
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c
index d5aea1ff5ad1..80dbc5304fac 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.c
+++ b/drivers/net/wireless/ti/wlcore/debugfs.c
@@ -25,6 +25,7 @@
25 25
26#include <linux/skbuff.h> 26#include <linux/skbuff.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/module.h>
28 29
29#include "wlcore.h" 30#include "wlcore.h"
30#include "debug.h" 31#include "debug.h"
@@ -32,14 +33,16 @@
32#include "ps.h" 33#include "ps.h"
33#include "io.h" 34#include "io.h"
34#include "tx.h" 35#include "tx.h"
36#include "hw_ops.h"
35 37
36/* ms */ 38/* ms */
37#define WL1271_DEBUGFS_STATS_LIFETIME 1000 39#define WL1271_DEBUGFS_STATS_LIFETIME 1000
38 40
41#define WLCORE_MAX_BLOCK_SIZE ((size_t)(4*PAGE_SIZE))
42
39/* debugfs macros idea from mac80211 */ 43/* debugfs macros idea from mac80211 */
40#define DEBUGFS_FORMAT_BUFFER_SIZE 100 44int wl1271_format_buffer(char __user *userbuf, size_t count,
41static int wl1271_format_buffer(char __user *userbuf, size_t count, 45 loff_t *ppos, char *fmt, ...)
42 loff_t *ppos, char *fmt, ...)
43{ 46{
44 va_list args; 47 va_list args;
45 char buf[DEBUGFS_FORMAT_BUFFER_SIZE]; 48 char buf[DEBUGFS_FORMAT_BUFFER_SIZE];
@@ -51,59 +54,9 @@ static int wl1271_format_buffer(char __user *userbuf, size_t count,
51 54
52 return simple_read_from_buffer(userbuf, count, ppos, buf, res); 55 return simple_read_from_buffer(userbuf, count, ppos, buf, res);
53} 56}
57EXPORT_SYMBOL_GPL(wl1271_format_buffer);
54 58
55#define DEBUGFS_READONLY_FILE(name, fmt, value...) \ 59void wl1271_debugfs_update_stats(struct wl1271 *wl)
56static ssize_t name## _read(struct file *file, char __user *userbuf, \
57 size_t count, loff_t *ppos) \
58{ \
59 struct wl1271 *wl = file->private_data; \
60 return wl1271_format_buffer(userbuf, count, ppos, \
61 fmt "\n", ##value); \
62} \
63 \
64static const struct file_operations name## _ops = { \
65 .read = name## _read, \
66 .open = simple_open, \
67 .llseek = generic_file_llseek, \
68};
69
70#define DEBUGFS_ADD(name, parent) \
71 entry = debugfs_create_file(#name, 0400, parent, \
72 wl, &name## _ops); \
73 if (!entry || IS_ERR(entry)) \
74 goto err; \
75
76#define DEBUGFS_ADD_PREFIX(prefix, name, parent) \
77 do { \
78 entry = debugfs_create_file(#name, 0400, parent, \
79 wl, &prefix## _## name## _ops); \
80 if (!entry || IS_ERR(entry)) \
81 goto err; \
82 } while (0);
83
84#define DEBUGFS_FWSTATS_FILE(sub, name, fmt) \
85static ssize_t sub## _ ##name## _read(struct file *file, \
86 char __user *userbuf, \
87 size_t count, loff_t *ppos) \
88{ \
89 struct wl1271 *wl = file->private_data; \
90 \
91 wl1271_debugfs_update_stats(wl); \
92 \
93 return wl1271_format_buffer(userbuf, count, ppos, fmt "\n", \
94 wl->stats.fw_stats->sub.name); \
95} \
96 \
97static const struct file_operations sub## _ ##name## _ops = { \
98 .read = sub## _ ##name## _read, \
99 .open = simple_open, \
100 .llseek = generic_file_llseek, \
101};
102
103#define DEBUGFS_FWSTATS_ADD(sub, name) \
104 DEBUGFS_ADD(sub## _ ##name, stats)
105
106static void wl1271_debugfs_update_stats(struct wl1271 *wl)
107{ 60{
108 int ret; 61 int ret;
109 62
@@ -125,97 +78,7 @@ static void wl1271_debugfs_update_stats(struct wl1271 *wl)
125out: 78out:
126 mutex_unlock(&wl->mutex); 79 mutex_unlock(&wl->mutex);
127} 80}
128 81EXPORT_SYMBOL_GPL(wl1271_debugfs_update_stats);
129DEBUGFS_FWSTATS_FILE(tx, internal_desc_overflow, "%u");
130
131DEBUGFS_FWSTATS_FILE(rx, out_of_mem, "%u");
132DEBUGFS_FWSTATS_FILE(rx, hdr_overflow, "%u");
133DEBUGFS_FWSTATS_FILE(rx, hw_stuck, "%u");
134DEBUGFS_FWSTATS_FILE(rx, dropped, "%u");
135DEBUGFS_FWSTATS_FILE(rx, fcs_err, "%u");
136DEBUGFS_FWSTATS_FILE(rx, xfr_hint_trig, "%u");
137DEBUGFS_FWSTATS_FILE(rx, path_reset, "%u");
138DEBUGFS_FWSTATS_FILE(rx, reset_counter, "%u");
139
140DEBUGFS_FWSTATS_FILE(dma, rx_requested, "%u");
141DEBUGFS_FWSTATS_FILE(dma, rx_errors, "%u");
142DEBUGFS_FWSTATS_FILE(dma, tx_requested, "%u");
143DEBUGFS_FWSTATS_FILE(dma, tx_errors, "%u");
144
145DEBUGFS_FWSTATS_FILE(isr, cmd_cmplt, "%u");
146DEBUGFS_FWSTATS_FILE(isr, fiqs, "%u");
147DEBUGFS_FWSTATS_FILE(isr, rx_headers, "%u");
148DEBUGFS_FWSTATS_FILE(isr, rx_mem_overflow, "%u");
149DEBUGFS_FWSTATS_FILE(isr, rx_rdys, "%u");
150DEBUGFS_FWSTATS_FILE(isr, irqs, "%u");
151DEBUGFS_FWSTATS_FILE(isr, tx_procs, "%u");
152DEBUGFS_FWSTATS_FILE(isr, decrypt_done, "%u");
153DEBUGFS_FWSTATS_FILE(isr, dma0_done, "%u");
154DEBUGFS_FWSTATS_FILE(isr, dma1_done, "%u");
155DEBUGFS_FWSTATS_FILE(isr, tx_exch_complete, "%u");
156DEBUGFS_FWSTATS_FILE(isr, commands, "%u");
157DEBUGFS_FWSTATS_FILE(isr, rx_procs, "%u");
158DEBUGFS_FWSTATS_FILE(isr, hw_pm_mode_changes, "%u");
159DEBUGFS_FWSTATS_FILE(isr, host_acknowledges, "%u");
160DEBUGFS_FWSTATS_FILE(isr, pci_pm, "%u");
161DEBUGFS_FWSTATS_FILE(isr, wakeups, "%u");
162DEBUGFS_FWSTATS_FILE(isr, low_rssi, "%u");
163
164DEBUGFS_FWSTATS_FILE(wep, addr_key_count, "%u");
165DEBUGFS_FWSTATS_FILE(wep, default_key_count, "%u");
166/* skipping wep.reserved */
167DEBUGFS_FWSTATS_FILE(wep, key_not_found, "%u");
168DEBUGFS_FWSTATS_FILE(wep, decrypt_fail, "%u");
169DEBUGFS_FWSTATS_FILE(wep, packets, "%u");
170DEBUGFS_FWSTATS_FILE(wep, interrupt, "%u");
171
172DEBUGFS_FWSTATS_FILE(pwr, ps_enter, "%u");
173DEBUGFS_FWSTATS_FILE(pwr, elp_enter, "%u");
174DEBUGFS_FWSTATS_FILE(pwr, missing_bcns, "%u");
175DEBUGFS_FWSTATS_FILE(pwr, wake_on_host, "%u");
176DEBUGFS_FWSTATS_FILE(pwr, wake_on_timer_exp, "%u");
177DEBUGFS_FWSTATS_FILE(pwr, tx_with_ps, "%u");
178DEBUGFS_FWSTATS_FILE(pwr, tx_without_ps, "%u");
179DEBUGFS_FWSTATS_FILE(pwr, rcvd_beacons, "%u");
180DEBUGFS_FWSTATS_FILE(pwr, power_save_off, "%u");
181DEBUGFS_FWSTATS_FILE(pwr, enable_ps, "%u");
182DEBUGFS_FWSTATS_FILE(pwr, disable_ps, "%u");
183DEBUGFS_FWSTATS_FILE(pwr, fix_tsf_ps, "%u");
184/* skipping cont_miss_bcns_spread for now */
185DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_beacons, "%u");
186
187DEBUGFS_FWSTATS_FILE(mic, rx_pkts, "%u");
188DEBUGFS_FWSTATS_FILE(mic, calc_failure, "%u");
189
190DEBUGFS_FWSTATS_FILE(aes, encrypt_fail, "%u");
191DEBUGFS_FWSTATS_FILE(aes, decrypt_fail, "%u");
192DEBUGFS_FWSTATS_FILE(aes, encrypt_packets, "%u");
193DEBUGFS_FWSTATS_FILE(aes, decrypt_packets, "%u");
194DEBUGFS_FWSTATS_FILE(aes, encrypt_interrupt, "%u");
195DEBUGFS_FWSTATS_FILE(aes, decrypt_interrupt, "%u");
196
197DEBUGFS_FWSTATS_FILE(event, heart_beat, "%u");
198DEBUGFS_FWSTATS_FILE(event, calibration, "%u");
199DEBUGFS_FWSTATS_FILE(event, rx_mismatch, "%u");
200DEBUGFS_FWSTATS_FILE(event, rx_mem_empty, "%u");
201DEBUGFS_FWSTATS_FILE(event, rx_pool, "%u");
202DEBUGFS_FWSTATS_FILE(event, oom_late, "%u");
203DEBUGFS_FWSTATS_FILE(event, phy_transmit_error, "%u");
204DEBUGFS_FWSTATS_FILE(event, tx_stuck, "%u");
205
206DEBUGFS_FWSTATS_FILE(ps, pspoll_timeouts, "%u");
207DEBUGFS_FWSTATS_FILE(ps, upsd_timeouts, "%u");
208DEBUGFS_FWSTATS_FILE(ps, upsd_max_sptime, "%u");
209DEBUGFS_FWSTATS_FILE(ps, upsd_max_apturn, "%u");
210DEBUGFS_FWSTATS_FILE(ps, pspoll_max_apturn, "%u");
211DEBUGFS_FWSTATS_FILE(ps, pspoll_utilization, "%u");
212DEBUGFS_FWSTATS_FILE(ps, upsd_utilization, "%u");
213
214DEBUGFS_FWSTATS_FILE(rxpipe, rx_prep_beacon_drop, "%u");
215DEBUGFS_FWSTATS_FILE(rxpipe, descr_host_int_trig_rx_data, "%u");
216DEBUGFS_FWSTATS_FILE(rxpipe, beacon_buffer_thres_host_int_trig_rx_data, "%u");
217DEBUGFS_FWSTATS_FILE(rxpipe, missed_beacon_host_int_trig_rx_data, "%u");
218DEBUGFS_FWSTATS_FILE(rxpipe, tx_xfr_host_int_trig_rx_data, "%u");
219 82
220DEBUGFS_READONLY_FILE(retry_count, "%u", wl->stats.retry_count); 83DEBUGFS_READONLY_FILE(retry_count, "%u", wl->stats.retry_count);
221DEBUGFS_READONLY_FILE(excessive_retries, "%u", 84DEBUGFS_READONLY_FILE(excessive_retries, "%u",
@@ -241,6 +104,89 @@ static const struct file_operations tx_queue_len_ops = {
241 .llseek = default_llseek, 104 .llseek = default_llseek,
242}; 105};
243 106
107static void chip_op_handler(struct wl1271 *wl, unsigned long value,
108 void *arg)
109{
110 int ret;
111 int (*chip_op) (struct wl1271 *wl);
112
113 if (!arg) {
114 wl1271_warning("debugfs chip_op_handler with no callback");
115 return;
116 }
117
118 ret = wl1271_ps_elp_wakeup(wl);
119 if (ret < 0)
120 return;
121
122 chip_op = arg;
123 chip_op(wl);
124
125 wl1271_ps_elp_sleep(wl);
126}
127
128
129static inline void no_write_handler(struct wl1271 *wl,
130 unsigned long value,
131 unsigned long param)
132{
133}
134
135#define WL12XX_CONF_DEBUGFS(param, conf_sub_struct, \
136 min_val, max_val, write_handler_locked, \
137 write_handler_arg) \
138 static ssize_t param##_read(struct file *file, \
139 char __user *user_buf, \
140 size_t count, loff_t *ppos) \
141 { \
142 struct wl1271 *wl = file->private_data; \
143 return wl1271_format_buffer(user_buf, count, \
144 ppos, "%d\n", \
145 wl->conf.conf_sub_struct.param); \
146 } \
147 \
148 static ssize_t param##_write(struct file *file, \
149 const char __user *user_buf, \
150 size_t count, loff_t *ppos) \
151 { \
152 struct wl1271 *wl = file->private_data; \
153 unsigned long value; \
154 int ret; \
155 \
156 ret = kstrtoul_from_user(user_buf, count, 10, &value); \
157 if (ret < 0) { \
158 wl1271_warning("illegal value for " #param); \
159 return -EINVAL; \
160 } \
161 \
162 if (value < min_val || value > max_val) { \
163 wl1271_warning(#param " is not in valid range"); \
164 return -ERANGE; \
165 } \
166 \
167 mutex_lock(&wl->mutex); \
168 wl->conf.conf_sub_struct.param = value; \
169 \
170 write_handler_locked(wl, value, write_handler_arg); \
171 \
172 mutex_unlock(&wl->mutex); \
173 return count; \
174 } \
175 \
176 static const struct file_operations param##_ops = { \
177 .read = param##_read, \
178 .write = param##_write, \
179 .open = simple_open, \
180 .llseek = default_llseek, \
181 };
182
183WL12XX_CONF_DEBUGFS(irq_pkt_threshold, rx, 0, 65535,
184 chip_op_handler, wl1271_acx_init_rx_interrupt)
185WL12XX_CONF_DEBUGFS(irq_blk_threshold, rx, 0, 65535,
186 chip_op_handler, wl1271_acx_init_rx_interrupt)
187WL12XX_CONF_DEBUGFS(irq_timeout, rx, 0, 100,
188 chip_op_handler, wl1271_acx_init_rx_interrupt)
189
244static ssize_t gpio_power_read(struct file *file, char __user *user_buf, 190static ssize_t gpio_power_read(struct file *file, char __user *user_buf,
245 size_t count, loff_t *ppos) 191 size_t count, loff_t *ppos)
246{ 192{
@@ -535,8 +481,7 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
535 DRIVER_STATE_PRINT_LHEX(ap_ps_map); 481 DRIVER_STATE_PRINT_LHEX(ap_ps_map);
536 DRIVER_STATE_PRINT_HEX(quirks); 482 DRIVER_STATE_PRINT_HEX(quirks);
537 DRIVER_STATE_PRINT_HEX(irq); 483 DRIVER_STATE_PRINT_HEX(irq);
538 DRIVER_STATE_PRINT_HEX(ref_clock); 484 /* TODO: ref_clock and tcxo_clock were moved to wl12xx priv */
539 DRIVER_STATE_PRINT_HEX(tcxo_clock);
540 DRIVER_STATE_PRINT_HEX(hw_pg_ver); 485 DRIVER_STATE_PRINT_HEX(hw_pg_ver);
541 DRIVER_STATE_PRINT_HEX(platform_quirks); 486 DRIVER_STATE_PRINT_HEX(platform_quirks);
542 DRIVER_STATE_PRINT_HEX(chip.id); 487 DRIVER_STATE_PRINT_HEX(chip.id);
@@ -647,7 +592,6 @@ static ssize_t vifs_state_read(struct file *file, char __user *user_buf,
647 VIF_STATE_PRINT_INT(last_rssi_event); 592 VIF_STATE_PRINT_INT(last_rssi_event);
648 VIF_STATE_PRINT_INT(ba_support); 593 VIF_STATE_PRINT_INT(ba_support);
649 VIF_STATE_PRINT_INT(ba_allowed); 594 VIF_STATE_PRINT_INT(ba_allowed);
650 VIF_STATE_PRINT_INT(is_gem);
651 VIF_STATE_PRINT_LLHEX(tx_security_seq); 595 VIF_STATE_PRINT_LLHEX(tx_security_seq);
652 VIF_STATE_PRINT_INT(tx_security_last_seq_lsb); 596 VIF_STATE_PRINT_INT(tx_security_last_seq_lsb);
653 } 597 }
@@ -1002,108 +946,281 @@ static const struct file_operations beacon_filtering_ops = {
1002 .llseek = default_llseek, 946 .llseek = default_llseek,
1003}; 947};
1004 948
1005static int wl1271_debugfs_add_files(struct wl1271 *wl, 949static ssize_t fw_stats_raw_read(struct file *file,
1006 struct dentry *rootdir) 950 char __user *userbuf,
951 size_t count, loff_t *ppos)
1007{ 952{
1008 int ret = 0; 953 struct wl1271 *wl = file->private_data;
1009 struct dentry *entry, *stats, *streaming;
1010 954
1011 stats = debugfs_create_dir("fw-statistics", rootdir); 955 wl1271_debugfs_update_stats(wl);
1012 if (!stats || IS_ERR(stats)) { 956
1013 entry = stats; 957 return simple_read_from_buffer(userbuf, count, ppos,
1014 goto err; 958 wl->stats.fw_stats,
959 wl->stats.fw_stats_len);
960}
961
962static const struct file_operations fw_stats_raw_ops = {
963 .read = fw_stats_raw_read,
964 .open = simple_open,
965 .llseek = default_llseek,
966};
967
968static ssize_t sleep_auth_read(struct file *file, char __user *user_buf,
969 size_t count, loff_t *ppos)
970{
971 struct wl1271 *wl = file->private_data;
972
973 return wl1271_format_buffer(user_buf, count,
974 ppos, "%d\n",
975 wl->sleep_auth);
976}
977
978static ssize_t sleep_auth_write(struct file *file,
979 const char __user *user_buf,
980 size_t count, loff_t *ppos)
981{
982 struct wl1271 *wl = file->private_data;
983 unsigned long value;
984 int ret;
985
986 ret = kstrtoul_from_user(user_buf, count, 0, &value);
987 if (ret < 0) {
988 wl1271_warning("illegal value in sleep_auth");
989 return -EINVAL;
990 }
991
992 if (value < 0 || value > WL1271_PSM_MAX) {
993 wl1271_warning("sleep_auth must be between 0 and %d",
994 WL1271_PSM_MAX);
995 return -ERANGE;
996 }
997
998 mutex_lock(&wl->mutex);
999
1000 wl->conf.conn.sta_sleep_auth = value;
1001
1002 if (wl->state == WL1271_STATE_OFF) {
1003 /* this will show up on "read" in case we are off */
1004 wl->sleep_auth = value;
1005 goto out;
1015 } 1006 }
1016 1007
1017 DEBUGFS_FWSTATS_ADD(tx, internal_desc_overflow); 1008 ret = wl1271_ps_elp_wakeup(wl);
1018 1009 if (ret < 0)
1019 DEBUGFS_FWSTATS_ADD(rx, out_of_mem); 1010 goto out;
1020 DEBUGFS_FWSTATS_ADD(rx, hdr_overflow); 1011
1021 DEBUGFS_FWSTATS_ADD(rx, hw_stuck); 1012 ret = wl1271_acx_sleep_auth(wl, value);
1022 DEBUGFS_FWSTATS_ADD(rx, dropped); 1013 if (ret < 0)
1023 DEBUGFS_FWSTATS_ADD(rx, fcs_err); 1014 goto out_sleep;
1024 DEBUGFS_FWSTATS_ADD(rx, xfr_hint_trig); 1015
1025 DEBUGFS_FWSTATS_ADD(rx, path_reset); 1016out_sleep:
1026 DEBUGFS_FWSTATS_ADD(rx, reset_counter); 1017 wl1271_ps_elp_sleep(wl);
1027 1018out:
1028 DEBUGFS_FWSTATS_ADD(dma, rx_requested); 1019 mutex_unlock(&wl->mutex);
1029 DEBUGFS_FWSTATS_ADD(dma, rx_errors); 1020 return count;
1030 DEBUGFS_FWSTATS_ADD(dma, tx_requested); 1021}
1031 DEBUGFS_FWSTATS_ADD(dma, tx_errors); 1022
1032 1023static const struct file_operations sleep_auth_ops = {
1033 DEBUGFS_FWSTATS_ADD(isr, cmd_cmplt); 1024 .read = sleep_auth_read,
1034 DEBUGFS_FWSTATS_ADD(isr, fiqs); 1025 .write = sleep_auth_write,
1035 DEBUGFS_FWSTATS_ADD(isr, rx_headers); 1026 .open = simple_open,
1036 DEBUGFS_FWSTATS_ADD(isr, rx_mem_overflow); 1027 .llseek = default_llseek,
1037 DEBUGFS_FWSTATS_ADD(isr, rx_rdys); 1028};
1038 DEBUGFS_FWSTATS_ADD(isr, irqs); 1029
1039 DEBUGFS_FWSTATS_ADD(isr, tx_procs); 1030static ssize_t dev_mem_read(struct file *file,
1040 DEBUGFS_FWSTATS_ADD(isr, decrypt_done); 1031 char __user *user_buf, size_t count,
1041 DEBUGFS_FWSTATS_ADD(isr, dma0_done); 1032 loff_t *ppos)
1042 DEBUGFS_FWSTATS_ADD(isr, dma1_done); 1033{
1043 DEBUGFS_FWSTATS_ADD(isr, tx_exch_complete); 1034 struct wl1271 *wl = file->private_data;
1044 DEBUGFS_FWSTATS_ADD(isr, commands); 1035 struct wlcore_partition_set part, old_part;
1045 DEBUGFS_FWSTATS_ADD(isr, rx_procs); 1036 size_t bytes = count;
1046 DEBUGFS_FWSTATS_ADD(isr, hw_pm_mode_changes); 1037 int ret;
1047 DEBUGFS_FWSTATS_ADD(isr, host_acknowledges); 1038 char *buf;
1048 DEBUGFS_FWSTATS_ADD(isr, pci_pm); 1039
1049 DEBUGFS_FWSTATS_ADD(isr, wakeups); 1040 /* only requests of dword-aligned size and offset are supported */
1050 DEBUGFS_FWSTATS_ADD(isr, low_rssi); 1041 if (bytes % 4)
1051 1042 return -EINVAL;
1052 DEBUGFS_FWSTATS_ADD(wep, addr_key_count); 1043
1053 DEBUGFS_FWSTATS_ADD(wep, default_key_count); 1044 if (*ppos % 4)
1054 /* skipping wep.reserved */ 1045 return -EINVAL;
1055 DEBUGFS_FWSTATS_ADD(wep, key_not_found); 1046
1056 DEBUGFS_FWSTATS_ADD(wep, decrypt_fail); 1047 /* function should return in reasonable time */
1057 DEBUGFS_FWSTATS_ADD(wep, packets); 1048 bytes = min(bytes, WLCORE_MAX_BLOCK_SIZE);
1058 DEBUGFS_FWSTATS_ADD(wep, interrupt); 1049
1059 1050 if (bytes == 0)
1060 DEBUGFS_FWSTATS_ADD(pwr, ps_enter); 1051 return -EINVAL;
1061 DEBUGFS_FWSTATS_ADD(pwr, elp_enter); 1052
1062 DEBUGFS_FWSTATS_ADD(pwr, missing_bcns); 1053 memset(&part, 0, sizeof(part));
1063 DEBUGFS_FWSTATS_ADD(pwr, wake_on_host); 1054 part.mem.start = file->f_pos;
1064 DEBUGFS_FWSTATS_ADD(pwr, wake_on_timer_exp); 1055 part.mem.size = bytes;
1065 DEBUGFS_FWSTATS_ADD(pwr, tx_with_ps); 1056
1066 DEBUGFS_FWSTATS_ADD(pwr, tx_without_ps); 1057 buf = kmalloc(bytes, GFP_KERNEL);
1067 DEBUGFS_FWSTATS_ADD(pwr, rcvd_beacons); 1058 if (!buf)
1068 DEBUGFS_FWSTATS_ADD(pwr, power_save_off); 1059 return -ENOMEM;
1069 DEBUGFS_FWSTATS_ADD(pwr, enable_ps); 1060
1070 DEBUGFS_FWSTATS_ADD(pwr, disable_ps); 1061 mutex_lock(&wl->mutex);
1071 DEBUGFS_FWSTATS_ADD(pwr, fix_tsf_ps); 1062
1072 /* skipping cont_miss_bcns_spread for now */ 1063 if (wl->state == WL1271_STATE_OFF) {
1073 DEBUGFS_FWSTATS_ADD(pwr, rcvd_awake_beacons); 1064 ret = -EFAULT;
1074 1065 goto skip_read;
1075 DEBUGFS_FWSTATS_ADD(mic, rx_pkts); 1066 }
1076 DEBUGFS_FWSTATS_ADD(mic, calc_failure); 1067
1077 1068 ret = wl1271_ps_elp_wakeup(wl);
1078 DEBUGFS_FWSTATS_ADD(aes, encrypt_fail); 1069 if (ret < 0)
1079 DEBUGFS_FWSTATS_ADD(aes, decrypt_fail); 1070 goto skip_read;
1080 DEBUGFS_FWSTATS_ADD(aes, encrypt_packets); 1071
1081 DEBUGFS_FWSTATS_ADD(aes, decrypt_packets); 1072 /* store current partition and switch partition */
1082 DEBUGFS_FWSTATS_ADD(aes, encrypt_interrupt); 1073 memcpy(&old_part, &wl->curr_part, sizeof(old_part));
1083 DEBUGFS_FWSTATS_ADD(aes, decrypt_interrupt); 1074 ret = wlcore_set_partition(wl, &part);
1084 1075 if (ret < 0)
1085 DEBUGFS_FWSTATS_ADD(event, heart_beat); 1076 goto part_err;
1086 DEBUGFS_FWSTATS_ADD(event, calibration); 1077
1087 DEBUGFS_FWSTATS_ADD(event, rx_mismatch); 1078 ret = wlcore_raw_read(wl, 0, buf, bytes, false);
1088 DEBUGFS_FWSTATS_ADD(event, rx_mem_empty); 1079 if (ret < 0)
1089 DEBUGFS_FWSTATS_ADD(event, rx_pool); 1080 goto read_err;
1090 DEBUGFS_FWSTATS_ADD(event, oom_late); 1081
1091 DEBUGFS_FWSTATS_ADD(event, phy_transmit_error); 1082read_err:
1092 DEBUGFS_FWSTATS_ADD(event, tx_stuck); 1083 /* recover partition */
1093 1084 ret = wlcore_set_partition(wl, &old_part);
1094 DEBUGFS_FWSTATS_ADD(ps, pspoll_timeouts); 1085 if (ret < 0)
1095 DEBUGFS_FWSTATS_ADD(ps, upsd_timeouts); 1086 goto part_err;
1096 DEBUGFS_FWSTATS_ADD(ps, upsd_max_sptime); 1087
1097 DEBUGFS_FWSTATS_ADD(ps, upsd_max_apturn); 1088part_err:
1098 DEBUGFS_FWSTATS_ADD(ps, pspoll_max_apturn); 1089 wl1271_ps_elp_sleep(wl);
1099 DEBUGFS_FWSTATS_ADD(ps, pspoll_utilization); 1090
1100 DEBUGFS_FWSTATS_ADD(ps, upsd_utilization); 1091skip_read:
1101 1092 mutex_unlock(&wl->mutex);
1102 DEBUGFS_FWSTATS_ADD(rxpipe, rx_prep_beacon_drop); 1093
1103 DEBUGFS_FWSTATS_ADD(rxpipe, descr_host_int_trig_rx_data); 1094 if (ret == 0) {
1104 DEBUGFS_FWSTATS_ADD(rxpipe, beacon_buffer_thres_host_int_trig_rx_data); 1095 ret = copy_to_user(user_buf, buf, bytes);
1105 DEBUGFS_FWSTATS_ADD(rxpipe, missed_beacon_host_int_trig_rx_data); 1096 if (ret < bytes) {
1106 DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data); 1097 bytes -= ret;
1098 *ppos += bytes;
1099 ret = 0;
1100 } else {
1101 ret = -EFAULT;
1102 }
1103 }
1104
1105 kfree(buf);
1106
1107 return ((ret == 0) ? bytes : ret);
1108}
1109
1110static ssize_t dev_mem_write(struct file *file, const char __user *user_buf,
1111 size_t count, loff_t *ppos)
1112{
1113 struct wl1271 *wl = file->private_data;
1114 struct wlcore_partition_set part, old_part;
1115 size_t bytes = count;
1116 int ret;
1117 char *buf;
1118
1119 /* only requests of dword-aligned size and offset are supported */
1120 if (bytes % 4)
1121 return -EINVAL;
1122
1123 if (*ppos % 4)
1124 return -EINVAL;
1125
1126 /* function should return in reasonable time */
1127 bytes = min(bytes, WLCORE_MAX_BLOCK_SIZE);
1128
1129 if (bytes == 0)
1130 return -EINVAL;
1131
1132 memset(&part, 0, sizeof(part));
1133 part.mem.start = file->f_pos;
1134 part.mem.size = bytes;
1135
1136 buf = kmalloc(bytes, GFP_KERNEL);
1137 if (!buf)
1138 return -ENOMEM;
1139
1140 ret = copy_from_user(buf, user_buf, bytes);
1141 if (ret) {
1142 ret = -EFAULT;
1143 goto err_out;
1144 }
1145
1146 mutex_lock(&wl->mutex);
1147
1148 if (wl->state == WL1271_STATE_OFF) {
1149 ret = -EFAULT;
1150 goto skip_write;
1151 }
1152
1153 ret = wl1271_ps_elp_wakeup(wl);
1154 if (ret < 0)
1155 goto skip_write;
1156
1157 /* store current partition and switch partition */
1158 memcpy(&old_part, &wl->curr_part, sizeof(old_part));
1159 ret = wlcore_set_partition(wl, &part);
1160 if (ret < 0)
1161 goto part_err;
1162
1163 ret = wlcore_raw_write(wl, 0, buf, bytes, false);
1164 if (ret < 0)
1165 goto write_err;
1166
1167write_err:
1168 /* recover partition */
1169 ret = wlcore_set_partition(wl, &old_part);
1170 if (ret < 0)
1171 goto part_err;
1172
1173part_err:
1174 wl1271_ps_elp_sleep(wl);
1175
1176skip_write:
1177 mutex_unlock(&wl->mutex);
1178
1179 if (ret == 0)
1180 *ppos += bytes;
1181
1182err_out:
1183 kfree(buf);
1184
1185 return ((ret == 0) ? bytes : ret);
1186}
1187
1188static loff_t dev_mem_seek(struct file *file, loff_t offset, int orig)
1189{
1190 loff_t ret;
1191
1192 /* only requests of dword-aligned size and offset are supported */
1193 if (offset % 4)
1194 return -EINVAL;
1195
1196 switch (orig) {
1197 case SEEK_SET:
1198 file->f_pos = offset;
1199 ret = file->f_pos;
1200 break;
1201 case SEEK_CUR:
1202 file->f_pos += offset;
1203 ret = file->f_pos;
1204 break;
1205 default:
1206 ret = -EINVAL;
1207 }
1208
1209 return ret;
1210}
1211
1212static const struct file_operations dev_mem_ops = {
1213 .open = simple_open,
1214 .read = dev_mem_read,
1215 .write = dev_mem_write,
1216 .llseek = dev_mem_seek,
1217};
1218
1219static int wl1271_debugfs_add_files(struct wl1271 *wl,
1220 struct dentry *rootdir)
1221{
1222 int ret = 0;
1223 struct dentry *entry, *streaming;
1107 1224
1108 DEBUGFS_ADD(tx_queue_len, rootdir); 1225 DEBUGFS_ADD(tx_queue_len, rootdir);
1109 DEBUGFS_ADD(retry_count, rootdir); 1226 DEBUGFS_ADD(retry_count, rootdir);
@@ -1120,6 +1237,11 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl,
1120 DEBUGFS_ADD(dynamic_ps_timeout, rootdir); 1237 DEBUGFS_ADD(dynamic_ps_timeout, rootdir);
1121 DEBUGFS_ADD(forced_ps, rootdir); 1238 DEBUGFS_ADD(forced_ps, rootdir);
1122 DEBUGFS_ADD(split_scan_timeout, rootdir); 1239 DEBUGFS_ADD(split_scan_timeout, rootdir);
1240 DEBUGFS_ADD(irq_pkt_threshold, rootdir);
1241 DEBUGFS_ADD(irq_blk_threshold, rootdir);
1242 DEBUGFS_ADD(irq_timeout, rootdir);
1243 DEBUGFS_ADD(fw_stats_raw, rootdir);
1244 DEBUGFS_ADD(sleep_auth, rootdir);
1123 1245
1124 streaming = debugfs_create_dir("rx_streaming", rootdir); 1246 streaming = debugfs_create_dir("rx_streaming", rootdir);
1125 if (!streaming || IS_ERR(streaming)) 1247 if (!streaming || IS_ERR(streaming))
@@ -1128,6 +1250,7 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl,
1128 DEBUGFS_ADD_PREFIX(rx_streaming, interval, streaming); 1250 DEBUGFS_ADD_PREFIX(rx_streaming, interval, streaming);
1129 DEBUGFS_ADD_PREFIX(rx_streaming, always, streaming); 1251 DEBUGFS_ADD_PREFIX(rx_streaming, always, streaming);
1130 1252
1253 DEBUGFS_ADD_PREFIX(dev, mem, rootdir);
1131 1254
1132 return 0; 1255 return 0;
1133 1256
@@ -1145,7 +1268,7 @@ void wl1271_debugfs_reset(struct wl1271 *wl)
1145 if (!wl->stats.fw_stats) 1268 if (!wl->stats.fw_stats)
1146 return; 1269 return;
1147 1270
1148 memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats)); 1271 memset(wl->stats.fw_stats, 0, wl->stats.fw_stats_len);
1149 wl->stats.retry_count = 0; 1272 wl->stats.retry_count = 0;
1150 wl->stats.excessive_retries = 0; 1273 wl->stats.excessive_retries = 0;
1151} 1274}
@@ -1160,34 +1283,34 @@ int wl1271_debugfs_init(struct wl1271 *wl)
1160 1283
1161 if (IS_ERR(rootdir)) { 1284 if (IS_ERR(rootdir)) {
1162 ret = PTR_ERR(rootdir); 1285 ret = PTR_ERR(rootdir);
1163 goto err; 1286 goto out;
1164 } 1287 }
1165 1288
1166 wl->stats.fw_stats = kzalloc(sizeof(*wl->stats.fw_stats), 1289 wl->stats.fw_stats = kzalloc(wl->stats.fw_stats_len, GFP_KERNEL);
1167 GFP_KERNEL);
1168
1169 if (!wl->stats.fw_stats) { 1290 if (!wl->stats.fw_stats) {
1170 ret = -ENOMEM; 1291 ret = -ENOMEM;
1171 goto err_fw; 1292 goto out_remove;
1172 } 1293 }
1173 1294
1174 wl->stats.fw_stats_update = jiffies; 1295 wl->stats.fw_stats_update = jiffies;
1175 1296
1176 ret = wl1271_debugfs_add_files(wl, rootdir); 1297 ret = wl1271_debugfs_add_files(wl, rootdir);
1298 if (ret < 0)
1299 goto out_exit;
1177 1300
1301 ret = wlcore_debugfs_init(wl, rootdir);
1178 if (ret < 0) 1302 if (ret < 0)
1179 goto err_file; 1303 goto out_exit;
1180 1304
1181 return 0; 1305 goto out;
1182 1306
1183err_file: 1307out_exit:
1184 kfree(wl->stats.fw_stats); 1308 wl1271_debugfs_exit(wl);
1185 wl->stats.fw_stats = NULL;
1186 1309
1187err_fw: 1310out_remove:
1188 debugfs_remove_recursive(rootdir); 1311 debugfs_remove_recursive(rootdir);
1189 1312
1190err: 1313out:
1191 return ret; 1314 return ret;
1192} 1315}
1193 1316
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
index a8d3aef011ff..f7381dd69009 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.h
+++ b/drivers/net/wireless/ti/wlcore/debugfs.h
@@ -26,8 +26,95 @@
26 26
27#include "wlcore.h" 27#include "wlcore.h"
28 28
29int wl1271_format_buffer(char __user *userbuf, size_t count,
30 loff_t *ppos, char *fmt, ...);
31
29int wl1271_debugfs_init(struct wl1271 *wl); 32int wl1271_debugfs_init(struct wl1271 *wl);
30void wl1271_debugfs_exit(struct wl1271 *wl); 33void wl1271_debugfs_exit(struct wl1271 *wl);
31void wl1271_debugfs_reset(struct wl1271 *wl); 34void wl1271_debugfs_reset(struct wl1271 *wl);
35void wl1271_debugfs_update_stats(struct wl1271 *wl);
36
37#define DEBUGFS_FORMAT_BUFFER_SIZE 256
38
39#define DEBUGFS_READONLY_FILE(name, fmt, value...) \
40static ssize_t name## _read(struct file *file, char __user *userbuf, \
41 size_t count, loff_t *ppos) \
42{ \
43 struct wl1271 *wl = file->private_data; \
44 return wl1271_format_buffer(userbuf, count, ppos, \
45 fmt "\n", ##value); \
46} \
47 \
48static const struct file_operations name## _ops = { \
49 .read = name## _read, \
50 .open = simple_open, \
51 .llseek = generic_file_llseek, \
52};
53
54#define DEBUGFS_ADD(name, parent) \
55 do { \
56 entry = debugfs_create_file(#name, 0400, parent, \
57 wl, &name## _ops); \
58 if (!entry || IS_ERR(entry)) \
59 goto err; \
60 } while (0);
61
62
63#define DEBUGFS_ADD_PREFIX(prefix, name, parent) \
64 do { \
65 entry = debugfs_create_file(#name, 0400, parent, \
66 wl, &prefix## _## name## _ops); \
67 if (!entry || IS_ERR(entry)) \
68 goto err; \
69 } while (0);
70
71#define DEBUGFS_FWSTATS_FILE(sub, name, fmt, struct_type) \
72static ssize_t sub## _ ##name## _read(struct file *file, \
73 char __user *userbuf, \
74 size_t count, loff_t *ppos) \
75{ \
76 struct wl1271 *wl = file->private_data; \
77 struct struct_type *stats = wl->stats.fw_stats; \
78 \
79 wl1271_debugfs_update_stats(wl); \
80 \
81 return wl1271_format_buffer(userbuf, count, ppos, fmt "\n", \
82 stats->sub.name); \
83} \
84 \
85static const struct file_operations sub## _ ##name## _ops = { \
86 .read = sub## _ ##name## _read, \
87 .open = simple_open, \
88 .llseek = generic_file_llseek, \
89};
90
91#define DEBUGFS_FWSTATS_FILE_ARRAY(sub, name, len, struct_type) \
92static ssize_t sub## _ ##name## _read(struct file *file, \
93 char __user *userbuf, \
94 size_t count, loff_t *ppos) \
95{ \
96 struct wl1271 *wl = file->private_data; \
97 struct struct_type *stats = wl->stats.fw_stats; \
98 char buf[DEBUGFS_FORMAT_BUFFER_SIZE] = ""; \
99 int res, i; \
100 \
101 wl1271_debugfs_update_stats(wl); \
102 \
103 for (i = 0; i < len; i++) \
104 res = snprintf(buf, sizeof(buf), "%s[%d] = %d\n", \
105 buf, i, stats->sub.name[i]); \
106 \
107 return wl1271_format_buffer(userbuf, count, ppos, "%s", buf); \
108} \
109 \
110static const struct file_operations sub## _ ##name## _ops = { \
111 .read = sub## _ ##name## _read, \
112 .open = simple_open, \
113 .llseek = generic_file_llseek, \
114};
115
116#define DEBUGFS_FWSTATS_ADD(sub, name) \
117 DEBUGFS_ADD(sub## _ ##name, stats)
118
32 119
33#endif /* WL1271_DEBUGFS_H */ 120#endif /* WL1271_DEBUGFS_H */
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index 28e2a633c3be..48907054d493 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -105,6 +105,7 @@ static int wl1271_event_process(struct wl1271 *wl)
105 u32 vector; 105 u32 vector;
106 bool disconnect_sta = false; 106 bool disconnect_sta = false;
107 unsigned long sta_bitmap = 0; 107 unsigned long sta_bitmap = 0;
108 int ret;
108 109
109 wl1271_event_mbox_dump(mbox); 110 wl1271_event_mbox_dump(mbox);
110 111
@@ -148,15 +149,33 @@ static int wl1271_event_process(struct wl1271 *wl)
148 int delay = wl->conf.conn.synch_fail_thold * 149 int delay = wl->conf.conn.synch_fail_thold *
149 wl->conf.conn.bss_lose_timeout; 150 wl->conf.conn.bss_lose_timeout;
150 wl1271_info("Beacon loss detected."); 151 wl1271_info("Beacon loss detected.");
151 cancel_delayed_work_sync(&wl->connection_loss_work); 152
153 /*
154 * if the work is already queued, it should take place. We
155 * don't want to delay the connection loss indication
156 * any more.
157 */
152 ieee80211_queue_delayed_work(wl->hw, &wl->connection_loss_work, 158 ieee80211_queue_delayed_work(wl->hw, &wl->connection_loss_work,
153 msecs_to_jiffies(delay)); 159 msecs_to_jiffies(delay));
160
161 wl12xx_for_each_wlvif_sta(wl, wlvif) {
162 vif = wl12xx_wlvif_to_vif(wlvif);
163
164 ieee80211_cqm_rssi_notify(
165 vif,
166 NL80211_CQM_RSSI_BEACON_LOSS_EVENT,
167 GFP_KERNEL);
168 }
154 } 169 }
155 170
156 if (vector & REGAINED_BSS_EVENT_ID) { 171 if (vector & REGAINED_BSS_EVENT_ID) {
157 /* TODO: check for multi-role */ 172 /* TODO: check for multi-role */
158 wl1271_info("Beacon regained."); 173 wl1271_info("Beacon regained.");
159 cancel_delayed_work_sync(&wl->connection_loss_work); 174 cancel_delayed_work(&wl->connection_loss_work);
175
176 /* sanity check - we can't lose and gain the beacon together */
177 WARN(vector & BSS_LOSE_EVENT_ID,
178 "Concurrent beacon loss and gain from FW");
160 } 179 }
161 180
162 if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) { 181 if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) {
@@ -210,7 +229,9 @@ static int wl1271_event_process(struct wl1271 *wl)
210 229
211 if ((vector & DUMMY_PACKET_EVENT_ID)) { 230 if ((vector & DUMMY_PACKET_EVENT_ID)) {
212 wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID"); 231 wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
213 wl1271_tx_dummy_packet(wl); 232 ret = wl1271_tx_dummy_packet(wl);
233 if (ret < 0)
234 return ret;
214 } 235 }
215 236
216 /* 237 /*
@@ -283,8 +304,10 @@ int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
283 return -EINVAL; 304 return -EINVAL;
284 305
285 /* first we read the mbox descriptor */ 306 /* first we read the mbox descriptor */
286 wl1271_read(wl, wl->mbox_ptr[mbox_num], wl->mbox, 307 ret = wlcore_read(wl, wl->mbox_ptr[mbox_num], wl->mbox,
287 sizeof(*wl->mbox), false); 308 sizeof(*wl->mbox), false);
309 if (ret < 0)
310 return ret;
288 311
289 /* process the descriptor */ 312 /* process the descriptor */
290 ret = wl1271_event_process(wl); 313 ret = wl1271_event_process(wl);
@@ -295,7 +318,7 @@ int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
295 * TODO: we just need this because one bit is in a different 318 * TODO: we just need this because one bit is in a different
296 * place. Is there any better way? 319 * place. Is there any better way?
297 */ 320 */
298 wl->ops->ack_event(wl); 321 ret = wl->ops->ack_event(wl);
299 322
300 return 0; 323 return ret;
301} 324}
diff --git a/drivers/net/wireless/ti/wlcore/hw_ops.h b/drivers/net/wireless/ti/wlcore/hw_ops.h
index 9384b4d56c24..2673d783ec1e 100644
--- a/drivers/net/wireless/ti/wlcore/hw_ops.h
+++ b/drivers/net/wireless/ti/wlcore/hw_ops.h
@@ -65,11 +65,13 @@ wlcore_hw_get_rx_buf_align(struct wl1271 *wl, u32 rx_desc)
65 return wl->ops->get_rx_buf_align(wl, rx_desc); 65 return wl->ops->get_rx_buf_align(wl, rx_desc);
66} 66}
67 67
68static inline void 68static inline int
69wlcore_hw_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len) 69wlcore_hw_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len)
70{ 70{
71 if (wl->ops->prepare_read) 71 if (wl->ops->prepare_read)
72 wl->ops->prepare_read(wl, rx_desc, len); 72 return wl->ops->prepare_read(wl, rx_desc, len);
73
74 return 0;
73} 75}
74 76
75static inline u32 77static inline u32
@@ -81,10 +83,12 @@ wlcore_hw_get_rx_packet_len(struct wl1271 *wl, void *rx_data, u32 data_len)
81 return wl->ops->get_rx_packet_len(wl, rx_data, data_len); 83 return wl->ops->get_rx_packet_len(wl, rx_data, data_len);
82} 84}
83 85
84static inline void wlcore_hw_tx_delayed_compl(struct wl1271 *wl) 86static inline int wlcore_hw_tx_delayed_compl(struct wl1271 *wl)
85{ 87{
86 if (wl->ops->tx_delayed_compl) 88 if (wl->ops->tx_delayed_compl)
87 wl->ops->tx_delayed_compl(wl); 89 return wl->ops->tx_delayed_compl(wl);
90
91 return 0;
88} 92}
89 93
90static inline void wlcore_hw_tx_immediate_compl(struct wl1271 *wl) 94static inline void wlcore_hw_tx_immediate_compl(struct wl1271 *wl)
@@ -119,4 +123,82 @@ static inline int wlcore_identify_fw(struct wl1271 *wl)
119 return 0; 123 return 0;
120} 124}
121 125
126static inline void
127wlcore_hw_set_tx_desc_csum(struct wl1271 *wl,
128 struct wl1271_tx_hw_descr *desc,
129 struct sk_buff *skb)
130{
131 if (!wl->ops->set_tx_desc_csum)
132 BUG_ON(1);
133
134 wl->ops->set_tx_desc_csum(wl, desc, skb);
135}
136
137static inline void
138wlcore_hw_set_rx_csum(struct wl1271 *wl,
139 struct wl1271_rx_descriptor *desc,
140 struct sk_buff *skb)
141{
142 if (wl->ops->set_rx_csum)
143 wl->ops->set_rx_csum(wl, desc, skb);
144}
145
146static inline u32
147wlcore_hw_ap_get_mimo_wide_rate_mask(struct wl1271 *wl,
148 struct wl12xx_vif *wlvif)
149{
150 if (wl->ops->ap_get_mimo_wide_rate_mask)
151 return wl->ops->ap_get_mimo_wide_rate_mask(wl, wlvif);
152
153 return 0;
154}
155
156static inline int
157wlcore_debugfs_init(struct wl1271 *wl, struct dentry *rootdir)
158{
159 if (wl->ops->debugfs_init)
160 return wl->ops->debugfs_init(wl, rootdir);
161
162 return 0;
163}
164
165static inline int
166wlcore_handle_static_data(struct wl1271 *wl, void *static_data)
167{
168 if (wl->ops->handle_static_data)
169 return wl->ops->handle_static_data(wl, static_data);
170
171 return 0;
172}
173
174static inline int
175wlcore_hw_get_spare_blocks(struct wl1271 *wl, bool is_gem)
176{
177 if (!wl->ops->get_spare_blocks)
178 BUG_ON(1);
179
180 return wl->ops->get_spare_blocks(wl, is_gem);
181}
182
183static inline int
184wlcore_hw_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
185 struct ieee80211_vif *vif,
186 struct ieee80211_sta *sta,
187 struct ieee80211_key_conf *key_conf)
188{
189 if (!wl->ops->set_key)
190 BUG_ON(1);
191
192 return wl->ops->set_key(wl, cmd, vif, sta, key_conf);
193}
194
195static inline u32
196wlcore_hw_pre_pkt_send(struct wl1271 *wl, u32 buf_offset, u32 last_len)
197{
198 if (wl->ops->pre_pkt_send)
199 return wl->ops->pre_pkt_send(wl, buf_offset, last_len);
200
201 return buf_offset;
202}
203
122#endif 204#endif
diff --git a/drivers/net/wireless/ti/wlcore/ini.h b/drivers/net/wireless/ti/wlcore/ini.h
index 4cf9ecc56212..d24fe3bbc672 100644
--- a/drivers/net/wireless/ti/wlcore/ini.h
+++ b/drivers/net/wireless/ti/wlcore/ini.h
@@ -172,7 +172,19 @@ struct wl128x_ini_fem_params_5 {
172 172
173/* NVS data structure */ 173/* NVS data structure */
174#define WL1271_INI_NVS_SECTION_SIZE 468 174#define WL1271_INI_NVS_SECTION_SIZE 468
175#define WL1271_INI_FEM_MODULE_COUNT 2 175
176/* We have four FEM module types: 0-RFMD, 1-TQS, 2-SKW, 3-TQS_HP */
177#define WL1271_INI_FEM_MODULE_COUNT 4
178
179/*
180 * In NVS we only store two FEM module entries -
181 * FEM modules 0,2,3 are stored in entry 0
182 * FEM module 1 is stored in entry 1
183 */
184#define WL12XX_NVS_FEM_MODULE_COUNT 2
185
186#define WL12XX_FEM_TO_NVS_ENTRY(ini_fem_module) \
187 ((ini_fem_module) == 1 ? 1 : 0)
176 188
177#define WL1271_INI_LEGACY_NVS_FILE_SIZE 800 189#define WL1271_INI_LEGACY_NVS_FILE_SIZE 800
178 190
@@ -188,13 +200,13 @@ struct wl1271_nvs_file {
188 struct { 200 struct {
189 struct wl1271_ini_fem_params_2 params; 201 struct wl1271_ini_fem_params_2 params;
190 u8 padding; 202 u8 padding;
191 } dyn_radio_params_2[WL1271_INI_FEM_MODULE_COUNT]; 203 } dyn_radio_params_2[WL12XX_NVS_FEM_MODULE_COUNT];
192 struct wl1271_ini_band_params_5 stat_radio_params_5; 204 struct wl1271_ini_band_params_5 stat_radio_params_5;
193 u8 padding3; 205 u8 padding3;
194 struct { 206 struct {
195 struct wl1271_ini_fem_params_5 params; 207 struct wl1271_ini_fem_params_5 params;
196 u8 padding; 208 u8 padding;
197 } dyn_radio_params_5[WL1271_INI_FEM_MODULE_COUNT]; 209 } dyn_radio_params_5[WL12XX_NVS_FEM_MODULE_COUNT];
198} __packed; 210} __packed;
199 211
200struct wl128x_nvs_file { 212struct wl128x_nvs_file {
@@ -209,12 +221,12 @@ struct wl128x_nvs_file {
209 struct { 221 struct {
210 struct wl128x_ini_fem_params_2 params; 222 struct wl128x_ini_fem_params_2 params;
211 u8 padding; 223 u8 padding;
212 } dyn_radio_params_2[WL1271_INI_FEM_MODULE_COUNT]; 224 } dyn_radio_params_2[WL12XX_NVS_FEM_MODULE_COUNT];
213 struct wl128x_ini_band_params_5 stat_radio_params_5; 225 struct wl128x_ini_band_params_5 stat_radio_params_5;
214 u8 padding3; 226 u8 padding3;
215 struct { 227 struct {
216 struct wl128x_ini_fem_params_5 params; 228 struct wl128x_ini_fem_params_5 params;
217 u8 padding; 229 u8 padding;
218 } dyn_radio_params_5[WL1271_INI_FEM_MODULE_COUNT]; 230 } dyn_radio_params_5[WL12XX_NVS_FEM_MODULE_COUNT];
219} __packed; 231} __packed;
220#endif 232#endif
diff --git a/drivers/net/wireless/ti/wlcore/init.c b/drivers/net/wireless/ti/wlcore/init.c
index 9f89255eb6e6..a3c867786df8 100644
--- a/drivers/net/wireless/ti/wlcore/init.c
+++ b/drivers/net/wireless/ti/wlcore/init.c
@@ -54,6 +54,22 @@ int wl1271_init_templates_config(struct wl1271 *wl)
54 if (ret < 0) 54 if (ret < 0)
55 return ret; 55 return ret;
56 56
57 if (wl->quirks & WLCORE_QUIRK_DUAL_PROBE_TMPL) {
58 ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
59 CMD_TEMPL_APP_PROBE_REQ_2_4, NULL,
60 WL1271_CMD_TEMPL_MAX_SIZE,
61 0, WL1271_RATE_AUTOMATIC);
62 if (ret < 0)
63 return ret;
64
65 ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
66 CMD_TEMPL_APP_PROBE_REQ_5, NULL,
67 WL1271_CMD_TEMPL_MAX_SIZE,
68 0, WL1271_RATE_AUTOMATIC);
69 if (ret < 0)
70 return ret;
71 }
72
57 ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID, 73 ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
58 CMD_TEMPL_NULL_DATA, NULL, 74 CMD_TEMPL_NULL_DATA, NULL,
59 sizeof(struct wl12xx_null_data_template), 75 sizeof(struct wl12xx_null_data_template),
@@ -460,6 +476,9 @@ int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif)
460 /* unconditionally enable HT rates */ 476 /* unconditionally enable HT rates */
461 supported_rates |= CONF_TX_MCS_RATES; 477 supported_rates |= CONF_TX_MCS_RATES;
462 478
479 /* get extra MIMO or wide-chan rates where the HW supports it */
480 supported_rates |= wlcore_hw_ap_get_mimo_wide_rate_mask(wl, wlvif);
481
463 /* configure unicast TX rate classes */ 482 /* configure unicast TX rate classes */
464 for (i = 0; i < wl->conf.tx.ac_conf_count; i++) { 483 for (i = 0; i < wl->conf.tx.ac_conf_count; i++) {
465 rc.enabled_rates = supported_rates; 484 rc.enabled_rates = supported_rates;
@@ -551,29 +570,28 @@ int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif)
551 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); 570 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
552 int ret, i; 571 int ret, i;
553 572
554 /* 573 /* consider all existing roles before configuring psm. */
555 * consider all existing roles before configuring psm. 574
556 * TODO: reconfigure on interface removal. 575 if (wl->ap_count == 0 && is_ap) { /* first AP */
557 */ 576 /* Configure for power always on */
558 if (!wl->ap_count) { 577 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
559 if (is_ap) { 578 if (ret < 0)
560 /* Configure for power always on */ 579 return ret;
580 /* first STA, no APs */
581 } else if (wl->sta_count == 0 && wl->ap_count == 0 && !is_ap) {
582 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
583 /* Configure for power according to debugfs */
584 if (sta_auth != WL1271_PSM_ILLEGAL)
585 ret = wl1271_acx_sleep_auth(wl, sta_auth);
586 /* Configure for power always on */
587 else if (wl->quirks & WLCORE_QUIRK_NO_ELP)
561 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM); 588 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
562 if (ret < 0) 589 /* Configure for ELP power saving */
563 return ret; 590 else
564 } else if (!wl->sta_count) { 591 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
565 if (wl->quirks & WLCORE_QUIRK_NO_ELP) { 592
566 /* Configure for power always on */ 593 if (ret < 0)
567 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM); 594 return ret;
568 if (ret < 0)
569 return ret;
570 } else {
571 /* Configure for ELP power saving */
572 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
573 if (ret < 0)
574 return ret;
575 }
576 }
577 } 595 }
578 596
579 /* Mode specific init */ 597 /* Mode specific init */
diff --git a/drivers/net/wireless/ti/wlcore/io.c b/drivers/net/wireless/ti/wlcore/io.c
index 7cd0081aede5..68e74eefd296 100644
--- a/drivers/net/wireless/ti/wlcore/io.c
+++ b/drivers/net/wireless/ti/wlcore/io.c
@@ -48,12 +48,24 @@ void wlcore_disable_interrupts(struct wl1271 *wl)
48} 48}
49EXPORT_SYMBOL_GPL(wlcore_disable_interrupts); 49EXPORT_SYMBOL_GPL(wlcore_disable_interrupts);
50 50
51void wlcore_disable_interrupts_nosync(struct wl1271 *wl)
52{
53 disable_irq_nosync(wl->irq);
54}
55EXPORT_SYMBOL_GPL(wlcore_disable_interrupts_nosync);
56
51void wlcore_enable_interrupts(struct wl1271 *wl) 57void wlcore_enable_interrupts(struct wl1271 *wl)
52{ 58{
53 enable_irq(wl->irq); 59 enable_irq(wl->irq);
54} 60}
55EXPORT_SYMBOL_GPL(wlcore_enable_interrupts); 61EXPORT_SYMBOL_GPL(wlcore_enable_interrupts);
56 62
63void wlcore_synchronize_interrupts(struct wl1271 *wl)
64{
65 synchronize_irq(wl->irq);
66}
67EXPORT_SYMBOL_GPL(wlcore_synchronize_interrupts);
68
57int wlcore_translate_addr(struct wl1271 *wl, int addr) 69int wlcore_translate_addr(struct wl1271 *wl, int addr)
58{ 70{
59 struct wlcore_partition_set *part = &wl->curr_part; 71 struct wlcore_partition_set *part = &wl->curr_part;
@@ -122,9 +134,11 @@ EXPORT_SYMBOL_GPL(wlcore_translate_addr);
122 * | | 134 * | |
123 * 135 *
124 */ 136 */
125void wlcore_set_partition(struct wl1271 *wl, 137int wlcore_set_partition(struct wl1271 *wl,
126 const struct wlcore_partition_set *p) 138 const struct wlcore_partition_set *p)
127{ 139{
140 int ret;
141
128 /* copy partition info */ 142 /* copy partition info */
129 memcpy(&wl->curr_part, p, sizeof(*p)); 143 memcpy(&wl->curr_part, p, sizeof(*p));
130 144
@@ -137,28 +151,41 @@ void wlcore_set_partition(struct wl1271 *wl,
137 wl1271_debug(DEBUG_IO, "mem3_start %08X mem3_size %08X", 151 wl1271_debug(DEBUG_IO, "mem3_start %08X mem3_size %08X",
138 p->mem3.start, p->mem3.size); 152 p->mem3.start, p->mem3.size);
139 153
140 wl1271_raw_write32(wl, HW_PART0_START_ADDR, p->mem.start); 154 ret = wlcore_raw_write32(wl, HW_PART0_START_ADDR, p->mem.start);
141 wl1271_raw_write32(wl, HW_PART0_SIZE_ADDR, p->mem.size); 155 if (ret < 0)
142 wl1271_raw_write32(wl, HW_PART1_START_ADDR, p->reg.start); 156 goto out;
143 wl1271_raw_write32(wl, HW_PART1_SIZE_ADDR, p->reg.size); 157
144 wl1271_raw_write32(wl, HW_PART2_START_ADDR, p->mem2.start); 158 ret = wlcore_raw_write32(wl, HW_PART0_SIZE_ADDR, p->mem.size);
145 wl1271_raw_write32(wl, HW_PART2_SIZE_ADDR, p->mem2.size); 159 if (ret < 0)
160 goto out;
161
162 ret = wlcore_raw_write32(wl, HW_PART1_START_ADDR, p->reg.start);
163 if (ret < 0)
164 goto out;
165
166 ret = wlcore_raw_write32(wl, HW_PART1_SIZE_ADDR, p->reg.size);
167 if (ret < 0)
168 goto out;
169
170 ret = wlcore_raw_write32(wl, HW_PART2_START_ADDR, p->mem2.start);
171 if (ret < 0)
172 goto out;
173
174 ret = wlcore_raw_write32(wl, HW_PART2_SIZE_ADDR, p->mem2.size);
175 if (ret < 0)
176 goto out;
177
146 /* 178 /*
147 * We don't need the size of the last partition, as it is 179 * We don't need the size of the last partition, as it is
148 * automatically calculated based on the total memory size and 180 * automatically calculated based on the total memory size and
149 * the sizes of the previous partitions. 181 * the sizes of the previous partitions.
150 */ 182 */
151 wl1271_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start); 183 ret = wlcore_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start);
152}
153EXPORT_SYMBOL_GPL(wlcore_set_partition);
154
155void wlcore_select_partition(struct wl1271 *wl, u8 part)
156{
157 wl1271_debug(DEBUG_IO, "setting partition %d", part);
158 184
159 wlcore_set_partition(wl, &wl->ptable[part]); 185out:
186 return ret;
160} 187}
161EXPORT_SYMBOL_GPL(wlcore_select_partition); 188EXPORT_SYMBOL_GPL(wlcore_set_partition);
162 189
163void wl1271_io_reset(struct wl1271 *wl) 190void wl1271_io_reset(struct wl1271 *wl)
164{ 191{
diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h
index 8942954b56a0..259149f36fae 100644
--- a/drivers/net/wireless/ti/wlcore/io.h
+++ b/drivers/net/wireless/ti/wlcore/io.h
@@ -45,86 +45,122 @@
45struct wl1271; 45struct wl1271;
46 46
47void wlcore_disable_interrupts(struct wl1271 *wl); 47void wlcore_disable_interrupts(struct wl1271 *wl);
48void wlcore_disable_interrupts_nosync(struct wl1271 *wl);
48void wlcore_enable_interrupts(struct wl1271 *wl); 49void wlcore_enable_interrupts(struct wl1271 *wl);
50void wlcore_synchronize_interrupts(struct wl1271 *wl);
49 51
50void wl1271_io_reset(struct wl1271 *wl); 52void wl1271_io_reset(struct wl1271 *wl);
51void wl1271_io_init(struct wl1271 *wl); 53void wl1271_io_init(struct wl1271 *wl);
52int wlcore_translate_addr(struct wl1271 *wl, int addr); 54int wlcore_translate_addr(struct wl1271 *wl, int addr);
53 55
54/* Raw target IO, address is not translated */ 56/* Raw target IO, address is not translated */
55static inline void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf, 57static inline int __must_check wlcore_raw_write(struct wl1271 *wl, int addr,
56 size_t len, bool fixed) 58 void *buf, size_t len,
59 bool fixed)
57{ 60{
58 wl->if_ops->write(wl->dev, addr, buf, len, fixed); 61 int ret;
62
63 if (test_bit(WL1271_FLAG_IO_FAILED, &wl->flags))
64 return -EIO;
65
66 ret = wl->if_ops->write(wl->dev, addr, buf, len, fixed);
67 if (ret && wl->state != WL1271_STATE_OFF)
68 set_bit(WL1271_FLAG_IO_FAILED, &wl->flags);
69
70 return ret;
59} 71}
60 72
61static inline void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf, 73static inline int __must_check wlcore_raw_read(struct wl1271 *wl, int addr,
62 size_t len, bool fixed) 74 void *buf, size_t len,
75 bool fixed)
63{ 76{
64 wl->if_ops->read(wl->dev, addr, buf, len, fixed); 77 int ret;
78
79 if (test_bit(WL1271_FLAG_IO_FAILED, &wl->flags))
80 return -EIO;
81
82 ret = wl->if_ops->read(wl->dev, addr, buf, len, fixed);
83 if (ret && wl->state != WL1271_STATE_OFF)
84 set_bit(WL1271_FLAG_IO_FAILED, &wl->flags);
85
86 return ret;
65} 87}
66 88
67static inline void wlcore_raw_read_data(struct wl1271 *wl, int reg, void *buf, 89static inline int __must_check wlcore_raw_read_data(struct wl1271 *wl, int reg,
68 size_t len, bool fixed) 90 void *buf, size_t len,
91 bool fixed)
69{ 92{
70 wl1271_raw_read(wl, wl->rtable[reg], buf, len, fixed); 93 return wlcore_raw_read(wl, wl->rtable[reg], buf, len, fixed);
71} 94}
72 95
73static inline void wlcore_raw_write_data(struct wl1271 *wl, int reg, void *buf, 96static inline int __must_check wlcore_raw_write_data(struct wl1271 *wl, int reg,
74 size_t len, bool fixed) 97 void *buf, size_t len,
98 bool fixed)
75{ 99{
76 wl1271_raw_write(wl, wl->rtable[reg], buf, len, fixed); 100 return wlcore_raw_write(wl, wl->rtable[reg], buf, len, fixed);
77} 101}
78 102
79static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr) 103static inline int __must_check wlcore_raw_read32(struct wl1271 *wl, int addr,
104 u32 *val)
80{ 105{
81 wl1271_raw_read(wl, addr, &wl->buffer_32, 106 int ret;
82 sizeof(wl->buffer_32), false); 107
108 ret = wlcore_raw_read(wl, addr, &wl->buffer_32,
109 sizeof(wl->buffer_32), false);
110 if (ret < 0)
111 return ret;
112
113 if (val)
114 *val = le32_to_cpu(wl->buffer_32);
83 115
84 return le32_to_cpu(wl->buffer_32); 116 return 0;
85} 117}
86 118
87static inline void wl1271_raw_write32(struct wl1271 *wl, int addr, u32 val) 119static inline int __must_check wlcore_raw_write32(struct wl1271 *wl, int addr,
120 u32 val)
88{ 121{
89 wl->buffer_32 = cpu_to_le32(val); 122 wl->buffer_32 = cpu_to_le32(val);
90 wl1271_raw_write(wl, addr, &wl->buffer_32, 123 return wlcore_raw_write(wl, addr, &wl->buffer_32,
91 sizeof(wl->buffer_32), false); 124 sizeof(wl->buffer_32), false);
92} 125}
93 126
94static inline void wl1271_read(struct wl1271 *wl, int addr, void *buf, 127static inline int __must_check wlcore_read(struct wl1271 *wl, int addr,
95 size_t len, bool fixed) 128 void *buf, size_t len, bool fixed)
96{ 129{
97 int physical; 130 int physical;
98 131
99 physical = wlcore_translate_addr(wl, addr); 132 physical = wlcore_translate_addr(wl, addr);
100 133
101 wl1271_raw_read(wl, physical, buf, len, fixed); 134 return wlcore_raw_read(wl, physical, buf, len, fixed);
102} 135}
103 136
104static inline void wl1271_write(struct wl1271 *wl, int addr, void *buf, 137static inline int __must_check wlcore_write(struct wl1271 *wl, int addr,
105 size_t len, bool fixed) 138 void *buf, size_t len, bool fixed)
106{ 139{
107 int physical; 140 int physical;
108 141
109 physical = wlcore_translate_addr(wl, addr); 142 physical = wlcore_translate_addr(wl, addr);
110 143
111 wl1271_raw_write(wl, physical, buf, len, fixed); 144 return wlcore_raw_write(wl, physical, buf, len, fixed);
112} 145}
113 146
114static inline void wlcore_write_data(struct wl1271 *wl, int reg, void *buf, 147static inline int __must_check wlcore_write_data(struct wl1271 *wl, int reg,
115 size_t len, bool fixed) 148 void *buf, size_t len,
149 bool fixed)
116{ 150{
117 wl1271_write(wl, wl->rtable[reg], buf, len, fixed); 151 return wlcore_write(wl, wl->rtable[reg], buf, len, fixed);
118} 152}
119 153
120static inline void wlcore_read_data(struct wl1271 *wl, int reg, void *buf, 154static inline int __must_check wlcore_read_data(struct wl1271 *wl, int reg,
121 size_t len, bool fixed) 155 void *buf, size_t len,
156 bool fixed)
122{ 157{
123 wl1271_read(wl, wl->rtable[reg], buf, len, fixed); 158 return wlcore_read(wl, wl->rtable[reg], buf, len, fixed);
124} 159}
125 160
126static inline void wl1271_read_hwaddr(struct wl1271 *wl, int hwaddr, 161static inline int __must_check wlcore_read_hwaddr(struct wl1271 *wl, int hwaddr,
127 void *buf, size_t len, bool fixed) 162 void *buf, size_t len,
163 bool fixed)
128{ 164{
129 int physical; 165 int physical;
130 int addr; 166 int addr;
@@ -134,34 +170,47 @@ static inline void wl1271_read_hwaddr(struct wl1271 *wl, int hwaddr,
134 170
135 physical = wlcore_translate_addr(wl, addr); 171 physical = wlcore_translate_addr(wl, addr);
136 172
137 wl1271_raw_read(wl, physical, buf, len, fixed); 173 return wlcore_raw_read(wl, physical, buf, len, fixed);
138} 174}
139 175
140static inline u32 wl1271_read32(struct wl1271 *wl, int addr) 176static inline int __must_check wlcore_read32(struct wl1271 *wl, int addr,
177 u32 *val)
141{ 178{
142 return wl1271_raw_read32(wl, wlcore_translate_addr(wl, addr)); 179 return wlcore_raw_read32(wl, wlcore_translate_addr(wl, addr), val);
143} 180}
144 181
145static inline void wl1271_write32(struct wl1271 *wl, int addr, u32 val) 182static inline int __must_check wlcore_write32(struct wl1271 *wl, int addr,
183 u32 val)
146{ 184{
147 wl1271_raw_write32(wl, wlcore_translate_addr(wl, addr), val); 185 return wlcore_raw_write32(wl, wlcore_translate_addr(wl, addr), val);
148} 186}
149 187
150static inline u32 wlcore_read_reg(struct wl1271 *wl, int reg) 188static inline int __must_check wlcore_read_reg(struct wl1271 *wl, int reg,
189 u32 *val)
151{ 190{
152 return wl1271_raw_read32(wl, 191 return wlcore_raw_read32(wl,
153 wlcore_translate_addr(wl, wl->rtable[reg])); 192 wlcore_translate_addr(wl, wl->rtable[reg]),
193 val);
154} 194}
155 195
156static inline void wlcore_write_reg(struct wl1271 *wl, int reg, u32 val) 196static inline int __must_check wlcore_write_reg(struct wl1271 *wl, int reg,
197 u32 val)
157{ 198{
158 wl1271_raw_write32(wl, wlcore_translate_addr(wl, wl->rtable[reg]), val); 199 return wlcore_raw_write32(wl,
200 wlcore_translate_addr(wl, wl->rtable[reg]),
201 val);
159} 202}
160 203
161static inline void wl1271_power_off(struct wl1271 *wl) 204static inline void wl1271_power_off(struct wl1271 *wl)
162{ 205{
163 wl->if_ops->power(wl->dev, false); 206 int ret;
164 clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags); 207
208 if (!test_bit(WL1271_FLAG_GPIO_POWER, &wl->flags))
209 return;
210
211 ret = wl->if_ops->power(wl->dev, false);
212 if (!ret)
213 clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
165} 214}
166 215
167static inline int wl1271_power_on(struct wl1271 *wl) 216static inline int wl1271_power_on(struct wl1271 *wl)
@@ -173,8 +222,8 @@ static inline int wl1271_power_on(struct wl1271 *wl)
173 return ret; 222 return ret;
174} 223}
175 224
176void wlcore_set_partition(struct wl1271 *wl, 225int wlcore_set_partition(struct wl1271 *wl,
177 const struct wlcore_partition_set *p); 226 const struct wlcore_partition_set *p);
178 227
179bool wl1271_set_block_size(struct wl1271 *wl); 228bool wl1271_set_block_size(struct wl1271 *wl);
180 229
@@ -182,6 +231,4 @@ bool wl1271_set_block_size(struct wl1271 *wl);
182 231
183int wl1271_tx_dummy_packet(struct wl1271 *wl); 232int wl1271_tx_dummy_packet(struct wl1271 *wl);
184 233
185void wlcore_select_partition(struct wl1271 *wl, u8 part);
186
187#endif 234#endif
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index acef93390d3d..9f04b64dfa33 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -62,7 +62,7 @@ static bool no_recovery;
62static void __wl1271_op_remove_interface(struct wl1271 *wl, 62static void __wl1271_op_remove_interface(struct wl1271 *wl,
63 struct ieee80211_vif *vif, 63 struct ieee80211_vif *vif,
64 bool reset_tx_queues); 64 bool reset_tx_queues);
65static void wl1271_op_stop(struct ieee80211_hw *hw); 65static void wlcore_op_stop_locked(struct wl1271 *wl);
66static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif); 66static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
67 67
68static int wl12xx_set_authorized(struct wl1271 *wl, 68static int wl12xx_set_authorized(struct wl1271 *wl,
@@ -320,46 +320,6 @@ static void wlcore_adjust_conf(struct wl1271 *wl)
320 } 320 }
321} 321}
322 322
323static int wl1271_plt_init(struct wl1271 *wl)
324{
325 int ret;
326
327 ret = wl->ops->hw_init(wl);
328 if (ret < 0)
329 return ret;
330
331 ret = wl1271_acx_init_mem_config(wl);
332 if (ret < 0)
333 return ret;
334
335 ret = wl12xx_acx_mem_cfg(wl);
336 if (ret < 0)
337 goto out_free_memmap;
338
339 /* Enable data path */
340 ret = wl1271_cmd_data_path(wl, 1);
341 if (ret < 0)
342 goto out_free_memmap;
343
344 /* Configure for CAM power saving (ie. always active) */
345 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
346 if (ret < 0)
347 goto out_free_memmap;
348
349 /* configure PM */
350 ret = wl1271_acx_pm_config(wl);
351 if (ret < 0)
352 goto out_free_memmap;
353
354 return 0;
355
356 out_free_memmap:
357 kfree(wl->target_mem_map);
358 wl->target_mem_map = NULL;
359
360 return ret;
361}
362
363static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, 323static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
364 struct wl12xx_vif *wlvif, 324 struct wl12xx_vif *wlvif,
365 u8 hlid, u8 tx_pkts) 325 u8 hlid, u8 tx_pkts)
@@ -387,7 +347,7 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
387 347
388static void wl12xx_irq_update_links_status(struct wl1271 *wl, 348static void wl12xx_irq_update_links_status(struct wl1271 *wl,
389 struct wl12xx_vif *wlvif, 349 struct wl12xx_vif *wlvif,
390 struct wl_fw_status *status) 350 struct wl_fw_status_2 *status)
391{ 351{
392 struct wl1271_link *lnk; 352 struct wl1271_link *lnk;
393 u32 cur_fw_ps_map; 353 u32 cur_fw_ps_map;
@@ -418,8 +378,9 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl,
418 } 378 }
419} 379}
420 380
421static void wl12xx_fw_status(struct wl1271 *wl, 381static int wlcore_fw_status(struct wl1271 *wl,
422 struct wl_fw_status *status) 382 struct wl_fw_status_1 *status_1,
383 struct wl_fw_status_2 *status_2)
423{ 384{
424 struct wl12xx_vif *wlvif; 385 struct wl12xx_vif *wlvif;
425 struct timespec ts; 386 struct timespec ts;
@@ -427,38 +388,42 @@ static void wl12xx_fw_status(struct wl1271 *wl,
427 int avail, freed_blocks; 388 int avail, freed_blocks;
428 int i; 389 int i;
429 size_t status_len; 390 size_t status_len;
391 int ret;
430 392
431 status_len = sizeof(*status) + wl->fw_status_priv_len; 393 status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
394 sizeof(*status_2) + wl->fw_status_priv_len;
432 395
433 wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status, 396 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status_1,
434 status_len, false); 397 status_len, false);
398 if (ret < 0)
399 return ret;
435 400
436 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, " 401 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
437 "drv_rx_counter = %d, tx_results_counter = %d)", 402 "drv_rx_counter = %d, tx_results_counter = %d)",
438 status->intr, 403 status_1->intr,
439 status->fw_rx_counter, 404 status_1->fw_rx_counter,
440 status->drv_rx_counter, 405 status_1->drv_rx_counter,
441 status->tx_results_counter); 406 status_1->tx_results_counter);
442 407
443 for (i = 0; i < NUM_TX_QUEUES; i++) { 408 for (i = 0; i < NUM_TX_QUEUES; i++) {
444 /* prevent wrap-around in freed-packets counter */ 409 /* prevent wrap-around in freed-packets counter */
445 wl->tx_allocated_pkts[i] -= 410 wl->tx_allocated_pkts[i] -=
446 (status->counters.tx_released_pkts[i] - 411 (status_2->counters.tx_released_pkts[i] -
447 wl->tx_pkts_freed[i]) & 0xff; 412 wl->tx_pkts_freed[i]) & 0xff;
448 413
449 wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i]; 414 wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i];
450 } 415 }
451 416
452 /* prevent wrap-around in total blocks counter */ 417 /* prevent wrap-around in total blocks counter */
453 if (likely(wl->tx_blocks_freed <= 418 if (likely(wl->tx_blocks_freed <=
454 le32_to_cpu(status->total_released_blks))) 419 le32_to_cpu(status_2->total_released_blks)))
455 freed_blocks = le32_to_cpu(status->total_released_blks) - 420 freed_blocks = le32_to_cpu(status_2->total_released_blks) -
456 wl->tx_blocks_freed; 421 wl->tx_blocks_freed;
457 else 422 else
458 freed_blocks = 0x100000000LL - wl->tx_blocks_freed + 423 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
459 le32_to_cpu(status->total_released_blks); 424 le32_to_cpu(status_2->total_released_blks);
460 425
461 wl->tx_blocks_freed = le32_to_cpu(status->total_released_blks); 426 wl->tx_blocks_freed = le32_to_cpu(status_2->total_released_blks);
462 427
463 wl->tx_allocated_blocks -= freed_blocks; 428 wl->tx_allocated_blocks -= freed_blocks;
464 429
@@ -474,7 +439,7 @@ static void wl12xx_fw_status(struct wl1271 *wl,
474 cancel_delayed_work(&wl->tx_watchdog_work); 439 cancel_delayed_work(&wl->tx_watchdog_work);
475 } 440 }
476 441
477 avail = le32_to_cpu(status->tx_total) - wl->tx_allocated_blocks; 442 avail = le32_to_cpu(status_2->tx_total) - wl->tx_allocated_blocks;
478 443
479 /* 444 /*
480 * The FW might change the total number of TX memblocks before 445 * The FW might change the total number of TX memblocks before
@@ -493,13 +458,15 @@ static void wl12xx_fw_status(struct wl1271 *wl,
493 458
494 /* for AP update num of allocated TX blocks per link and ps status */ 459 /* for AP update num of allocated TX blocks per link and ps status */
495 wl12xx_for_each_wlvif_ap(wl, wlvif) { 460 wl12xx_for_each_wlvif_ap(wl, wlvif) {
496 wl12xx_irq_update_links_status(wl, wlvif, status); 461 wl12xx_irq_update_links_status(wl, wlvif, status_2);
497 } 462 }
498 463
499 /* update the host-chipset time offset */ 464 /* update the host-chipset time offset */
500 getnstimeofday(&ts); 465 getnstimeofday(&ts);
501 wl->time_offset = (timespec_to_ns(&ts) >> 10) - 466 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
502 (s64)le32_to_cpu(status->fw_localtime); 467 (s64)le32_to_cpu(status_2->fw_localtime);
468
469 return 0;
503} 470}
504 471
505static void wl1271_flush_deferred_work(struct wl1271 *wl) 472static void wl1271_flush_deferred_work(struct wl1271 *wl)
@@ -527,20 +494,15 @@ static void wl1271_netstack_work(struct work_struct *work)
527 494
528#define WL1271_IRQ_MAX_LOOPS 256 495#define WL1271_IRQ_MAX_LOOPS 256
529 496
530static irqreturn_t wl1271_irq(int irq, void *cookie) 497static int wlcore_irq_locked(struct wl1271 *wl)
531{ 498{
532 int ret; 499 int ret = 0;
533 u32 intr; 500 u32 intr;
534 int loopcount = WL1271_IRQ_MAX_LOOPS; 501 int loopcount = WL1271_IRQ_MAX_LOOPS;
535 struct wl1271 *wl = (struct wl1271 *)cookie;
536 bool done = false; 502 bool done = false;
537 unsigned int defer_count; 503 unsigned int defer_count;
538 unsigned long flags; 504 unsigned long flags;
539 505
540 /* TX might be handled here, avoid redundant work */
541 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
542 cancel_work_sync(&wl->tx_work);
543
544 /* 506 /*
545 * In case edge triggered interrupt must be used, we cannot iterate 507 * In case edge triggered interrupt must be used, we cannot iterate
546 * more than once without introducing race conditions with the hardirq. 508 * more than once without introducing race conditions with the hardirq.
@@ -548,8 +510,6 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
548 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) 510 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
549 loopcount = 1; 511 loopcount = 1;
550 512
551 mutex_lock(&wl->mutex);
552
553 wl1271_debug(DEBUG_IRQ, "IRQ work"); 513 wl1271_debug(DEBUG_IRQ, "IRQ work");
554 514
555 if (unlikely(wl->state == WL1271_STATE_OFF)) 515 if (unlikely(wl->state == WL1271_STATE_OFF))
@@ -568,21 +528,33 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
568 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags); 528 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
569 smp_mb__after_clear_bit(); 529 smp_mb__after_clear_bit();
570 530
571 wl12xx_fw_status(wl, wl->fw_status); 531 ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
532 if (ret < 0)
533 goto out;
572 534
573 wlcore_hw_tx_immediate_compl(wl); 535 wlcore_hw_tx_immediate_compl(wl);
574 536
575 intr = le32_to_cpu(wl->fw_status->intr); 537 intr = le32_to_cpu(wl->fw_status_1->intr);
576 intr &= WL1271_INTR_MASK; 538 intr &= WLCORE_ALL_INTR_MASK;
577 if (!intr) { 539 if (!intr) {
578 done = true; 540 done = true;
579 continue; 541 continue;
580 } 542 }
581 543
582 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) { 544 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
583 wl1271_error("watchdog interrupt received! " 545 wl1271_error("HW watchdog interrupt received! starting recovery.");
546 wl->watchdog_recovery = true;
547 ret = -EIO;
548
549 /* restarting the chip. ignore any other interrupt. */
550 goto out;
551 }
552
553 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
554 wl1271_error("SW watchdog interrupt received! "
584 "starting recovery."); 555 "starting recovery.");
585 wl12xx_queue_recovery_work(wl); 556 wl->watchdog_recovery = true;
557 ret = -EIO;
586 558
587 /* restarting the chip. ignore any other interrupt. */ 559 /* restarting the chip. ignore any other interrupt. */
588 goto out; 560 goto out;
@@ -591,7 +563,9 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
591 if (likely(intr & WL1271_ACX_INTR_DATA)) { 563 if (likely(intr & WL1271_ACX_INTR_DATA)) {
592 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA"); 564 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
593 565
594 wl12xx_rx(wl, wl->fw_status); 566 ret = wlcore_rx(wl, wl->fw_status_1);
567 if (ret < 0)
568 goto out;
595 569
596 /* Check if any tx blocks were freed */ 570 /* Check if any tx blocks were freed */
597 spin_lock_irqsave(&wl->wl_lock, flags); 571 spin_lock_irqsave(&wl->wl_lock, flags);
@@ -602,13 +576,17 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
602 * In order to avoid starvation of the TX path, 576 * In order to avoid starvation of the TX path,
603 * call the work function directly. 577 * call the work function directly.
604 */ 578 */
605 wl1271_tx_work_locked(wl); 579 ret = wlcore_tx_work_locked(wl);
580 if (ret < 0)
581 goto out;
606 } else { 582 } else {
607 spin_unlock_irqrestore(&wl->wl_lock, flags); 583 spin_unlock_irqrestore(&wl->wl_lock, flags);
608 } 584 }
609 585
610 /* check for tx results */ 586 /* check for tx results */
611 wlcore_hw_tx_delayed_compl(wl); 587 ret = wlcore_hw_tx_delayed_compl(wl);
588 if (ret < 0)
589 goto out;
612 590
613 /* Make sure the deferred queues don't get too long */ 591 /* Make sure the deferred queues don't get too long */
614 defer_count = skb_queue_len(&wl->deferred_tx_queue) + 592 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
@@ -619,12 +597,16 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
619 597
620 if (intr & WL1271_ACX_INTR_EVENT_A) { 598 if (intr & WL1271_ACX_INTR_EVENT_A) {
621 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A"); 599 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
622 wl1271_event_handle(wl, 0); 600 ret = wl1271_event_handle(wl, 0);
601 if (ret < 0)
602 goto out;
623 } 603 }
624 604
625 if (intr & WL1271_ACX_INTR_EVENT_B) { 605 if (intr & WL1271_ACX_INTR_EVENT_B) {
626 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B"); 606 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
627 wl1271_event_handle(wl, 1); 607 ret = wl1271_event_handle(wl, 1);
608 if (ret < 0)
609 goto out;
628 } 610 }
629 611
630 if (intr & WL1271_ACX_INTR_INIT_COMPLETE) 612 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
@@ -638,6 +620,25 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
638 wl1271_ps_elp_sleep(wl); 620 wl1271_ps_elp_sleep(wl);
639 621
640out: 622out:
623 return ret;
624}
625
626static irqreturn_t wlcore_irq(int irq, void *cookie)
627{
628 int ret;
629 unsigned long flags;
630 struct wl1271 *wl = cookie;
631
632 /* TX might be handled here, avoid redundant work */
633 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
634 cancel_work_sync(&wl->tx_work);
635
636 mutex_lock(&wl->mutex);
637
638 ret = wlcore_irq_locked(wl);
639 if (ret)
640 wl12xx_queue_recovery_work(wl);
641
641 spin_lock_irqsave(&wl->wl_lock, flags); 642 spin_lock_irqsave(&wl->wl_lock, flags);
642 /* In case TX was not handled here, queue TX work */ 643 /* In case TX was not handled here, queue TX work */
643 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags); 644 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
@@ -743,7 +744,7 @@ out:
743 return ret; 744 return ret;
744} 745}
745 746
746static int wl1271_fetch_nvs(struct wl1271 *wl) 747static void wl1271_fetch_nvs(struct wl1271 *wl)
747{ 748{
748 const struct firmware *fw; 749 const struct firmware *fw;
749 int ret; 750 int ret;
@@ -751,16 +752,15 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
751 ret = request_firmware(&fw, WL12XX_NVS_NAME, wl->dev); 752 ret = request_firmware(&fw, WL12XX_NVS_NAME, wl->dev);
752 753
753 if (ret < 0) { 754 if (ret < 0) {
754 wl1271_error("could not get nvs file %s: %d", WL12XX_NVS_NAME, 755 wl1271_debug(DEBUG_BOOT, "could not get nvs file %s: %d",
755 ret); 756 WL12XX_NVS_NAME, ret);
756 return ret; 757 return;
757 } 758 }
758 759
759 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL); 760 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
760 761
761 if (!wl->nvs) { 762 if (!wl->nvs) {
762 wl1271_error("could not allocate memory for the nvs file"); 763 wl1271_error("could not allocate memory for the nvs file");
763 ret = -ENOMEM;
764 goto out; 764 goto out;
765 } 765 }
766 766
@@ -768,14 +768,17 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
768 768
769out: 769out:
770 release_firmware(fw); 770 release_firmware(fw);
771
772 return ret;
773} 771}
774 772
775void wl12xx_queue_recovery_work(struct wl1271 *wl) 773void wl12xx_queue_recovery_work(struct wl1271 *wl)
776{ 774{
777 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) 775 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
776
777 /* Avoid a recursive recovery */
778 if (!test_and_set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
779 wlcore_disable_interrupts_nosync(wl);
778 ieee80211_queue_work(wl->hw, &wl->recovery_work); 780 ieee80211_queue_work(wl->hw, &wl->recovery_work);
781 }
779} 782}
780 783
781size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen) 784size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
@@ -801,14 +804,17 @@ size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
801 return len; 804 return len;
802} 805}
803 806
807#define WLCORE_FW_LOG_END 0x2000000
808
804static void wl12xx_read_fwlog_panic(struct wl1271 *wl) 809static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
805{ 810{
806 u32 addr; 811 u32 addr;
807 u32 first_addr; 812 u32 offset;
813 u32 end_of_log;
808 u8 *block; 814 u8 *block;
815 int ret;
809 816
810 if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) || 817 if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
811 (wl->conf.fwlog.mode != WL12XX_FWLOG_ON_DEMAND) ||
812 (wl->conf.fwlog.mem_blocks == 0)) 818 (wl->conf.fwlog.mem_blocks == 0))
813 return; 819 return;
814 820
@@ -820,34 +826,49 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
820 826
821 /* 827 /*
822 * Make sure the chip is awake and the logger isn't active. 828 * Make sure the chip is awake and the logger isn't active.
823 * This might fail if the firmware hanged. 829 * Do not send a stop fwlog command if the fw is hanged.
824 */ 830 */
825 if (!wl1271_ps_elp_wakeup(wl)) 831 if (wl1271_ps_elp_wakeup(wl))
832 goto out;
833 if (!wl->watchdog_recovery)
826 wl12xx_cmd_stop_fwlog(wl); 834 wl12xx_cmd_stop_fwlog(wl);
827 835
828 /* Read the first memory block address */ 836 /* Read the first memory block address */
829 wl12xx_fw_status(wl, wl->fw_status); 837 ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
830 first_addr = le32_to_cpu(wl->fw_status->log_start_addr); 838 if (ret < 0)
831 if (!first_addr)
832 goto out; 839 goto out;
833 840
841 addr = le32_to_cpu(wl->fw_status_2->log_start_addr);
842 if (!addr)
843 goto out;
844
845 if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
846 offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
847 end_of_log = WLCORE_FW_LOG_END;
848 } else {
849 offset = sizeof(addr);
850 end_of_log = addr;
851 }
852
834 /* Traverse the memory blocks linked list */ 853 /* Traverse the memory blocks linked list */
835 addr = first_addr;
836 do { 854 do {
837 memset(block, 0, WL12XX_HW_BLOCK_SIZE); 855 memset(block, 0, WL12XX_HW_BLOCK_SIZE);
838 wl1271_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE, 856 ret = wlcore_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
839 false); 857 false);
858 if (ret < 0)
859 goto out;
840 860
841 /* 861 /*
842 * Memory blocks are linked to one another. The first 4 bytes 862 * Memory blocks are linked to one another. The first 4 bytes
843 * of each memory block hold the hardware address of the next 863 * of each memory block hold the hardware address of the next
844 * one. The last memory block points to the first one. 864 * one. The last memory block points to the first one in
865 * on demand mode and is equal to 0x2000000 in continuous mode.
845 */ 866 */
846 addr = le32_to_cpup((__le32 *)block); 867 addr = le32_to_cpup((__le32 *)block);
847 if (!wl12xx_copy_fwlog(wl, block + sizeof(addr), 868 if (!wl12xx_copy_fwlog(wl, block + offset,
848 WL12XX_HW_BLOCK_SIZE - sizeof(addr))) 869 WL12XX_HW_BLOCK_SIZE - offset))
849 break; 870 break;
850 } while (addr && (addr != first_addr)); 871 } while (addr && (addr != end_of_log));
851 872
852 wake_up_interruptible(&wl->fwlog_waitq); 873 wake_up_interruptible(&wl->fwlog_waitq);
853 874
@@ -855,6 +876,34 @@ out:
855 kfree(block); 876 kfree(block);
856} 877}
857 878
879static void wlcore_print_recovery(struct wl1271 *wl)
880{
881 u32 pc = 0;
882 u32 hint_sts = 0;
883 int ret;
884
885 wl1271_info("Hardware recovery in progress. FW ver: %s",
886 wl->chip.fw_ver_str);
887
888 /* change partitions momentarily so we can read the FW pc */
889 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
890 if (ret < 0)
891 return;
892
893 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
894 if (ret < 0)
895 return;
896
897 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
898 if (ret < 0)
899 return;
900
901 wl1271_info("pc: 0x%x, hint_sts: 0x%08x", pc, hint_sts);
902
903 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
904}
905
906
858static void wl1271_recovery_work(struct work_struct *work) 907static void wl1271_recovery_work(struct work_struct *work)
859{ 908{
860 struct wl1271 *wl = 909 struct wl1271 *wl =
@@ -867,26 +916,19 @@ static void wl1271_recovery_work(struct work_struct *work)
867 if (wl->state != WL1271_STATE_ON || wl->plt) 916 if (wl->state != WL1271_STATE_ON || wl->plt)
868 goto out_unlock; 917 goto out_unlock;
869 918
870 /* Avoid a recursive recovery */ 919 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
871 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags); 920 wl12xx_read_fwlog_panic(wl);
872 921 wlcore_print_recovery(wl);
873 wl12xx_read_fwlog_panic(wl); 922 }
874
875 wl1271_info("Hardware recovery in progress. FW ver: %s pc: 0x%x",
876 wl->chip.fw_ver_str,
877 wlcore_read_reg(wl, REG_PC_ON_RECOVERY));
878 923
879 BUG_ON(bug_on_recovery && 924 BUG_ON(bug_on_recovery &&
880 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)); 925 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
881 926
882 if (no_recovery) { 927 if (no_recovery) {
883 wl1271_info("No recovery (chosen on module load). Fw will remain stuck."); 928 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
884 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
885 goto out_unlock; 929 goto out_unlock;
886 } 930 }
887 931
888 BUG_ON(bug_on_recovery);
889
890 /* 932 /*
891 * Advance security sequence number to overcome potential progress 933 * Advance security sequence number to overcome potential progress
892 * in the firmware during recovery. This doens't hurt if the network is 934 * in the firmware during recovery. This doens't hurt if the network is
@@ -900,7 +942,7 @@ static void wl1271_recovery_work(struct work_struct *work)
900 } 942 }
901 943
902 /* Prevent spurious TX during FW restart */ 944 /* Prevent spurious TX during FW restart */
903 ieee80211_stop_queues(wl->hw); 945 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
904 946
905 if (wl->sched_scanning) { 947 if (wl->sched_scanning) {
906 ieee80211_sched_scan_stopped(wl->hw); 948 ieee80211_sched_scan_stopped(wl->hw);
@@ -914,10 +956,8 @@ static void wl1271_recovery_work(struct work_struct *work)
914 vif = wl12xx_wlvif_to_vif(wlvif); 956 vif = wl12xx_wlvif_to_vif(wlvif);
915 __wl1271_op_remove_interface(wl, vif, false); 957 __wl1271_op_remove_interface(wl, vif, false);
916 } 958 }
917 mutex_unlock(&wl->mutex);
918 wl1271_op_stop(wl->hw);
919 959
920 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags); 960 wlcore_op_stop_locked(wl);
921 961
922 ieee80211_restart_hw(wl->hw); 962 ieee80211_restart_hw(wl->hw);
923 963
@@ -925,26 +965,34 @@ static void wl1271_recovery_work(struct work_struct *work)
925 * Its safe to enable TX now - the queues are stopped after a request 965 * Its safe to enable TX now - the queues are stopped after a request
926 * to restart the HW. 966 * to restart the HW.
927 */ 967 */
928 ieee80211_wake_queues(wl->hw); 968 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
929 return; 969
930out_unlock: 970out_unlock:
971 wl->watchdog_recovery = false;
972 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
931 mutex_unlock(&wl->mutex); 973 mutex_unlock(&wl->mutex);
932} 974}
933 975
934static void wl1271_fw_wakeup(struct wl1271 *wl) 976static int wlcore_fw_wakeup(struct wl1271 *wl)
935{ 977{
936 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP); 978 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
937} 979}
938 980
939static int wl1271_setup(struct wl1271 *wl) 981static int wl1271_setup(struct wl1271 *wl)
940{ 982{
941 wl->fw_status = kmalloc(sizeof(*wl->fw_status), GFP_KERNEL); 983 wl->fw_status_1 = kmalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
942 if (!wl->fw_status) 984 sizeof(*wl->fw_status_2) +
985 wl->fw_status_priv_len, GFP_KERNEL);
986 if (!wl->fw_status_1)
943 return -ENOMEM; 987 return -ENOMEM;
944 988
989 wl->fw_status_2 = (struct wl_fw_status_2 *)
990 (((u8 *) wl->fw_status_1) +
991 WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));
992
945 wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL); 993 wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
946 if (!wl->tx_res_if) { 994 if (!wl->tx_res_if) {
947 kfree(wl->fw_status); 995 kfree(wl->fw_status_1);
948 return -ENOMEM; 996 return -ENOMEM;
949 } 997 }
950 998
@@ -963,13 +1011,21 @@ static int wl12xx_set_power_on(struct wl1271 *wl)
963 wl1271_io_reset(wl); 1011 wl1271_io_reset(wl);
964 wl1271_io_init(wl); 1012 wl1271_io_init(wl);
965 1013
966 wlcore_set_partition(wl, &wl->ptable[PART_BOOT]); 1014 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1015 if (ret < 0)
1016 goto fail;
967 1017
968 /* ELP module wake up */ 1018 /* ELP module wake up */
969 wl1271_fw_wakeup(wl); 1019 ret = wlcore_fw_wakeup(wl);
1020 if (ret < 0)
1021 goto fail;
970 1022
971out: 1023out:
972 return ret; 1024 return ret;
1025
1026fail:
1027 wl1271_power_off(wl);
1028 return ret;
973} 1029}
974 1030
975static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt) 1031static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
@@ -987,13 +1043,12 @@ static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
987 * simplify the code and since the performance impact is 1043 * simplify the code and since the performance impact is
988 * negligible, we use the same block size for all different 1044 * negligible, we use the same block size for all different
989 * chip types. 1045 * chip types.
1046 *
1047 * Check if the bus supports blocksize alignment and, if it
1048 * doesn't, make sure we don't have the quirk.
990 */ 1049 */
991 if (wl1271_set_block_size(wl)) 1050 if (!wl1271_set_block_size(wl))
992 wl->quirks |= WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN; 1051 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
993
994 ret = wl->ops->identify_chip(wl);
995 if (ret < 0)
996 goto out;
997 1052
998 /* TODO: make sure the lower driver has set things up correctly */ 1053 /* TODO: make sure the lower driver has set things up correctly */
999 1054
@@ -1005,13 +1060,6 @@ static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1005 if (ret < 0) 1060 if (ret < 0)
1006 goto out; 1061 goto out;
1007 1062
1008 /* No NVS from netlink, try to get it from the filesystem */
1009 if (wl->nvs == NULL) {
1010 ret = wl1271_fetch_nvs(wl);
1011 if (ret < 0)
1012 goto out;
1013 }
1014
1015out: 1063out:
1016 return ret; 1064 return ret;
1017} 1065}
@@ -1039,14 +1087,10 @@ int wl1271_plt_start(struct wl1271 *wl)
1039 if (ret < 0) 1087 if (ret < 0)
1040 goto power_off; 1088 goto power_off;
1041 1089
1042 ret = wl->ops->boot(wl); 1090 ret = wl->ops->plt_init(wl);
1043 if (ret < 0) 1091 if (ret < 0)
1044 goto power_off; 1092 goto power_off;
1045 1093
1046 ret = wl1271_plt_init(wl);
1047 if (ret < 0)
1048 goto irq_disable;
1049
1050 wl->plt = true; 1094 wl->plt = true;
1051 wl->state = WL1271_STATE_ON; 1095 wl->state = WL1271_STATE_ON;
1052 wl1271_notice("firmware booted in PLT mode (%s)", 1096 wl1271_notice("firmware booted in PLT mode (%s)",
@@ -1059,19 +1103,6 @@ int wl1271_plt_start(struct wl1271 *wl)
1059 1103
1060 goto out; 1104 goto out;
1061 1105
1062irq_disable:
1063 mutex_unlock(&wl->mutex);
1064 /* Unlocking the mutex in the middle of handling is
1065 inherently unsafe. In this case we deem it safe to do,
1066 because we need to let any possibly pending IRQ out of
1067 the system (and while we are WL1271_STATE_OFF the IRQ
1068 work function will not do anything.) Also, any other
1069 possible concurrent operations will fail due to the
1070 current state, hence the wl1271 struct should be safe. */
1071 wlcore_disable_interrupts(wl);
1072 wl1271_flush_deferred_work(wl);
1073 cancel_work_sync(&wl->netstack_work);
1074 mutex_lock(&wl->mutex);
1075power_off: 1106power_off:
1076 wl1271_power_off(wl); 1107 wl1271_power_off(wl);
1077 } 1108 }
@@ -1125,6 +1156,7 @@ int wl1271_plt_stop(struct wl1271 *wl)
1125 mutex_lock(&wl->mutex); 1156 mutex_lock(&wl->mutex);
1126 wl1271_power_off(wl); 1157 wl1271_power_off(wl);
1127 wl->flags = 0; 1158 wl->flags = 0;
1159 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1128 wl->state = WL1271_STATE_OFF; 1160 wl->state = WL1271_STATE_OFF;
1129 wl->plt = false; 1161 wl->plt = false;
1130 wl->rx_counter = 0; 1162 wl->rx_counter = 0;
@@ -1154,9 +1186,16 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1154 1186
1155 spin_lock_irqsave(&wl->wl_lock, flags); 1187 spin_lock_irqsave(&wl->wl_lock, flags);
1156 1188
1157 /* queue the packet */ 1189 /*
1190 * drop the packet if the link is invalid or the queue is stopped
1191 * for any reason but watermark. Watermark is a "soft"-stop so we
1192 * allow these packets through.
1193 */
1158 if (hlid == WL12XX_INVALID_LINK_ID || 1194 if (hlid == WL12XX_INVALID_LINK_ID ||
1159 (wlvif && !test_bit(hlid, wlvif->links_map))) { 1195 (wlvif && !test_bit(hlid, wlvif->links_map)) ||
1196 (wlcore_is_queue_stopped(wl, q) &&
1197 !wlcore_is_queue_stopped_by_reason(wl, q,
1198 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1160 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q); 1199 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1161 ieee80211_free_txskb(hw, skb); 1200 ieee80211_free_txskb(hw, skb);
1162 goto out; 1201 goto out;
@@ -1172,10 +1211,12 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1172 * The workqueue is slow to process the tx_queue and we need stop 1211 * The workqueue is slow to process the tx_queue and we need stop
1173 * the queue here, otherwise the queue will get too long. 1212 * the queue here, otherwise the queue will get too long.
1174 */ 1213 */
1175 if (wl->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK) { 1214 if (wl->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1215 !wlcore_is_queue_stopped_by_reason(wl, q,
1216 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1176 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q); 1217 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1177 ieee80211_stop_queue(wl->hw, mapping); 1218 wlcore_stop_queue_locked(wl, q,
1178 set_bit(q, &wl->stopped_queues_map); 1219 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1179 } 1220 }
1180 1221
1181 /* 1222 /*
@@ -1209,7 +1250,7 @@ int wl1271_tx_dummy_packet(struct wl1271 *wl)
1209 1250
1210 /* The FW is low on RX memory blocks, so send the dummy packet asap */ 1251 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1211 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) 1252 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1212 wl1271_tx_work_locked(wl); 1253 return wlcore_tx_work_locked(wl);
1213 1254
1214 /* 1255 /*
1215 * If the FW TX is busy, TX work will be scheduled by the threaded 1256 * If the FW TX is busy, TX work will be scheduled by the threaded
@@ -1476,8 +1517,15 @@ static int wl1271_configure_wowlan(struct wl1271 *wl,
1476 int i, ret; 1517 int i, ret;
1477 1518
1478 if (!wow || wow->any || !wow->n_patterns) { 1519 if (!wow || wow->any || !wow->n_patterns) {
1479 wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL); 1520 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1480 wl1271_rx_filter_clear_all(wl); 1521 FILTER_SIGNAL);
1522 if (ret)
1523 goto out;
1524
1525 ret = wl1271_rx_filter_clear_all(wl);
1526 if (ret)
1527 goto out;
1528
1481 return 0; 1529 return 0;
1482 } 1530 }
1483 1531
@@ -1493,8 +1541,13 @@ static int wl1271_configure_wowlan(struct wl1271 *wl,
1493 } 1541 }
1494 } 1542 }
1495 1543
1496 wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL); 1544 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1497 wl1271_rx_filter_clear_all(wl); 1545 if (ret)
1546 goto out;
1547
1548 ret = wl1271_rx_filter_clear_all(wl);
1549 if (ret)
1550 goto out;
1498 1551
1499 /* Translate WoWLAN patterns into filters */ 1552 /* Translate WoWLAN patterns into filters */
1500 for (i = 0; i < wow->n_patterns; i++) { 1553 for (i = 0; i < wow->n_patterns; i++) {
@@ -1536,7 +1589,10 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1536 if (ret < 0) 1589 if (ret < 0)
1537 goto out; 1590 goto out;
1538 1591
1539 wl1271_configure_wowlan(wl, wow); 1592 ret = wl1271_configure_wowlan(wl, wow);
1593 if (ret < 0)
1594 goto out_sleep;
1595
1540 ret = wl1271_acx_wake_up_conditions(wl, wlvif, 1596 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1541 wl->conf.conn.suspend_wake_up_event, 1597 wl->conf.conn.suspend_wake_up_event,
1542 wl->conf.conn.suspend_listen_interval); 1598 wl->conf.conn.suspend_listen_interval);
@@ -1544,8 +1600,8 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1544 if (ret < 0) 1600 if (ret < 0)
1545 wl1271_error("suspend: set wake up conditions failed: %d", ret); 1601 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1546 1602
1603out_sleep:
1547 wl1271_ps_elp_sleep(wl); 1604 wl1271_ps_elp_sleep(wl);
1548
1549out: 1605out:
1550 return ret; 1606 return ret;
1551 1607
@@ -1624,6 +1680,12 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
1624 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow); 1680 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1625 WARN_ON(!wow); 1681 WARN_ON(!wow);
1626 1682
1683 /* we want to perform the recovery before suspending */
1684 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1685 wl1271_warning("postponing suspend to perform recovery");
1686 return -EBUSY;
1687 }
1688
1627 wl1271_tx_flush(wl); 1689 wl1271_tx_flush(wl);
1628 1690
1629 mutex_lock(&wl->mutex); 1691 mutex_lock(&wl->mutex);
@@ -1664,7 +1726,8 @@ static int wl1271_op_resume(struct ieee80211_hw *hw)
1664 struct wl1271 *wl = hw->priv; 1726 struct wl1271 *wl = hw->priv;
1665 struct wl12xx_vif *wlvif; 1727 struct wl12xx_vif *wlvif;
1666 unsigned long flags; 1728 unsigned long flags;
1667 bool run_irq_work = false; 1729 bool run_irq_work = false, pending_recovery;
1730 int ret;
1668 1731
1669 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d", 1732 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1670 wl->wow_enabled); 1733 wl->wow_enabled);
@@ -1680,17 +1743,37 @@ static int wl1271_op_resume(struct ieee80211_hw *hw)
1680 run_irq_work = true; 1743 run_irq_work = true;
1681 spin_unlock_irqrestore(&wl->wl_lock, flags); 1744 spin_unlock_irqrestore(&wl->wl_lock, flags);
1682 1745
1746 mutex_lock(&wl->mutex);
1747
1748 /* test the recovery flag before calling any SDIO functions */
1749 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1750 &wl->flags);
1751
1683 if (run_irq_work) { 1752 if (run_irq_work) {
1684 wl1271_debug(DEBUG_MAC80211, 1753 wl1271_debug(DEBUG_MAC80211,
1685 "run postponed irq_work directly"); 1754 "run postponed irq_work directly");
1686 wl1271_irq(0, wl); 1755
1756 /* don't talk to the HW if recovery is pending */
1757 if (!pending_recovery) {
1758 ret = wlcore_irq_locked(wl);
1759 if (ret)
1760 wl12xx_queue_recovery_work(wl);
1761 }
1762
1687 wlcore_enable_interrupts(wl); 1763 wlcore_enable_interrupts(wl);
1688 } 1764 }
1689 1765
1690 mutex_lock(&wl->mutex); 1766 if (pending_recovery) {
1767 wl1271_warning("queuing forgotten recovery on resume");
1768 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1769 goto out;
1770 }
1771
1691 wl12xx_for_each_wlvif(wl, wlvif) { 1772 wl12xx_for_each_wlvif(wl, wlvif) {
1692 wl1271_configure_resume(wl, wlvif); 1773 wl1271_configure_resume(wl, wlvif);
1693 } 1774 }
1775
1776out:
1694 wl->wow_enabled = false; 1777 wl->wow_enabled = false;
1695 mutex_unlock(&wl->mutex); 1778 mutex_unlock(&wl->mutex);
1696 1779
@@ -1716,29 +1799,15 @@ static int wl1271_op_start(struct ieee80211_hw *hw)
1716 return 0; 1799 return 0;
1717} 1800}
1718 1801
1719static void wl1271_op_stop(struct ieee80211_hw *hw) 1802static void wlcore_op_stop_locked(struct wl1271 *wl)
1720{ 1803{
1721 struct wl1271 *wl = hw->priv;
1722 int i; 1804 int i;
1723 1805
1724 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
1725
1726 /*
1727 * Interrupts must be disabled before setting the state to OFF.
1728 * Otherwise, the interrupt handler might be called and exit without
1729 * reading the interrupt status.
1730 */
1731 wlcore_disable_interrupts(wl);
1732 mutex_lock(&wl->mutex);
1733 if (wl->state == WL1271_STATE_OFF) { 1806 if (wl->state == WL1271_STATE_OFF) {
1734 mutex_unlock(&wl->mutex); 1807 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1808 &wl->flags))
1809 wlcore_enable_interrupts(wl);
1735 1810
1736 /*
1737 * This will not necessarily enable interrupts as interrupts
1738 * may have been disabled when op_stop was called. It will,
1739 * however, balance the above call to disable_interrupts().
1740 */
1741 wlcore_enable_interrupts(wl);
1742 return; 1811 return;
1743 } 1812 }
1744 1813
@@ -1747,8 +1816,16 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
1747 * functions don't perform further work. 1816 * functions don't perform further work.
1748 */ 1817 */
1749 wl->state = WL1271_STATE_OFF; 1818 wl->state = WL1271_STATE_OFF;
1819
1820 /*
1821 * Use the nosync variant to disable interrupts, so the mutex could be
1822 * held while doing so without deadlocking.
1823 */
1824 wlcore_disable_interrupts_nosync(wl);
1825
1750 mutex_unlock(&wl->mutex); 1826 mutex_unlock(&wl->mutex);
1751 1827
1828 wlcore_synchronize_interrupts(wl);
1752 wl1271_flush_deferred_work(wl); 1829 wl1271_flush_deferred_work(wl);
1753 cancel_delayed_work_sync(&wl->scan_complete_work); 1830 cancel_delayed_work_sync(&wl->scan_complete_work);
1754 cancel_work_sync(&wl->netstack_work); 1831 cancel_work_sync(&wl->netstack_work);
@@ -1758,15 +1835,23 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
1758 cancel_delayed_work_sync(&wl->connection_loss_work); 1835 cancel_delayed_work_sync(&wl->connection_loss_work);
1759 1836
1760 /* let's notify MAC80211 about the remaining pending TX frames */ 1837 /* let's notify MAC80211 about the remaining pending TX frames */
1761 wl12xx_tx_reset(wl, true); 1838 wl12xx_tx_reset(wl);
1762 mutex_lock(&wl->mutex); 1839 mutex_lock(&wl->mutex);
1763 1840
1764 wl1271_power_off(wl); 1841 wl1271_power_off(wl);
1842 /*
1843 * In case a recovery was scheduled, interrupts were disabled to avoid
1844 * an interrupt storm. Now that the power is down, it is safe to
1845 * re-enable interrupts to balance the disable depth
1846 */
1847 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1848 wlcore_enable_interrupts(wl);
1765 1849
1766 wl->band = IEEE80211_BAND_2GHZ; 1850 wl->band = IEEE80211_BAND_2GHZ;
1767 1851
1768 wl->rx_counter = 0; 1852 wl->rx_counter = 0;
1769 wl->power_level = WL1271_DEFAULT_POWER_LEVEL; 1853 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1854 wl->channel_type = NL80211_CHAN_NO_HT;
1770 wl->tx_blocks_available = 0; 1855 wl->tx_blocks_available = 0;
1771 wl->tx_allocated_blocks = 0; 1856 wl->tx_allocated_blocks = 0;
1772 wl->tx_results_count = 0; 1857 wl->tx_results_count = 0;
@@ -1775,6 +1860,7 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
1775 wl->ap_fw_ps_map = 0; 1860 wl->ap_fw_ps_map = 0;
1776 wl->ap_ps_map = 0; 1861 wl->ap_ps_map = 0;
1777 wl->sched_scanning = false; 1862 wl->sched_scanning = false;
1863 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1778 memset(wl->roles_map, 0, sizeof(wl->roles_map)); 1864 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1779 memset(wl->links_map, 0, sizeof(wl->links_map)); 1865 memset(wl->links_map, 0, sizeof(wl->links_map));
1780 memset(wl->roc_map, 0, sizeof(wl->roc_map)); 1866 memset(wl->roc_map, 0, sizeof(wl->roc_map));
@@ -1799,12 +1885,24 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
1799 1885
1800 wl1271_debugfs_reset(wl); 1886 wl1271_debugfs_reset(wl);
1801 1887
1802 kfree(wl->fw_status); 1888 kfree(wl->fw_status_1);
1803 wl->fw_status = NULL; 1889 wl->fw_status_1 = NULL;
1890 wl->fw_status_2 = NULL;
1804 kfree(wl->tx_res_if); 1891 kfree(wl->tx_res_if);
1805 wl->tx_res_if = NULL; 1892 wl->tx_res_if = NULL;
1806 kfree(wl->target_mem_map); 1893 kfree(wl->target_mem_map);
1807 wl->target_mem_map = NULL; 1894 wl->target_mem_map = NULL;
1895}
1896
1897static void wlcore_op_stop(struct ieee80211_hw *hw)
1898{
1899 struct wl1271 *wl = hw->priv;
1900
1901 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
1902
1903 mutex_lock(&wl->mutex);
1904
1905 wlcore_op_stop_locked(wl);
1808 1906
1809 mutex_unlock(&wl->mutex); 1907 mutex_unlock(&wl->mutex);
1810} 1908}
@@ -1894,6 +1992,9 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
1894 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx); 1992 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
1895 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx); 1993 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
1896 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx); 1994 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
1995 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
1996 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
1997 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
1897 } else { 1998 } else {
1898 /* init ap data */ 1999 /* init ap data */
1899 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID; 2000 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
@@ -1903,13 +2004,19 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
1903 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++) 2004 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
1904 wl12xx_allocate_rate_policy(wl, 2005 wl12xx_allocate_rate_policy(wl,
1905 &wlvif->ap.ucast_rate_idx[i]); 2006 &wlvif->ap.ucast_rate_idx[i]);
2007 wlvif->basic_rate_set = CONF_TX_AP_ENABLED_RATES;
2008 /*
2009 * TODO: check if basic_rate shouldn't be
2010 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2011 * instead (the same thing for STA above).
2012 */
2013 wlvif->basic_rate = CONF_TX_AP_ENABLED_RATES;
2014 /* TODO: this seems to be used only for STA, check it */
2015 wlvif->rate_set = CONF_TX_AP_ENABLED_RATES;
1906 } 2016 }
1907 2017
1908 wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate; 2018 wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
1909 wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5; 2019 wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
1910 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
1911 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
1912 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
1913 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT; 2020 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
1914 2021
1915 /* 2022 /*
@@ -1919,6 +2026,7 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
1919 wlvif->band = wl->band; 2026 wlvif->band = wl->band;
1920 wlvif->channel = wl->channel; 2027 wlvif->channel = wl->channel;
1921 wlvif->power_level = wl->power_level; 2028 wlvif->power_level = wl->power_level;
2029 wlvif->channel_type = wl->channel_type;
1922 2030
1923 INIT_WORK(&wlvif->rx_streaming_enable_work, 2031 INIT_WORK(&wlvif->rx_streaming_enable_work,
1924 wl1271_rx_streaming_enable_work); 2032 wl1271_rx_streaming_enable_work);
@@ -2170,6 +2278,7 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
2170{ 2278{
2171 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 2279 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2172 int i, ret; 2280 int i, ret;
2281 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2173 2282
2174 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface"); 2283 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2175 2284
@@ -2250,11 +2359,25 @@ deinit:
2250 wlvif->role_id = WL12XX_INVALID_ROLE_ID; 2359 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2251 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID; 2360 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2252 2361
2253 if (wlvif->bss_type == BSS_TYPE_AP_BSS) 2362 if (is_ap)
2254 wl->ap_count--; 2363 wl->ap_count--;
2255 else 2364 else
2256 wl->sta_count--; 2365 wl->sta_count--;
2257 2366
2367 /* Last AP, have more stations. Configure according to STA. */
2368 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2369 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2370 /* Configure for power according to debugfs */
2371 if (sta_auth != WL1271_PSM_ILLEGAL)
2372 wl1271_acx_sleep_auth(wl, sta_auth);
2373 /* Configure for power always on */
2374 else if (wl->quirks & WLCORE_QUIRK_NO_ELP)
2375 wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
2376 /* Configure for ELP power saving */
2377 else
2378 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2379 }
2380
2258 mutex_unlock(&wl->mutex); 2381 mutex_unlock(&wl->mutex);
2259 2382
2260 del_timer_sync(&wlvif->rx_streaming_timer); 2383 del_timer_sync(&wlvif->rx_streaming_timer);
@@ -2444,7 +2567,7 @@ static int wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2444 } else { 2567 } else {
2445 /* The current firmware only supports sched_scan in idle */ 2568 /* The current firmware only supports sched_scan in idle */
2446 if (wl->sched_scanning) { 2569 if (wl->sched_scanning) {
2447 wl1271_scan_sched_scan_stop(wl); 2570 wl1271_scan_sched_scan_stop(wl, wlvif);
2448 ieee80211_sched_scan_stopped(wl->hw); 2571 ieee80211_sched_scan_stopped(wl->hw);
2449 } 2572 }
2450 2573
@@ -2469,13 +2592,24 @@ static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2469 /* if the channel changes while joined, join again */ 2592 /* if the channel changes while joined, join again */
2470 if (changed & IEEE80211_CONF_CHANGE_CHANNEL && 2593 if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
2471 ((wlvif->band != conf->channel->band) || 2594 ((wlvif->band != conf->channel->band) ||
2472 (wlvif->channel != channel))) { 2595 (wlvif->channel != channel) ||
2596 (wlvif->channel_type != conf->channel_type))) {
2473 /* send all pending packets */ 2597 /* send all pending packets */
2474 wl1271_tx_work_locked(wl); 2598 ret = wlcore_tx_work_locked(wl);
2599 if (ret < 0)
2600 return ret;
2601
2475 wlvif->band = conf->channel->band; 2602 wlvif->band = conf->channel->band;
2476 wlvif->channel = channel; 2603 wlvif->channel = channel;
2604 wlvif->channel_type = conf->channel_type;
2477 2605
2478 if (!is_ap) { 2606 if (is_ap) {
2607 wl1271_set_band_rate(wl, wlvif);
2608 ret = wl1271_init_ap_rates(wl, wlvif);
2609 if (ret < 0)
2610 wl1271_error("AP rate policy change failed %d",
2611 ret);
2612 } else {
2479 /* 2613 /*
2480 * FIXME: the mac80211 should really provide a fixed 2614 * FIXME: the mac80211 should really provide a fixed
2481 * rate to use here. for now, just use the smallest 2615 * rate to use here. for now, just use the smallest
@@ -2583,8 +2717,9 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
2583 * frames, such as the deauth. To make sure those frames reach the air, 2717 * frames, such as the deauth. To make sure those frames reach the air,
2584 * wait here until the TX queue is fully flushed. 2718 * wait here until the TX queue is fully flushed.
2585 */ 2719 */
2586 if ((changed & IEEE80211_CONF_CHANGE_IDLE) && 2720 if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) ||
2587 (conf->flags & IEEE80211_CONF_IDLE)) 2721 ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
2722 (conf->flags & IEEE80211_CONF_IDLE)))
2588 wl1271_tx_flush(wl); 2723 wl1271_tx_flush(wl);
2589 2724
2590 mutex_lock(&wl->mutex); 2725 mutex_lock(&wl->mutex);
@@ -2593,6 +2728,7 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
2593 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 2728 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
2594 wl->band = conf->channel->band; 2729 wl->band = conf->channel->band;
2595 wl->channel = channel; 2730 wl->channel = channel;
2731 wl->channel_type = conf->channel_type;
2596 } 2732 }
2597 2733
2598 if (changed & IEEE80211_CONF_CHANGE_POWER) 2734 if (changed & IEEE80211_CONF_CHANGE_POWER)
@@ -2825,17 +2961,6 @@ static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2825 int ret; 2961 int ret;
2826 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); 2962 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2827 2963
2828 /*
2829 * A role set to GEM cipher requires different Tx settings (namely
2830 * spare blocks). Note when we are in this mode so the HW can adjust.
2831 */
2832 if (key_type == KEY_GEM) {
2833 if (action == KEY_ADD_OR_REPLACE)
2834 wlvif->is_gem = true;
2835 else if (action == KEY_REMOVE)
2836 wlvif->is_gem = false;
2837 }
2838
2839 if (is_ap) { 2964 if (is_ap) {
2840 struct wl1271_station *wl_sta; 2965 struct wl1271_station *wl_sta;
2841 u8 hlid; 2966 u8 hlid;
@@ -2913,12 +3038,21 @@ static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2913 return 0; 3038 return 0;
2914} 3039}
2915 3040
2916static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 3041static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2917 struct ieee80211_vif *vif, 3042 struct ieee80211_vif *vif,
2918 struct ieee80211_sta *sta, 3043 struct ieee80211_sta *sta,
2919 struct ieee80211_key_conf *key_conf) 3044 struct ieee80211_key_conf *key_conf)
2920{ 3045{
2921 struct wl1271 *wl = hw->priv; 3046 struct wl1271 *wl = hw->priv;
3047
3048 return wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3049}
3050
3051int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3052 struct ieee80211_vif *vif,
3053 struct ieee80211_sta *sta,
3054 struct ieee80211_key_conf *key_conf)
3055{
2922 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 3056 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2923 int ret; 3057 int ret;
2924 u32 tx_seq_32 = 0; 3058 u32 tx_seq_32 = 0;
@@ -3029,6 +3163,7 @@ out_unlock:
3029 3163
3030 return ret; 3164 return ret;
3031} 3165}
3166EXPORT_SYMBOL_GPL(wlcore_set_key);
3032 3167
3033static int wl1271_op_hw_scan(struct ieee80211_hw *hw, 3168static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3034 struct ieee80211_vif *vif, 3169 struct ieee80211_vif *vif,
@@ -3167,6 +3302,7 @@ static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3167 struct ieee80211_vif *vif) 3302 struct ieee80211_vif *vif)
3168{ 3303{
3169 struct wl1271 *wl = hw->priv; 3304 struct wl1271 *wl = hw->priv;
3305 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3170 int ret; 3306 int ret;
3171 3307
3172 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop"); 3308 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
@@ -3180,7 +3316,7 @@ static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3180 if (ret < 0) 3316 if (ret < 0)
3181 goto out; 3317 goto out;
3182 3318
3183 wl1271_scan_sched_scan_stop(wl); 3319 wl1271_scan_sched_scan_stop(wl, wlvif);
3184 3320
3185 wl1271_ps_elp_sleep(wl); 3321 wl1271_ps_elp_sleep(wl);
3186out: 3322out:
@@ -3316,8 +3452,15 @@ static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3316 skb->data, 3452 skb->data,
3317 skb->len, 0, 3453 skb->len, 0,
3318 rates); 3454 rates);
3319
3320 dev_kfree_skb(skb); 3455 dev_kfree_skb(skb);
3456
3457 if (ret < 0)
3458 goto out;
3459
3460 wl1271_debug(DEBUG_AP, "probe response updated");
3461 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3462
3463out:
3321 return ret; 3464 return ret;
3322} 3465}
3323 3466
@@ -3422,6 +3565,87 @@ out:
3422 return ret; 3565 return ret;
3423} 3566}
3424 3567
3568static int wlcore_set_beacon_template(struct wl1271 *wl,
3569 struct ieee80211_vif *vif,
3570 bool is_ap)
3571{
3572 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3573 struct ieee80211_hdr *hdr;
3574 u32 min_rate;
3575 int ret;
3576 int ieoffset = offsetof(struct ieee80211_mgmt,
3577 u.beacon.variable);
3578 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3579 u16 tmpl_id;
3580
3581 if (!beacon) {
3582 ret = -EINVAL;
3583 goto out;
3584 }
3585
3586 wl1271_debug(DEBUG_MASTER, "beacon updated");
3587
3588 ret = wl1271_ssid_set(vif, beacon, ieoffset);
3589 if (ret < 0) {
3590 dev_kfree_skb(beacon);
3591 goto out;
3592 }
3593 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3594 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3595 CMD_TEMPL_BEACON;
3596 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3597 beacon->data,
3598 beacon->len, 0,
3599 min_rate);
3600 if (ret < 0) {
3601 dev_kfree_skb(beacon);
3602 goto out;
3603 }
3604
3605 /*
3606 * In case we already have a probe-resp beacon set explicitly
3607 * by usermode, don't use the beacon data.
3608 */
3609 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
3610 goto end_bcn;
3611
3612 /* remove TIM ie from probe response */
3613 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
3614
3615 /*
3616 * remove p2p ie from probe response.
3617 * the fw reponds to probe requests that don't include
3618 * the p2p ie. probe requests with p2p ie will be passed,
3619 * and will be responded by the supplicant (the spec
3620 * forbids including the p2p ie when responding to probe
3621 * requests that didn't include it).
3622 */
3623 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
3624 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
3625
3626 hdr = (struct ieee80211_hdr *) beacon->data;
3627 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3628 IEEE80211_STYPE_PROBE_RESP);
3629 if (is_ap)
3630 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
3631 beacon->data,
3632 beacon->len,
3633 min_rate);
3634 else
3635 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3636 CMD_TEMPL_PROBE_RESPONSE,
3637 beacon->data,
3638 beacon->len, 0,
3639 min_rate);
3640end_bcn:
3641 dev_kfree_skb(beacon);
3642 if (ret < 0)
3643 goto out;
3644
3645out:
3646 return ret;
3647}
3648
3425static int wl1271_bss_beacon_info_changed(struct wl1271 *wl, 3649static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
3426 struct ieee80211_vif *vif, 3650 struct ieee80211_vif *vif,
3427 struct ieee80211_bss_conf *bss_conf, 3651 struct ieee80211_bss_conf *bss_conf,
@@ -3440,81 +3664,12 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
3440 3664
3441 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) { 3665 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
3442 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); 3666 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3443 if (!wl1271_ap_set_probe_resp_tmpl(wl, rate, vif)) { 3667
3444 wl1271_debug(DEBUG_AP, "probe response updated"); 3668 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
3445 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3446 }
3447 } 3669 }
3448 3670
3449 if ((changed & BSS_CHANGED_BEACON)) { 3671 if ((changed & BSS_CHANGED_BEACON)) {
3450 struct ieee80211_hdr *hdr; 3672 ret = wlcore_set_beacon_template(wl, vif, is_ap);
3451 u32 min_rate;
3452 int ieoffset = offsetof(struct ieee80211_mgmt,
3453 u.beacon.variable);
3454 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3455 u16 tmpl_id;
3456
3457 if (!beacon) {
3458 ret = -EINVAL;
3459 goto out;
3460 }
3461
3462 wl1271_debug(DEBUG_MASTER, "beacon updated");
3463
3464 ret = wl1271_ssid_set(vif, beacon, ieoffset);
3465 if (ret < 0) {
3466 dev_kfree_skb(beacon);
3467 goto out;
3468 }
3469 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3470 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3471 CMD_TEMPL_BEACON;
3472 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3473 beacon->data,
3474 beacon->len, 0,
3475 min_rate);
3476 if (ret < 0) {
3477 dev_kfree_skb(beacon);
3478 goto out;
3479 }
3480
3481 /*
3482 * In case we already have a probe-resp beacon set explicitly
3483 * by usermode, don't use the beacon data.
3484 */
3485 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
3486 goto end_bcn;
3487
3488 /* remove TIM ie from probe response */
3489 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
3490
3491 /*
3492 * remove p2p ie from probe response.
3493 * the fw reponds to probe requests that don't include
3494 * the p2p ie. probe requests with p2p ie will be passed,
3495 * and will be responded by the supplicant (the spec
3496 * forbids including the p2p ie when responding to probe
3497 * requests that didn't include it).
3498 */
3499 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
3500 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
3501
3502 hdr = (struct ieee80211_hdr *) beacon->data;
3503 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3504 IEEE80211_STYPE_PROBE_RESP);
3505 if (is_ap)
3506 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
3507 beacon->data,
3508 beacon->len,
3509 min_rate);
3510 else
3511 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3512 CMD_TEMPL_PROBE_RESPONSE,
3513 beacon->data,
3514 beacon->len, 0,
3515 min_rate);
3516end_bcn:
3517 dev_kfree_skb(beacon);
3518 if (ret < 0) 3673 if (ret < 0)
3519 goto out; 3674 goto out;
3520 } 3675 }
@@ -3551,6 +3706,14 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
3551 ret = wl1271_ap_init_templates(wl, vif); 3706 ret = wl1271_ap_init_templates(wl, vif);
3552 if (ret < 0) 3707 if (ret < 0)
3553 goto out; 3708 goto out;
3709
3710 ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
3711 if (ret < 0)
3712 goto out;
3713
3714 ret = wlcore_set_beacon_template(wl, vif, true);
3715 if (ret < 0)
3716 goto out;
3554 } 3717 }
3555 3718
3556 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed); 3719 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
@@ -3691,7 +3854,8 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
3691 sta_rate_set = sta->supp_rates[wl->hw->conf.channel->band]; 3854 sta_rate_set = sta->supp_rates[wl->hw->conf.channel->band];
3692 if (sta->ht_cap.ht_supported) 3855 if (sta->ht_cap.ht_supported)
3693 sta_rate_set |= 3856 sta_rate_set |=
3694 (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET); 3857 (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET) |
3858 (sta->ht_cap.mcs.rx_mask[1] << HW_MIMO_RATES_OFFSET);
3695 sta_ht_cap = sta->ht_cap; 3859 sta_ht_cap = sta->ht_cap;
3696 sta_exists = true; 3860 sta_exists = true;
3697 3861
@@ -3704,13 +3868,11 @@ sta_not_found:
3704 u32 rates; 3868 u32 rates;
3705 int ieoffset; 3869 int ieoffset;
3706 wlvif->aid = bss_conf->aid; 3870 wlvif->aid = bss_conf->aid;
3871 wlvif->channel_type = bss_conf->channel_type;
3707 wlvif->beacon_int = bss_conf->beacon_int; 3872 wlvif->beacon_int = bss_conf->beacon_int;
3708 do_join = true; 3873 do_join = true;
3709 set_assoc = true; 3874 set_assoc = true;
3710 3875
3711 /* Cancel connection_loss_work */
3712 cancel_delayed_work_sync(&wl->connection_loss_work);
3713
3714 /* 3876 /*
3715 * use basic rates from AP, and determine lowest rate 3877 * use basic rates from AP, and determine lowest rate
3716 * to use with control frames. 3878 * to use with control frames.
@@ -3960,6 +4122,17 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
3960 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x", 4122 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x",
3961 (int)changed); 4123 (int)changed);
3962 4124
4125 /*
4126 * make sure to cancel pending disconnections if our association
4127 * state changed
4128 */
4129 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4130 cancel_delayed_work_sync(&wl->connection_loss_work);
4131
4132 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4133 !bss_conf->enable_beacon)
4134 wl1271_tx_flush(wl);
4135
3963 mutex_lock(&wl->mutex); 4136 mutex_lock(&wl->mutex);
3964 4137
3965 if (unlikely(wl->state == WL1271_STATE_OFF)) 4138 if (unlikely(wl->state == WL1271_STATE_OFF))
@@ -4068,16 +4241,13 @@ out:
4068static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx, 4241static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4069 struct survey_info *survey) 4242 struct survey_info *survey)
4070{ 4243{
4071 struct wl1271 *wl = hw->priv;
4072 struct ieee80211_conf *conf = &hw->conf; 4244 struct ieee80211_conf *conf = &hw->conf;
4073 4245
4074 if (idx != 0) 4246 if (idx != 0)
4075 return -ENOENT; 4247 return -ENOENT;
4076 4248
4077 survey->channel = conf->channel; 4249 survey->channel = conf->channel;
4078 survey->filled = SURVEY_INFO_NOISE_DBM; 4250 survey->filled = 0;
4079 survey->noise = wl->noise;
4080
4081 return 0; 4251 return 0;
4082} 4252}
4083 4253
@@ -4343,9 +4513,14 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
4343 4513
4344 case IEEE80211_AMPDU_RX_STOP: 4514 case IEEE80211_AMPDU_RX_STOP:
4345 if (!(*ba_bitmap & BIT(tid))) { 4515 if (!(*ba_bitmap & BIT(tid))) {
4346 ret = -EINVAL; 4516 /*
4347 wl1271_error("no active RX BA session on tid: %d", 4517 * this happens on reconfig - so only output a debug
4518 * message for now, and don't fail the function.
4519 */
4520 wl1271_debug(DEBUG_MAC80211,
4521 "no active RX BA session on tid: %d",
4348 tid); 4522 tid);
4523 ret = 0;
4349 break; 4524 break;
4350 } 4525 }
4351 4526
@@ -4394,7 +4569,7 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
4394 4569
4395 mutex_lock(&wl->mutex); 4570 mutex_lock(&wl->mutex);
4396 4571
4397 for (i = 0; i < IEEE80211_NUM_BANDS; i++) 4572 for (i = 0; i < WLCORE_NUM_BANDS; i++)
4398 wlvif->bitrate_masks[i] = 4573 wlvif->bitrate_masks[i] =
4399 wl1271_tx_enabled_rates_get(wl, 4574 wl1271_tx_enabled_rates_get(wl,
4400 mask->control[i].legacy, 4575 mask->control[i].legacy,
@@ -4462,6 +4637,13 @@ out:
4462 mutex_unlock(&wl->mutex); 4637 mutex_unlock(&wl->mutex);
4463} 4638}
4464 4639
4640static void wlcore_op_flush(struct ieee80211_hw *hw, bool drop)
4641{
4642 struct wl1271 *wl = hw->priv;
4643
4644 wl1271_tx_flush(wl);
4645}
4646
4465static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw) 4647static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
4466{ 4648{
4467 struct wl1271 *wl = hw->priv; 4649 struct wl1271 *wl = hw->priv;
@@ -4624,7 +4806,7 @@ static struct ieee80211_supported_band wl1271_band_5ghz = {
4624 4806
4625static const struct ieee80211_ops wl1271_ops = { 4807static const struct ieee80211_ops wl1271_ops = {
4626 .start = wl1271_op_start, 4808 .start = wl1271_op_start,
4627 .stop = wl1271_op_stop, 4809 .stop = wlcore_op_stop,
4628 .add_interface = wl1271_op_add_interface, 4810 .add_interface = wl1271_op_add_interface,
4629 .remove_interface = wl1271_op_remove_interface, 4811 .remove_interface = wl1271_op_remove_interface,
4630 .change_interface = wl12xx_op_change_interface, 4812 .change_interface = wl12xx_op_change_interface,
@@ -4636,7 +4818,7 @@ static const struct ieee80211_ops wl1271_ops = {
4636 .prepare_multicast = wl1271_op_prepare_multicast, 4818 .prepare_multicast = wl1271_op_prepare_multicast,
4637 .configure_filter = wl1271_op_configure_filter, 4819 .configure_filter = wl1271_op_configure_filter,
4638 .tx = wl1271_op_tx, 4820 .tx = wl1271_op_tx,
4639 .set_key = wl1271_op_set_key, 4821 .set_key = wlcore_op_set_key,
4640 .hw_scan = wl1271_op_hw_scan, 4822 .hw_scan = wl1271_op_hw_scan,
4641 .cancel_hw_scan = wl1271_op_cancel_hw_scan, 4823 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
4642 .sched_scan_start = wl1271_op_sched_scan_start, 4824 .sched_scan_start = wl1271_op_sched_scan_start,
@@ -4652,6 +4834,7 @@ static const struct ieee80211_ops wl1271_ops = {
4652 .tx_frames_pending = wl1271_tx_frames_pending, 4834 .tx_frames_pending = wl1271_tx_frames_pending,
4653 .set_bitrate_mask = wl12xx_set_bitrate_mask, 4835 .set_bitrate_mask = wl12xx_set_bitrate_mask,
4654 .channel_switch = wl12xx_op_channel_switch, 4836 .channel_switch = wl12xx_op_channel_switch,
4837 .flush = wlcore_op_flush,
4655 CFG80211_TESTMODE_CMD(wl1271_tm_cmd) 4838 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
4656}; 4839};
4657 4840
@@ -4882,18 +5065,22 @@ static int wl12xx_get_hw_info(struct wl1271 *wl)
4882 if (ret < 0) 5065 if (ret < 0)
4883 goto out; 5066 goto out;
4884 5067
4885 wl->chip.id = wlcore_read_reg(wl, REG_CHIP_ID_B); 5068 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
5069 if (ret < 0)
5070 goto out;
4886 5071
4887 wl->fuse_oui_addr = 0; 5072 wl->fuse_oui_addr = 0;
4888 wl->fuse_nic_addr = 0; 5073 wl->fuse_nic_addr = 0;
4889 5074
4890 wl->hw_pg_ver = wl->ops->get_pg_ver(wl); 5075 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
5076 if (ret < 0)
5077 goto out;
4891 5078
4892 if (wl->ops->get_mac) 5079 if (wl->ops->get_mac)
4893 wl->ops->get_mac(wl); 5080 ret = wl->ops->get_mac(wl);
4894 5081
4895 wl1271_power_off(wl);
4896out: 5082out:
5083 wl1271_power_off(wl);
4897 return ret; 5084 return ret;
4898} 5085}
4899 5086
@@ -4905,14 +5092,8 @@ static int wl1271_register_hw(struct wl1271 *wl)
4905 if (wl->mac80211_registered) 5092 if (wl->mac80211_registered)
4906 return 0; 5093 return 0;
4907 5094
4908 ret = wl12xx_get_hw_info(wl); 5095 wl1271_fetch_nvs(wl);
4909 if (ret < 0) { 5096 if (wl->nvs != NULL) {
4910 wl1271_error("couldn't get hw info");
4911 goto out;
4912 }
4913
4914 ret = wl1271_fetch_nvs(wl);
4915 if (ret == 0) {
4916 /* NOTE: The wl->nvs->nvs element must be first, in 5097 /* NOTE: The wl->nvs->nvs element must be first, in
4917 * order to simplify the casting, we assume it is at 5098 * order to simplify the casting, we assume it is at
4918 * the beginning of the wl->nvs structure. 5099 * the beginning of the wl->nvs structure.
@@ -4960,6 +5141,29 @@ static void wl1271_unregister_hw(struct wl1271 *wl)
4960 5141
4961} 5142}
4962 5143
5144static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
5145 {
5146 .max = 2,
5147 .types = BIT(NL80211_IFTYPE_STATION),
5148 },
5149 {
5150 .max = 1,
5151 .types = BIT(NL80211_IFTYPE_AP) |
5152 BIT(NL80211_IFTYPE_P2P_GO) |
5153 BIT(NL80211_IFTYPE_P2P_CLIENT),
5154 },
5155};
5156
5157static const struct ieee80211_iface_combination
5158wlcore_iface_combinations[] = {
5159 {
5160 .num_different_channels = 1,
5161 .max_interfaces = 2,
5162 .limits = wlcore_iface_limits,
5163 .n_limits = ARRAY_SIZE(wlcore_iface_limits),
5164 },
5165};
5166
4963static int wl1271_init_ieee80211(struct wl1271 *wl) 5167static int wl1271_init_ieee80211(struct wl1271 *wl)
4964{ 5168{
4965 static const u32 cipher_suites[] = { 5169 static const u32 cipher_suites[] = {
@@ -4970,9 +5174,11 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
4970 WL1271_CIPHER_SUITE_GEM, 5174 WL1271_CIPHER_SUITE_GEM,
4971 }; 5175 };
4972 5176
4973 /* The tx descriptor buffer and the TKIP space. */ 5177 /* The tx descriptor buffer */
4974 wl->hw->extra_tx_headroom = WL1271_EXTRA_SPACE_TKIP + 5178 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
4975 sizeof(struct wl1271_tx_hw_descr); 5179
5180 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
5181 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
4976 5182
4977 /* unit us */ 5183 /* unit us */
4978 /* FIXME: find a proper value */ 5184 /* FIXME: find a proper value */
@@ -5025,12 +5231,14 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
5025 */ 5231 */
5026 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz, 5232 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
5027 sizeof(wl1271_band_2ghz)); 5233 sizeof(wl1271_band_2ghz));
5028 memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap, &wl->ht_cap, 5234 memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
5029 sizeof(wl->ht_cap)); 5235 &wl->ht_cap[IEEE80211_BAND_2GHZ],
5236 sizeof(*wl->ht_cap));
5030 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz, 5237 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
5031 sizeof(wl1271_band_5ghz)); 5238 sizeof(wl1271_band_5ghz));
5032 memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap, &wl->ht_cap, 5239 memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
5033 sizeof(wl->ht_cap)); 5240 &wl->ht_cap[IEEE80211_BAND_5GHZ],
5241 sizeof(*wl->ht_cap));
5034 5242
5035 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = 5243 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5036 &wl->bands[IEEE80211_BAND_2GHZ]; 5244 &wl->bands[IEEE80211_BAND_2GHZ];
@@ -5049,6 +5257,11 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
5049 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | 5257 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
5050 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P; 5258 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
5051 5259
5260 /* allowed interface combinations */
5261 wl->hw->wiphy->iface_combinations = wlcore_iface_combinations;
5262 wl->hw->wiphy->n_iface_combinations =
5263 ARRAY_SIZE(wlcore_iface_combinations);
5264
5052 SET_IEEE80211_DEV(wl->hw, wl->dev); 5265 SET_IEEE80211_DEV(wl->hw, wl->dev);
5053 5266
5054 wl->hw->sta_data_size = sizeof(struct wl1271_station); 5267 wl->hw->sta_data_size = sizeof(struct wl1271_station);
@@ -5117,8 +5330,10 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size)
5117 wl->rx_counter = 0; 5330 wl->rx_counter = 0;
5118 wl->power_level = WL1271_DEFAULT_POWER_LEVEL; 5331 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
5119 wl->band = IEEE80211_BAND_2GHZ; 5332 wl->band = IEEE80211_BAND_2GHZ;
5333 wl->channel_type = NL80211_CHAN_NO_HT;
5120 wl->flags = 0; 5334 wl->flags = 0;
5121 wl->sg_enabled = true; 5335 wl->sg_enabled = true;
5336 wl->sleep_auth = WL1271_PSM_ILLEGAL;
5122 wl->hw_pg_ver = -1; 5337 wl->hw_pg_ver = -1;
5123 wl->ap_ps_map = 0; 5338 wl->ap_ps_map = 0;
5124 wl->ap_fw_ps_map = 0; 5339 wl->ap_fw_ps_map = 0;
@@ -5142,6 +5357,7 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size)
5142 wl->state = WL1271_STATE_OFF; 5357 wl->state = WL1271_STATE_OFF;
5143 wl->fw_type = WL12XX_FW_TYPE_NONE; 5358 wl->fw_type = WL12XX_FW_TYPE_NONE;
5144 mutex_init(&wl->mutex); 5359 mutex_init(&wl->mutex);
5360 mutex_init(&wl->flush_mutex);
5145 5361
5146 order = get_order(WL1271_AGGR_BUFFER_SIZE); 5362 order = get_order(WL1271_AGGR_BUFFER_SIZE);
5147 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order); 5363 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
@@ -5222,7 +5438,7 @@ int wlcore_free_hw(struct wl1271 *wl)
5222 kfree(wl->nvs); 5438 kfree(wl->nvs);
5223 wl->nvs = NULL; 5439 wl->nvs = NULL;
5224 5440
5225 kfree(wl->fw_status); 5441 kfree(wl->fw_status_1);
5226 kfree(wl->tx_res_if); 5442 kfree(wl->tx_res_if);
5227 destroy_workqueue(wl->freezable_wq); 5443 destroy_workqueue(wl->freezable_wq);
5228 5444
@@ -5279,8 +5495,6 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
5279 wlcore_adjust_conf(wl); 5495 wlcore_adjust_conf(wl);
5280 5496
5281 wl->irq = platform_get_irq(pdev, 0); 5497 wl->irq = platform_get_irq(pdev, 0);
5282 wl->ref_clock = pdata->board_ref_clock;
5283 wl->tcxo_clock = pdata->board_tcxo_clock;
5284 wl->platform_quirks = pdata->platform_quirks; 5498 wl->platform_quirks = pdata->platform_quirks;
5285 wl->set_power = pdata->set_power; 5499 wl->set_power = pdata->set_power;
5286 wl->dev = &pdev->dev; 5500 wl->dev = &pdev->dev;
@@ -5293,7 +5507,7 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
5293 else 5507 else
5294 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT; 5508 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
5295 5509
5296 ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wl1271_irq, 5510 ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wlcore_irq,
5297 irqflags, 5511 irqflags,
5298 pdev->name, wl); 5512 pdev->name, wl);
5299 if (ret < 0) { 5513 if (ret < 0) {
@@ -5301,6 +5515,7 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
5301 goto out_free_hw; 5515 goto out_free_hw;
5302 } 5516 }
5303 5517
5518#ifdef CONFIG_PM
5304 ret = enable_irq_wake(wl->irq); 5519 ret = enable_irq_wake(wl->irq);
5305 if (!ret) { 5520 if (!ret) {
5306 wl->irq_wake_enabled = true; 5521 wl->irq_wake_enabled = true;
@@ -5314,8 +5529,19 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
5314 WL1271_RX_FILTER_MAX_PATTERN_SIZE; 5529 WL1271_RX_FILTER_MAX_PATTERN_SIZE;
5315 } 5530 }
5316 } 5531 }
5532#endif
5317 disable_irq(wl->irq); 5533 disable_irq(wl->irq);
5318 5534
5535 ret = wl12xx_get_hw_info(wl);
5536 if (ret < 0) {
5537 wl1271_error("couldn't get hw info");
5538 goto out_irq;
5539 }
5540
5541 ret = wl->ops->identify_chip(wl);
5542 if (ret < 0)
5543 goto out_irq;
5544
5319 ret = wl1271_init_ieee80211(wl); 5545 ret = wl1271_init_ieee80211(wl);
5320 if (ret) 5546 if (ret)
5321 goto out_irq; 5547 goto out_irq;
@@ -5328,7 +5554,7 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
5328 ret = device_create_file(wl->dev, &dev_attr_bt_coex_state); 5554 ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
5329 if (ret < 0) { 5555 if (ret < 0) {
5330 wl1271_error("failed to create sysfs file bt_coex_state"); 5556 wl1271_error("failed to create sysfs file bt_coex_state");
5331 goto out_irq; 5557 goto out_unreg;
5332 } 5558 }
5333 5559
5334 /* Create sysfs file to get HW PG version */ 5560 /* Create sysfs file to get HW PG version */
@@ -5353,6 +5579,9 @@ out_hw_pg_ver:
5353out_bt_coex_state: 5579out_bt_coex_state:
5354 device_remove_file(wl->dev, &dev_attr_bt_coex_state); 5580 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
5355 5581
5582out_unreg:
5583 wl1271_unregister_hw(wl);
5584
5356out_irq: 5585out_irq:
5357 free_irq(wl->irq, wl); 5586 free_irq(wl->irq, wl);
5358 5587
diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index 756eee2257b4..46d36fd30eba 100644
--- a/drivers/net/wireless/ti/wlcore/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -28,11 +28,14 @@
28 28
29#define WL1271_WAKEUP_TIMEOUT 500 29#define WL1271_WAKEUP_TIMEOUT 500
30 30
31#define ELP_ENTRY_DELAY 5
32
31void wl1271_elp_work(struct work_struct *work) 33void wl1271_elp_work(struct work_struct *work)
32{ 34{
33 struct delayed_work *dwork; 35 struct delayed_work *dwork;
34 struct wl1271 *wl; 36 struct wl1271 *wl;
35 struct wl12xx_vif *wlvif; 37 struct wl12xx_vif *wlvif;
38 int ret;
36 39
37 dwork = container_of(work, struct delayed_work, work); 40 dwork = container_of(work, struct delayed_work, work);
38 wl = container_of(dwork, struct wl1271, elp_work); 41 wl = container_of(dwork, struct wl1271, elp_work);
@@ -61,7 +64,12 @@ void wl1271_elp_work(struct work_struct *work)
61 } 64 }
62 65
63 wl1271_debug(DEBUG_PSM, "chip to elp"); 66 wl1271_debug(DEBUG_PSM, "chip to elp");
64 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP); 67 ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
68 if (ret < 0) {
69 wl12xx_queue_recovery_work(wl);
70 goto out;
71 }
72
65 set_bit(WL1271_FLAG_IN_ELP, &wl->flags); 73 set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
66 74
67out: 75out:
@@ -72,8 +80,9 @@ out:
72void wl1271_ps_elp_sleep(struct wl1271 *wl) 80void wl1271_ps_elp_sleep(struct wl1271 *wl)
73{ 81{
74 struct wl12xx_vif *wlvif; 82 struct wl12xx_vif *wlvif;
83 u32 timeout;
75 84
76 if (wl->quirks & WLCORE_QUIRK_NO_ELP) 85 if (wl->sleep_auth != WL1271_PSM_ELP)
77 return; 86 return;
78 87
79 /* we shouldn't get consecutive sleep requests */ 88 /* we shouldn't get consecutive sleep requests */
@@ -89,8 +98,13 @@ void wl1271_ps_elp_sleep(struct wl1271 *wl)
89 return; 98 return;
90 } 99 }
91 100
101 if (wl->conf.conn.forced_ps)
102 timeout = ELP_ENTRY_DELAY;
103 else
104 timeout = wl->conf.conn.dynamic_ps_timeout;
105
92 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work, 106 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
93 msecs_to_jiffies(wl->conf.conn.dynamic_ps_timeout)); 107 msecs_to_jiffies(timeout));
94} 108}
95 109
96int wl1271_ps_elp_wakeup(struct wl1271 *wl) 110int wl1271_ps_elp_wakeup(struct wl1271 *wl)
@@ -127,7 +141,11 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl)
127 wl->elp_compl = &compl; 141 wl->elp_compl = &compl;
128 spin_unlock_irqrestore(&wl->wl_lock, flags); 142 spin_unlock_irqrestore(&wl->wl_lock, flags);
129 143
130 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP); 144 ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
145 if (ret < 0) {
146 wl12xx_queue_recovery_work(wl);
147 goto err;
148 }
131 149
132 if (!pending) { 150 if (!pending) {
133 ret = wait_for_completion_timeout( 151 ret = wait_for_completion_timeout(
@@ -185,8 +203,12 @@ int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
185 203
186 set_bit(WLVIF_FLAG_IN_PS, &wlvif->flags); 204 set_bit(WLVIF_FLAG_IN_PS, &wlvif->flags);
187 205
188 /* enable beacon early termination. Not relevant for 5GHz */ 206 /*
189 if (wlvif->band == IEEE80211_BAND_2GHZ) { 207 * enable beacon early termination.
208 * Not relevant for 5GHz and for high rates.
209 */
210 if ((wlvif->band == IEEE80211_BAND_2GHZ) &&
211 (wlvif->basic_rate < CONF_HW_BIT_RATE_9MBPS)) {
190 ret = wl1271_acx_bet_enable(wl, wlvif, true); 212 ret = wl1271_acx_bet_enable(wl, wlvif, true);
191 if (ret < 0) 213 if (ret < 0)
192 return ret; 214 return ret;
@@ -196,7 +218,8 @@ int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
196 wl1271_debug(DEBUG_PSM, "leaving psm"); 218 wl1271_debug(DEBUG_PSM, "leaving psm");
197 219
198 /* disable beacon early termination */ 220 /* disable beacon early termination */
199 if (wlvif->band == IEEE80211_BAND_2GHZ) { 221 if ((wlvif->band == IEEE80211_BAND_2GHZ) &&
222 (wlvif->basic_rate < CONF_HW_BIT_RATE_9MBPS)) {
200 ret = wl1271_acx_bet_enable(wl, wlvif, false); 223 ret = wl1271_acx_bet_enable(wl, wlvif, false);
201 if (ret < 0) 224 if (ret < 0)
202 return ret; 225 return ret;
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index d6a3c6b07827..f55e2f9e7ac5 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -127,7 +127,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
127 } 127 }
128 128
129 if (rx_align == WLCORE_RX_BUF_UNALIGNED) 129 if (rx_align == WLCORE_RX_BUF_UNALIGNED)
130 reserved = NET_IP_ALIGN; 130 reserved = RX_BUF_ALIGN;
131 131
132 /* the data read starts with the descriptor */ 132 /* the data read starts with the descriptor */
133 desc = (struct wl1271_rx_descriptor *) data; 133 desc = (struct wl1271_rx_descriptor *) data;
@@ -175,7 +175,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
175 */ 175 */
176 memcpy(buf, data + sizeof(*desc), pkt_data_len); 176 memcpy(buf, data + sizeof(*desc), pkt_data_len);
177 if (rx_align == WLCORE_RX_BUF_PADDED) 177 if (rx_align == WLCORE_RX_BUF_PADDED)
178 skb_pull(skb, NET_IP_ALIGN); 178 skb_pull(skb, RX_BUF_ALIGN);
179 179
180 *hlid = desc->hlid; 180 *hlid = desc->hlid;
181 181
@@ -186,6 +186,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
186 is_data = 1; 186 is_data = 1;
187 187
188 wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon); 188 wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
189 wlcore_hw_set_rx_csum(wl, desc, skb);
189 190
190 seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; 191 seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
191 wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s seq %d hlid %d", skb, 192 wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s seq %d hlid %d", skb,
@@ -199,17 +200,18 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
199 return is_data; 200 return is_data;
200} 201}
201 202
202void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status) 203int wlcore_rx(struct wl1271 *wl, struct wl_fw_status_1 *status)
203{ 204{
204 unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0}; 205 unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
205 u32 buf_size; 206 u32 buf_size;
206 u32 fw_rx_counter = status->fw_rx_counter & NUM_RX_PKT_DESC_MOD_MASK; 207 u32 fw_rx_counter = status->fw_rx_counter % wl->num_rx_desc;
207 u32 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK; 208 u32 drv_rx_counter = wl->rx_counter % wl->num_rx_desc;
208 u32 rx_counter; 209 u32 rx_counter;
209 u32 pkt_len, align_pkt_len; 210 u32 pkt_len, align_pkt_len;
210 u32 pkt_offset, des; 211 u32 pkt_offset, des;
211 u8 hlid; 212 u8 hlid;
212 enum wl_rx_buf_align rx_align; 213 enum wl_rx_buf_align rx_align;
214 int ret = 0;
213 215
214 while (drv_rx_counter != fw_rx_counter) { 216 while (drv_rx_counter != fw_rx_counter) {
215 buf_size = 0; 217 buf_size = 0;
@@ -223,7 +225,7 @@ void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status)
223 break; 225 break;
224 buf_size += align_pkt_len; 226 buf_size += align_pkt_len;
225 rx_counter++; 227 rx_counter++;
226 rx_counter &= NUM_RX_PKT_DESC_MOD_MASK; 228 rx_counter %= wl->num_rx_desc;
227 } 229 }
228 230
229 if (buf_size == 0) { 231 if (buf_size == 0) {
@@ -233,9 +235,14 @@ void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status)
233 235
234 /* Read all available packets at once */ 236 /* Read all available packets at once */
235 des = le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]); 237 des = le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]);
236 wlcore_hw_prepare_read(wl, des, buf_size); 238 ret = wlcore_hw_prepare_read(wl, des, buf_size);
237 wlcore_read_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf, 239 if (ret < 0)
238 buf_size, true); 240 goto out;
241
242 ret = wlcore_read_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
243 buf_size, true);
244 if (ret < 0)
245 goto out;
239 246
240 /* Split data into separate packets */ 247 /* Split data into separate packets */
241 pkt_offset = 0; 248 pkt_offset = 0;
@@ -263,7 +270,7 @@ void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status)
263 270
264 wl->rx_counter++; 271 wl->rx_counter++;
265 drv_rx_counter++; 272 drv_rx_counter++;
266 drv_rx_counter &= NUM_RX_PKT_DESC_MOD_MASK; 273 drv_rx_counter %= wl->num_rx_desc;
267 pkt_offset += wlcore_rx_get_align_buf_size(wl, pkt_len); 274 pkt_offset += wlcore_rx_get_align_buf_size(wl, pkt_len);
268 } 275 }
269 } 276 }
@@ -272,11 +279,17 @@ void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status)
272 * Write the driver's packet counter to the FW. This is only required 279 * Write the driver's packet counter to the FW. This is only required
273 * for older hardware revisions 280 * for older hardware revisions
274 */ 281 */
275 if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) 282 if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) {
276 wl1271_write32(wl, WL12XX_REG_RX_DRIVER_COUNTER, 283 ret = wlcore_write32(wl, WL12XX_REG_RX_DRIVER_COUNTER,
277 wl->rx_counter); 284 wl->rx_counter);
285 if (ret < 0)
286 goto out;
287 }
278 288
279 wl12xx_rearm_rx_streaming(wl, active_hlids); 289 wl12xx_rearm_rx_streaming(wl, active_hlids);
290
291out:
292 return ret;
280} 293}
281 294
282#ifdef CONFIG_PM 295#ifdef CONFIG_PM
@@ -305,14 +318,19 @@ int wl1271_rx_filter_enable(struct wl1271 *wl,
305 return 0; 318 return 0;
306} 319}
307 320
308void wl1271_rx_filter_clear_all(struct wl1271 *wl) 321int wl1271_rx_filter_clear_all(struct wl1271 *wl)
309{ 322{
310 int i; 323 int i, ret = 0;
311 324
312 for (i = 0; i < WL1271_MAX_RX_FILTERS; i++) { 325 for (i = 0; i < WL1271_MAX_RX_FILTERS; i++) {
313 if (!wl->rx_filter_enabled[i]) 326 if (!wl->rx_filter_enabled[i])
314 continue; 327 continue;
315 wl1271_rx_filter_enable(wl, i, 0, NULL); 328 ret = wl1271_rx_filter_enable(wl, i, 0, NULL);
329 if (ret)
330 goto out;
316 } 331 }
332
333out:
334 return ret;
317} 335}
318#endif /* CONFIG_PM */ 336#endif /* CONFIG_PM */
diff --git a/drivers/net/wireless/ti/wlcore/rx.h b/drivers/net/wireless/ti/wlcore/rx.h
index e9a162a864ca..71eba1899915 100644
--- a/drivers/net/wireless/ti/wlcore/rx.h
+++ b/drivers/net/wireless/ti/wlcore/rx.h
@@ -38,8 +38,6 @@
38#define RX_DESC_PACKETID_SHIFT 11 38#define RX_DESC_PACKETID_SHIFT 11
39#define RX_MAX_PACKET_ID 3 39#define RX_MAX_PACKET_ID 3
40 40
41#define NUM_RX_PKT_DESC_MOD_MASK 7
42
43#define RX_DESC_VALID_FCS 0x0001 41#define RX_DESC_VALID_FCS 0x0001
44#define RX_DESC_MATCH_RXADDR1 0x0002 42#define RX_DESC_MATCH_RXADDR1 0x0002
45#define RX_DESC_MCAST 0x0004 43#define RX_DESC_MCAST 0x0004
@@ -102,6 +100,15 @@
102/* If set, the start of IP payload is not 4 bytes aligned */ 100/* If set, the start of IP payload is not 4 bytes aligned */
103#define RX_BUF_UNALIGNED_PAYLOAD BIT(20) 101#define RX_BUF_UNALIGNED_PAYLOAD BIT(20)
104 102
103/* If set, the buffer was padded by the FW to be 4 bytes aligned */
104#define RX_BUF_PADDED_PAYLOAD BIT(30)
105
106/*
107 * Account for the padding inserted by the FW in case of RX_ALIGNMENT
108 * or for fixing alignment in case the packet wasn't aligned.
109 */
110#define RX_BUF_ALIGN 2
111
105/* Describes the alignment state of a Rx buffer */ 112/* Describes the alignment state of a Rx buffer */
106enum wl_rx_buf_align { 113enum wl_rx_buf_align {
107 WLCORE_RX_BUF_ALIGNED, 114 WLCORE_RX_BUF_ALIGNED,
@@ -136,11 +143,11 @@ struct wl1271_rx_descriptor {
136 u8 reserved; 143 u8 reserved;
137} __packed; 144} __packed;
138 145
139void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status); 146int wlcore_rx(struct wl1271 *wl, struct wl_fw_status_1 *status);
140u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band); 147u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
141int wl1271_rx_filter_enable(struct wl1271 *wl, 148int wl1271_rx_filter_enable(struct wl1271 *wl,
142 int index, bool enable, 149 int index, bool enable,
143 struct wl12xx_rx_filter *filter); 150 struct wl12xx_rx_filter *filter);
144void wl1271_rx_filter_clear_all(struct wl1271 *wl); 151int wl1271_rx_filter_clear_all(struct wl1271 *wl);
145 152
146#endif 153#endif
diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c
index ade21a011c45..dbeca1bfbb2c 100644
--- a/drivers/net/wireless/ti/wlcore/scan.c
+++ b/drivers/net/wireless/ti/wlcore/scan.c
@@ -226,7 +226,7 @@ static int wl1271_scan_send(struct wl1271 *wl, struct ieee80211_vif *vif,
226 cmd->params.role_id, band, 226 cmd->params.role_id, band,
227 wl->scan.ssid, wl->scan.ssid_len, 227 wl->scan.ssid, wl->scan.ssid_len,
228 wl->scan.req->ie, 228 wl->scan.req->ie,
229 wl->scan.req->ie_len); 229 wl->scan.req->ie_len, false);
230 if (ret < 0) { 230 if (ret < 0) {
231 wl1271_error("PROBE request template failed"); 231 wl1271_error("PROBE request template failed");
232 goto out; 232 goto out;
@@ -411,7 +411,8 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
411 struct cfg80211_sched_scan_request *req, 411 struct cfg80211_sched_scan_request *req,
412 struct conn_scan_ch_params *channels, 412 struct conn_scan_ch_params *channels,
413 u32 band, bool radar, bool passive, 413 u32 band, bool radar, bool passive,
414 int start, int max_channels) 414 int start, int max_channels,
415 u8 *n_pactive_ch)
415{ 416{
416 struct conf_sched_scan_settings *c = &wl->conf.sched_scan; 417 struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
417 int i, j; 418 int i, j;
@@ -479,6 +480,23 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
479 channels[j].tx_power_att = req->channels[i]->max_power; 480 channels[j].tx_power_att = req->channels[i]->max_power;
480 channels[j].channel = req->channels[i]->hw_value; 481 channels[j].channel = req->channels[i]->hw_value;
481 482
483 if ((band == IEEE80211_BAND_2GHZ) &&
484 (channels[j].channel >= 12) &&
485 (channels[j].channel <= 14) &&
486 (flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
487 !force_passive) {
488 /* pactive channels treated as DFS */
489 channels[j].flags = SCAN_CHANNEL_FLAGS_DFS;
490
491 /*
492 * n_pactive_ch is counted down from the end of
493 * the passive channel list
494 */
495 (*n_pactive_ch)++;
496 wl1271_debug(DEBUG_SCAN, "n_pactive_ch = %d",
497 *n_pactive_ch);
498 }
499
482 j++; 500 j++;
483 } 501 }
484 } 502 }
@@ -491,38 +509,47 @@ wl1271_scan_sched_scan_channels(struct wl1271 *wl,
491 struct cfg80211_sched_scan_request *req, 509 struct cfg80211_sched_scan_request *req,
492 struct wl1271_cmd_sched_scan_config *cfg) 510 struct wl1271_cmd_sched_scan_config *cfg)
493{ 511{
512 u8 n_pactive_ch = 0;
513
494 cfg->passive[0] = 514 cfg->passive[0] =
495 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_2, 515 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_2,
496 IEEE80211_BAND_2GHZ, 516 IEEE80211_BAND_2GHZ,
497 false, true, 0, 517 false, true, 0,
498 MAX_CHANNELS_2GHZ); 518 MAX_CHANNELS_2GHZ,
519 &n_pactive_ch);
499 cfg->active[0] = 520 cfg->active[0] =
500 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_2, 521 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_2,
501 IEEE80211_BAND_2GHZ, 522 IEEE80211_BAND_2GHZ,
502 false, false, 523 false, false,
503 cfg->passive[0], 524 cfg->passive[0],
504 MAX_CHANNELS_2GHZ); 525 MAX_CHANNELS_2GHZ,
526 &n_pactive_ch);
505 cfg->passive[1] = 527 cfg->passive[1] =
506 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5, 528 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5,
507 IEEE80211_BAND_5GHZ, 529 IEEE80211_BAND_5GHZ,
508 false, true, 0, 530 false, true, 0,
509 MAX_CHANNELS_5GHZ); 531 MAX_CHANNELS_5GHZ,
532 &n_pactive_ch);
510 cfg->dfs = 533 cfg->dfs =
511 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5, 534 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5,
512 IEEE80211_BAND_5GHZ, 535 IEEE80211_BAND_5GHZ,
513 true, true, 536 true, true,
514 cfg->passive[1], 537 cfg->passive[1],
515 MAX_CHANNELS_5GHZ); 538 MAX_CHANNELS_5GHZ,
539 &n_pactive_ch);
516 cfg->active[1] = 540 cfg->active[1] =
517 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5, 541 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5,
518 IEEE80211_BAND_5GHZ, 542 IEEE80211_BAND_5GHZ,
519 false, false, 543 false, false,
520 cfg->passive[1] + cfg->dfs, 544 cfg->passive[1] + cfg->dfs,
521 MAX_CHANNELS_5GHZ); 545 MAX_CHANNELS_5GHZ,
546 &n_pactive_ch);
522 /* 802.11j channels are not supported yet */ 547 /* 802.11j channels are not supported yet */
523 cfg->passive[2] = 0; 548 cfg->passive[2] = 0;
524 cfg->active[2] = 0; 549 cfg->active[2] = 0;
525 550
551 cfg->n_pactive_ch = n_pactive_ch;
552
526 wl1271_debug(DEBUG_SCAN, " 2.4GHz: active %d passive %d", 553 wl1271_debug(DEBUG_SCAN, " 2.4GHz: active %d passive %d",
527 cfg->active[0], cfg->passive[0]); 554 cfg->active[0], cfg->passive[0]);
528 wl1271_debug(DEBUG_SCAN, " 5GHz: active %d passive %d", 555 wl1271_debug(DEBUG_SCAN, " 5GHz: active %d passive %d",
@@ -537,6 +564,7 @@ wl1271_scan_sched_scan_channels(struct wl1271 *wl,
537/* Returns the scan type to be used or a negative value on error */ 564/* Returns the scan type to be used or a negative value on error */
538static int 565static int
539wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl, 566wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl,
567 struct wl12xx_vif *wlvif,
540 struct cfg80211_sched_scan_request *req) 568 struct cfg80211_sched_scan_request *req)
541{ 569{
542 struct wl1271_cmd_sched_scan_ssid_list *cmd = NULL; 570 struct wl1271_cmd_sched_scan_ssid_list *cmd = NULL;
@@ -565,6 +593,7 @@ wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl,
565 goto out; 593 goto out;
566 } 594 }
567 595
596 cmd->role_id = wlvif->dev_role_id;
568 if (!n_match_ssids) { 597 if (!n_match_ssids) {
569 /* No filter, with ssids */ 598 /* No filter, with ssids */
570 type = SCAN_SSID_FILTER_DISABLED; 599 type = SCAN_SSID_FILTER_DISABLED;
@@ -603,7 +632,9 @@ wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl,
603 continue; 632 continue;
604 633
605 for (j = 0; j < cmd->n_ssids; j++) 634 for (j = 0; j < cmd->n_ssids; j++)
606 if (!memcmp(req->ssids[i].ssid, 635 if ((req->ssids[i].ssid_len ==
636 cmd->ssids[j].len) &&
637 !memcmp(req->ssids[i].ssid,
607 cmd->ssids[j].ssid, 638 cmd->ssids[j].ssid,
608 req->ssids[i].ssid_len)) { 639 req->ssids[i].ssid_len)) {
609 cmd->ssids[j].type = 640 cmd->ssids[j].type =
@@ -652,6 +683,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
652 if (!cfg) 683 if (!cfg)
653 return -ENOMEM; 684 return -ENOMEM;
654 685
686 cfg->role_id = wlvif->dev_role_id;
655 cfg->rssi_threshold = c->rssi_threshold; 687 cfg->rssi_threshold = c->rssi_threshold;
656 cfg->snr_threshold = c->snr_threshold; 688 cfg->snr_threshold = c->snr_threshold;
657 cfg->n_probe_reqs = c->num_probe_reqs; 689 cfg->n_probe_reqs = c->num_probe_reqs;
@@ -669,7 +701,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
669 cfg->intervals[i] = cpu_to_le32(req->interval); 701 cfg->intervals[i] = cpu_to_le32(req->interval);
670 702
671 cfg->ssid_len = 0; 703 cfg->ssid_len = 0;
672 ret = wl12xx_scan_sched_scan_ssid_list(wl, req); 704 ret = wl12xx_scan_sched_scan_ssid_list(wl, wlvif, req);
673 if (ret < 0) 705 if (ret < 0)
674 goto out; 706 goto out;
675 707
@@ -690,7 +722,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
690 req->ssids[0].ssid, 722 req->ssids[0].ssid,
691 req->ssids[0].ssid_len, 723 req->ssids[0].ssid_len,
692 ies->ie[band], 724 ies->ie[band],
693 ies->len[band]); 725 ies->len[band], true);
694 if (ret < 0) { 726 if (ret < 0) {
695 wl1271_error("2.4GHz PROBE request template failed"); 727 wl1271_error("2.4GHz PROBE request template failed");
696 goto out; 728 goto out;
@@ -704,7 +736,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
704 req->ssids[0].ssid, 736 req->ssids[0].ssid,
705 req->ssids[0].ssid_len, 737 req->ssids[0].ssid_len,
706 ies->ie[band], 738 ies->ie[band],
707 ies->len[band]); 739 ies->len[band], true);
708 if (ret < 0) { 740 if (ret < 0) {
709 wl1271_error("5GHz PROBE request template failed"); 741 wl1271_error("5GHz PROBE request template failed");
710 goto out; 742 goto out;
@@ -734,13 +766,15 @@ int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif)
734 if (wlvif->bss_type != BSS_TYPE_STA_BSS) 766 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
735 return -EOPNOTSUPP; 767 return -EOPNOTSUPP;
736 768
737 if (test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) 769 if ((wl->quirks & WLCORE_QUIRK_NO_SCHED_SCAN_WHILE_CONN) &&
770 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
738 return -EBUSY; 771 return -EBUSY;
739 772
740 start = kzalloc(sizeof(*start), GFP_KERNEL); 773 start = kzalloc(sizeof(*start), GFP_KERNEL);
741 if (!start) 774 if (!start)
742 return -ENOMEM; 775 return -ENOMEM;
743 776
777 start->role_id = wlvif->dev_role_id;
744 start->tag = WL1271_SCAN_DEFAULT_TAG; 778 start->tag = WL1271_SCAN_DEFAULT_TAG;
745 779
746 ret = wl1271_cmd_send(wl, CMD_START_PERIODIC_SCAN, start, 780 ret = wl1271_cmd_send(wl, CMD_START_PERIODIC_SCAN, start,
@@ -762,7 +796,7 @@ void wl1271_scan_sched_scan_results(struct wl1271 *wl)
762 ieee80211_sched_scan_results(wl->hw); 796 ieee80211_sched_scan_results(wl->hw);
763} 797}
764 798
765void wl1271_scan_sched_scan_stop(struct wl1271 *wl) 799void wl1271_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif)
766{ 800{
767 struct wl1271_cmd_sched_scan_stop *stop; 801 struct wl1271_cmd_sched_scan_stop *stop;
768 int ret = 0; 802 int ret = 0;
@@ -776,6 +810,7 @@ void wl1271_scan_sched_scan_stop(struct wl1271 *wl)
776 return; 810 return;
777 } 811 }
778 812
813 stop->role_id = wlvif->dev_role_id;
779 stop->tag = WL1271_SCAN_DEFAULT_TAG; 814 stop->tag = WL1271_SCAN_DEFAULT_TAG;
780 815
781 ret = wl1271_cmd_send(wl, CMD_STOP_PERIODIC_SCAN, stop, 816 ret = wl1271_cmd_send(wl, CMD_STOP_PERIODIC_SCAN, stop,
diff --git a/drivers/net/wireless/ti/wlcore/scan.h b/drivers/net/wireless/ti/wlcore/scan.h
index 81ee36ac2078..29f3c8d6b046 100644
--- a/drivers/net/wireless/ti/wlcore/scan.h
+++ b/drivers/net/wireless/ti/wlcore/scan.h
@@ -40,7 +40,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
40 struct cfg80211_sched_scan_request *req, 40 struct cfg80211_sched_scan_request *req,
41 struct ieee80211_sched_scan_ies *ies); 41 struct ieee80211_sched_scan_ies *ies);
42int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif); 42int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif);
43void wl1271_scan_sched_scan_stop(struct wl1271 *wl); 43void wl1271_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif);
44void wl1271_scan_sched_scan_results(struct wl1271 *wl); 44void wl1271_scan_sched_scan_results(struct wl1271 *wl);
45 45
46#define WL1271_SCAN_MAX_CHANNELS 24 46#define WL1271_SCAN_MAX_CHANNELS 24
@@ -142,7 +142,8 @@ enum {
142 SCAN_BSS_TYPE_ANY, 142 SCAN_BSS_TYPE_ANY,
143}; 143};
144 144
145#define SCAN_CHANNEL_FLAGS_DFS BIT(0) 145#define SCAN_CHANNEL_FLAGS_DFS BIT(0) /* channel is passive until an
146 activity is detected on it */
146#define SCAN_CHANNEL_FLAGS_DFS_ENABLED BIT(1) 147#define SCAN_CHANNEL_FLAGS_DFS_ENABLED BIT(1)
147 148
148struct conn_scan_ch_params { 149struct conn_scan_ch_params {
@@ -185,7 +186,10 @@ struct wl1271_cmd_sched_scan_config {
185 186
186 u8 dfs; 187 u8 dfs;
187 188
188 u8 padding[3]; 189 u8 n_pactive_ch; /* number of pactive (passive until fw detects energy)
190 channels in BG band */
191 u8 role_id;
192 u8 padding[1];
189 193
190 struct conn_scan_ch_params channels_2[MAX_CHANNELS_2GHZ]; 194 struct conn_scan_ch_params channels_2[MAX_CHANNELS_2GHZ];
191 struct conn_scan_ch_params channels_5[MAX_CHANNELS_5GHZ]; 195 struct conn_scan_ch_params channels_5[MAX_CHANNELS_5GHZ];
@@ -212,21 +216,24 @@ struct wl1271_cmd_sched_scan_ssid_list {
212 216
213 u8 n_ssids; 217 u8 n_ssids;
214 struct wl1271_ssid ssids[SCHED_SCAN_MAX_SSIDS]; 218 struct wl1271_ssid ssids[SCHED_SCAN_MAX_SSIDS];
215 u8 padding[3]; 219 u8 role_id;
220 u8 padding[2];
216} __packed; 221} __packed;
217 222
218struct wl1271_cmd_sched_scan_start { 223struct wl1271_cmd_sched_scan_start {
219 struct wl1271_cmd_header header; 224 struct wl1271_cmd_header header;
220 225
221 u8 tag; 226 u8 tag;
222 u8 padding[3]; 227 u8 role_id;
228 u8 padding[2];
223} __packed; 229} __packed;
224 230
225struct wl1271_cmd_sched_scan_stop { 231struct wl1271_cmd_sched_scan_stop {
226 struct wl1271_cmd_header header; 232 struct wl1271_cmd_header header;
227 233
228 u8 tag; 234 u8 tag;
229 u8 padding[3]; 235 u8 role_id;
236 u8 padding[2];
230} __packed; 237} __packed;
231 238
232 239
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 0a72347cfc4c..73ace4b2604e 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -25,6 +25,7 @@
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/vmalloc.h> 26#include <linux/vmalloc.h>
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28#include <linux/mmc/sdio.h>
28#include <linux/mmc/sdio_func.h> 29#include <linux/mmc/sdio_func.h>
29#include <linux/mmc/sdio_ids.h> 30#include <linux/mmc/sdio_ids.h>
30#include <linux/mmc/card.h> 31#include <linux/mmc/card.h>
@@ -32,6 +33,7 @@
32#include <linux/gpio.h> 33#include <linux/gpio.h>
33#include <linux/wl12xx.h> 34#include <linux/wl12xx.h>
34#include <linux/pm_runtime.h> 35#include <linux/pm_runtime.h>
36#include <linux/printk.h>
35 37
36#include "wlcore.h" 38#include "wlcore.h"
37#include "wl12xx_80211.h" 39#include "wl12xx_80211.h"
@@ -45,6 +47,8 @@
45#define SDIO_DEVICE_ID_TI_WL1271 0x4076 47#define SDIO_DEVICE_ID_TI_WL1271 0x4076
46#endif 48#endif
47 49
50static bool dump = false;
51
48struct wl12xx_sdio_glue { 52struct wl12xx_sdio_glue {
49 struct device *dev; 53 struct device *dev;
50 struct platform_device *core; 54 struct platform_device *core;
@@ -67,8 +71,8 @@ static void wl1271_sdio_set_block_size(struct device *child,
67 sdio_release_host(func); 71 sdio_release_host(func);
68} 72}
69 73
70static void wl12xx_sdio_raw_read(struct device *child, int addr, void *buf, 74static int __must_check wl12xx_sdio_raw_read(struct device *child, int addr,
71 size_t len, bool fixed) 75 void *buf, size_t len, bool fixed)
72{ 76{
73 int ret; 77 int ret;
74 struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent); 78 struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
@@ -76,6 +80,13 @@ static void wl12xx_sdio_raw_read(struct device *child, int addr, void *buf,
76 80
77 sdio_claim_host(func); 81 sdio_claim_host(func);
78 82
83 if (unlikely(dump)) {
84 printk(KERN_DEBUG "wlcore_sdio: READ from 0x%04x\n", addr);
85 print_hex_dump(KERN_DEBUG, "wlcore_sdio: READ ",
86 DUMP_PREFIX_OFFSET, 16, 1,
87 buf, len, false);
88 }
89
79 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG)) { 90 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG)) {
80 ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret); 91 ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
81 dev_dbg(child->parent, "sdio read 52 addr 0x%x, byte 0x%02x\n", 92 dev_dbg(child->parent, "sdio read 52 addr 0x%x, byte 0x%02x\n",
@@ -92,12 +103,14 @@ static void wl12xx_sdio_raw_read(struct device *child, int addr, void *buf,
92 103
93 sdio_release_host(func); 104 sdio_release_host(func);
94 105
95 if (ret) 106 if (WARN_ON(ret))
96 dev_err(child->parent, "sdio read failed (%d)\n", ret); 107 dev_err(child->parent, "sdio read failed (%d)\n", ret);
108
109 return ret;
97} 110}
98 111
99static void wl12xx_sdio_raw_write(struct device *child, int addr, void *buf, 112static int __must_check wl12xx_sdio_raw_write(struct device *child, int addr,
100 size_t len, bool fixed) 113 void *buf, size_t len, bool fixed)
101{ 114{
102 int ret; 115 int ret;
103 struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent); 116 struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
@@ -105,6 +118,13 @@ static void wl12xx_sdio_raw_write(struct device *child, int addr, void *buf,
105 118
106 sdio_claim_host(func); 119 sdio_claim_host(func);
107 120
121 if (unlikely(dump)) {
122 printk(KERN_DEBUG "wlcore_sdio: WRITE to 0x%04x\n", addr);
123 print_hex_dump(KERN_DEBUG, "wlcore_sdio: WRITE ",
124 DUMP_PREFIX_OFFSET, 16, 1,
125 buf, len, false);
126 }
127
108 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG)) { 128 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG)) {
109 sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret); 129 sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
110 dev_dbg(child->parent, "sdio write 52 addr 0x%x, byte 0x%02x\n", 130 dev_dbg(child->parent, "sdio write 52 addr 0x%x, byte 0x%02x\n",
@@ -121,25 +141,30 @@ static void wl12xx_sdio_raw_write(struct device *child, int addr, void *buf,
121 141
122 sdio_release_host(func); 142 sdio_release_host(func);
123 143
124 if (ret) 144 if (WARN_ON(ret))
125 dev_err(child->parent, "sdio write failed (%d)\n", ret); 145 dev_err(child->parent, "sdio write failed (%d)\n", ret);
146
147 return ret;
126} 148}
127 149
128static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue) 150static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue)
129{ 151{
130 int ret; 152 int ret;
131 struct sdio_func *func = dev_to_sdio_func(glue->dev); 153 struct sdio_func *func = dev_to_sdio_func(glue->dev);
154 struct mmc_card *card = func->card;
132 155
133 /* If enabled, tell runtime PM not to power off the card */ 156 ret = pm_runtime_get_sync(&card->dev);
134 if (pm_runtime_enabled(&func->dev)) { 157 if (ret) {
135 ret = pm_runtime_get_sync(&func->dev); 158 /*
136 if (ret < 0) 159 * Runtime PM might be temporarily disabled, or the device
137 goto out; 160 * might have a positive reference counter. Make sure it is
138 } else { 161 * really powered on.
139 /* Runtime PM is disabled: power up the card manually */ 162 */
140 ret = mmc_power_restore_host(func->card->host); 163 ret = mmc_power_restore_host(card->host);
141 if (ret < 0) 164 if (ret < 0) {
165 pm_runtime_put_sync(&card->dev);
142 goto out; 166 goto out;
167 }
143 } 168 }
144 169
145 sdio_claim_host(func); 170 sdio_claim_host(func);
@@ -154,20 +179,21 @@ static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue)
154{ 179{
155 int ret; 180 int ret;
156 struct sdio_func *func = dev_to_sdio_func(glue->dev); 181 struct sdio_func *func = dev_to_sdio_func(glue->dev);
182 struct mmc_card *card = func->card;
157 183
158 sdio_claim_host(func); 184 sdio_claim_host(func);
159 sdio_disable_func(func); 185 sdio_disable_func(func);
160 sdio_release_host(func); 186 sdio_release_host(func);
161 187
162 /* Power off the card manually, even if runtime PM is enabled. */ 188 /* Power off the card manually in case it wasn't powered off above */
163 ret = mmc_power_save_host(func->card->host); 189 ret = mmc_power_save_host(card->host);
164 if (ret < 0) 190 if (ret < 0)
165 return ret; 191 goto out;
166 192
167 /* If enabled, let runtime PM know the card is powered off */ 193 /* Let runtime PM know the card is powered off */
168 if (pm_runtime_enabled(&func->dev)) 194 pm_runtime_put_sync(&card->dev);
169 ret = pm_runtime_put_sync(&func->dev);
170 195
196out:
171 return ret; 197 return ret;
172} 198}
173 199
@@ -196,6 +222,7 @@ static int __devinit wl1271_probe(struct sdio_func *func,
196 struct resource res[1]; 222 struct resource res[1];
197 mmc_pm_flag_t mmcflags; 223 mmc_pm_flag_t mmcflags;
198 int ret = -ENOMEM; 224 int ret = -ENOMEM;
225 const char *chip_family;
199 226
200 /* We are only able to handle the wlan function */ 227 /* We are only able to handle the wlan function */
201 if (func->num != 0x02) 228 if (func->num != 0x02)
@@ -236,7 +263,18 @@ static int __devinit wl1271_probe(struct sdio_func *func,
236 /* Tell PM core that we don't need the card to be powered now */ 263 /* Tell PM core that we don't need the card to be powered now */
237 pm_runtime_put_noidle(&func->dev); 264 pm_runtime_put_noidle(&func->dev);
238 265
239 glue->core = platform_device_alloc("wl12xx", -1); 266 /*
267 * Due to a hardware bug, we can't differentiate wl18xx from
268 * wl12xx, because both report the same device ID. The only
269 * way to differentiate is by checking the SDIO revision,
270 * which is 3.00 on the wl18xx chips.
271 */
272 if (func->card->cccr.sdio_vsn == SDIO_SDIO_REV_3_00)
273 chip_family = "wl18xx";
274 else
275 chip_family = "wl12xx";
276
277 glue->core = platform_device_alloc(chip_family, -1);
240 if (!glue->core) { 278 if (!glue->core) {
241 dev_err(glue->dev, "can't allocate platform_device"); 279 dev_err(glue->dev, "can't allocate platform_device");
242 ret = -ENOMEM; 280 ret = -ENOMEM;
@@ -367,12 +405,9 @@ static void __exit wl1271_exit(void)
367module_init(wl1271_init); 405module_init(wl1271_init);
368module_exit(wl1271_exit); 406module_exit(wl1271_exit);
369 407
408module_param(dump, bool, S_IRUSR | S_IWUSR);
409MODULE_PARM_DESC(dump, "Enable sdio read/write dumps.");
410
370MODULE_LICENSE("GPL"); 411MODULE_LICENSE("GPL");
371MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>"); 412MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
372MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); 413MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
373MODULE_FIRMWARE(WL127X_FW_NAME_SINGLE);
374MODULE_FIRMWARE(WL127X_FW_NAME_MULTI);
375MODULE_FIRMWARE(WL127X_PLT_FW_NAME);
376MODULE_FIRMWARE(WL128X_FW_NAME_SINGLE);
377MODULE_FIRMWARE(WL128X_FW_NAME_MULTI);
378MODULE_FIRMWARE(WL128X_PLT_FW_NAME);
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 553cd3cbb98c..8da4ed243ebc 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -193,8 +193,8 @@ static int wl12xx_spi_read_busy(struct device *child)
193 return -ETIMEDOUT; 193 return -ETIMEDOUT;
194} 194}
195 195
196static void wl12xx_spi_raw_read(struct device *child, int addr, void *buf, 196static int __must_check wl12xx_spi_raw_read(struct device *child, int addr,
197 size_t len, bool fixed) 197 void *buf, size_t len, bool fixed)
198{ 198{
199 struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent); 199 struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
200 struct wl1271 *wl = dev_get_drvdata(child); 200 struct wl1271 *wl = dev_get_drvdata(child);
@@ -238,7 +238,7 @@ static void wl12xx_spi_raw_read(struct device *child, int addr, void *buf,
238 if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1) && 238 if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1) &&
239 wl12xx_spi_read_busy(child)) { 239 wl12xx_spi_read_busy(child)) {
240 memset(buf, 0, chunk_len); 240 memset(buf, 0, chunk_len);
241 return; 241 return 0;
242 } 242 }
243 243
244 spi_message_init(&m); 244 spi_message_init(&m);
@@ -256,10 +256,12 @@ static void wl12xx_spi_raw_read(struct device *child, int addr, void *buf,
256 buf += chunk_len; 256 buf += chunk_len;
257 len -= chunk_len; 257 len -= chunk_len;
258 } 258 }
259
260 return 0;
259} 261}
260 262
261static void wl12xx_spi_raw_write(struct device *child, int addr, void *buf, 263static int __must_check wl12xx_spi_raw_write(struct device *child, int addr,
262 size_t len, bool fixed) 264 void *buf, size_t len, bool fixed)
263{ 265{
264 struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent); 266 struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
265 struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS]; 267 struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS];
@@ -304,6 +306,8 @@ static void wl12xx_spi_raw_write(struct device *child, int addr, void *buf,
304 } 306 }
305 307
306 spi_sync(to_spi_device(glue->dev), &m); 308 spi_sync(to_spi_device(glue->dev), &m);
309
310 return 0;
307} 311}
308 312
309static struct wl1271_if_operations spi_ops = { 313static struct wl1271_if_operations spi_ops = {
@@ -431,10 +435,4 @@ module_exit(wl1271_exit);
431MODULE_LICENSE("GPL"); 435MODULE_LICENSE("GPL");
432MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>"); 436MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
433MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); 437MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
434MODULE_FIRMWARE(WL127X_FW_NAME_SINGLE);
435MODULE_FIRMWARE(WL127X_FW_NAME_MULTI);
436MODULE_FIRMWARE(WL127X_PLT_FW_NAME);
437MODULE_FIRMWARE(WL128X_FW_NAME_SINGLE);
438MODULE_FIRMWARE(WL128X_FW_NAME_MULTI);
439MODULE_FIRMWARE(WL128X_PLT_FW_NAME);
440MODULE_ALIAS("spi:wl1271"); 438MODULE_ALIAS("spi:wl1271");
diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c
index 0e59ea2cdd39..d6f57e2c03cf 100644
--- a/drivers/net/wireless/ti/wlcore/testmode.c
+++ b/drivers/net/wireless/ti/wlcore/testmode.c
@@ -40,7 +40,7 @@ enum wl1271_tm_commands {
40 WL1271_TM_CMD_CONFIGURE, 40 WL1271_TM_CMD_CONFIGURE,
41 WL1271_TM_CMD_NVS_PUSH, /* Not in use. Keep to not break ABI */ 41 WL1271_TM_CMD_NVS_PUSH, /* Not in use. Keep to not break ABI */
42 WL1271_TM_CMD_SET_PLT_MODE, 42 WL1271_TM_CMD_SET_PLT_MODE,
43 WL1271_TM_CMD_RECOVER, 43 WL1271_TM_CMD_RECOVER, /* Not in use. Keep to not break ABI */
44 WL1271_TM_CMD_GET_MAC, 44 WL1271_TM_CMD_GET_MAC,
45 45
46 __WL1271_TM_CMD_AFTER_LAST 46 __WL1271_TM_CMD_AFTER_LAST
@@ -108,6 +108,20 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
108 } 108 }
109 109
110 if (answer) { 110 if (answer) {
111 /* If we got bip calibration answer print radio status */
112 struct wl1271_cmd_cal_p2g *params =
113 (struct wl1271_cmd_cal_p2g *) buf;
114
115 s16 radio_status = (s16) le16_to_cpu(params->radio_status);
116
117 if (params->test.id == TEST_CMD_P2G_CAL &&
118 radio_status < 0)
119 wl1271_warning("testmode cmd: radio status=%d",
120 radio_status);
121 else
122 wl1271_info("testmode cmd: radio status=%d",
123 radio_status);
124
111 len = nla_total_size(buf_len); 125 len = nla_total_size(buf_len);
112 skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, len); 126 skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, len);
113 if (!skb) { 127 if (!skb) {
@@ -258,15 +272,6 @@ static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[])
258 return ret; 272 return ret;
259} 273}
260 274
261static int wl1271_tm_cmd_recover(struct wl1271 *wl, struct nlattr *tb[])
262{
263 wl1271_debug(DEBUG_TESTMODE, "testmode cmd recover");
264
265 wl12xx_queue_recovery_work(wl);
266
267 return 0;
268}
269
270static int wl12xx_tm_cmd_get_mac(struct wl1271 *wl, struct nlattr *tb[]) 275static int wl12xx_tm_cmd_get_mac(struct wl1271 *wl, struct nlattr *tb[])
271{ 276{
272 struct sk_buff *skb; 277 struct sk_buff *skb;
@@ -336,8 +341,6 @@ int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len)
336 return wl1271_tm_cmd_configure(wl, tb); 341 return wl1271_tm_cmd_configure(wl, tb);
337 case WL1271_TM_CMD_SET_PLT_MODE: 342 case WL1271_TM_CMD_SET_PLT_MODE:
338 return wl1271_tm_cmd_set_plt_mode(wl, tb); 343 return wl1271_tm_cmd_set_plt_mode(wl, tb);
339 case WL1271_TM_CMD_RECOVER:
340 return wl1271_tm_cmd_recover(wl, tb);
341 case WL1271_TM_CMD_GET_MAC: 344 case WL1271_TM_CMD_GET_MAC:
342 return wl12xx_tm_cmd_get_mac(wl, tb); 345 return wl12xx_tm_cmd_get_mac(wl, tb);
343 default: 346 default:
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index 6893bc207994..8038a5026933 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -72,7 +72,7 @@ static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
72 return id; 72 return id;
73} 73}
74 74
75static void wl1271_free_tx_id(struct wl1271 *wl, int id) 75void wl1271_free_tx_id(struct wl1271 *wl, int id)
76{ 76{
77 if (__test_and_clear_bit(id, wl->tx_frames_map)) { 77 if (__test_and_clear_bit(id, wl->tx_frames_map)) {
78 if (unlikely(wl->tx_frames_cnt == wl->num_tx_desc)) 78 if (unlikely(wl->tx_frames_cnt == wl->num_tx_desc))
@@ -82,6 +82,7 @@ static void wl1271_free_tx_id(struct wl1271 *wl, int id)
82 wl->tx_frames_cnt--; 82 wl->tx_frames_cnt--;
83 } 83 }
84} 84}
85EXPORT_SYMBOL(wl1271_free_tx_id);
85 86
86static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl, 87static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
87 struct sk_buff *skb) 88 struct sk_buff *skb)
@@ -127,6 +128,7 @@ bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
127{ 128{
128 return wl->dummy_packet == skb; 129 return wl->dummy_packet == skb;
129} 130}
131EXPORT_SYMBOL(wl12xx_is_dummy_packet);
130 132
131u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif, 133u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
132 struct sk_buff *skb) 134 struct sk_buff *skb)
@@ -146,10 +148,10 @@ u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
146 return wl->system_hlid; 148 return wl->system_hlid;
147 149
148 hdr = (struct ieee80211_hdr *)skb->data; 150 hdr = (struct ieee80211_hdr *)skb->data;
149 if (ieee80211_is_mgmt(hdr->frame_control)) 151 if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
150 return wlvif->ap.global_hlid;
151 else
152 return wlvif->ap.bcast_hlid; 152 return wlvif->ap.bcast_hlid;
153 else
154 return wlvif->ap.global_hlid;
153 } 155 }
154} 156}
155 157
@@ -176,37 +178,34 @@ u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
176unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl, 178unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
177 unsigned int packet_length) 179 unsigned int packet_length)
178{ 180{
179 if (wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN) 181 if ((wl->quirks & WLCORE_QUIRK_TX_PAD_LAST_FRAME) ||
180 return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE); 182 !(wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN))
181 else
182 return ALIGN(packet_length, WL1271_TX_ALIGN_TO); 183 return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
184 else
185 return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
183} 186}
184EXPORT_SYMBOL(wlcore_calc_packet_alignment); 187EXPORT_SYMBOL(wlcore_calc_packet_alignment);
185 188
186static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif, 189static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
187 struct sk_buff *skb, u32 extra, u32 buf_offset, 190 struct sk_buff *skb, u32 extra, u32 buf_offset,
188 u8 hlid) 191 u8 hlid, bool is_gem)
189{ 192{
190 struct wl1271_tx_hw_descr *desc; 193 struct wl1271_tx_hw_descr *desc;
191 u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra; 194 u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
192 u32 total_blocks; 195 u32 total_blocks;
193 int id, ret = -EBUSY, ac; 196 int id, ret = -EBUSY, ac;
194 u32 spare_blocks = wl->normal_tx_spare; 197 u32 spare_blocks;
195 bool is_dummy = false;
196 198
197 if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE) 199 if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
198 return -EAGAIN; 200 return -EAGAIN;
199 201
202 spare_blocks = wlcore_hw_get_spare_blocks(wl, is_gem);
203
200 /* allocate free identifier for the packet */ 204 /* allocate free identifier for the packet */
201 id = wl1271_alloc_tx_id(wl, skb); 205 id = wl1271_alloc_tx_id(wl, skb);
202 if (id < 0) 206 if (id < 0)
203 return id; 207 return id;
204 208
205 if (unlikely(wl12xx_is_dummy_packet(wl, skb)))
206 is_dummy = true;
207 else if (wlvif->is_gem)
208 spare_blocks = wl->gem_tx_spare;
209
210 total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks); 209 total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks);
211 210
212 if (total_blocks <= wl->tx_blocks_available) { 211 if (total_blocks <= wl->tx_blocks_available) {
@@ -228,7 +227,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
228 ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); 227 ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
229 wl->tx_allocated_pkts[ac]++; 228 wl->tx_allocated_pkts[ac]++;
230 229
231 if (!is_dummy && wlvif && 230 if (!wl12xx_is_dummy_packet(wl, skb) && wlvif &&
232 wlvif->bss_type == BSS_TYPE_AP_BSS && 231 wlvif->bss_type == BSS_TYPE_AP_BSS &&
233 test_bit(hlid, wlvif->ap.sta_hlid_map)) 232 test_bit(hlid, wlvif->ap.sta_hlid_map))
234 wl->links[hlid].allocated_pkts++; 233 wl->links[hlid].allocated_pkts++;
@@ -268,6 +267,7 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
268 if (extra) { 267 if (extra) {
269 int hdrlen = ieee80211_hdrlen(frame_control); 268 int hdrlen = ieee80211_hdrlen(frame_control);
270 memmove(frame_start, hdr, hdrlen); 269 memmove(frame_start, hdr, hdrlen);
270 skb_set_network_header(skb, skb_network_offset(skb) + extra);
271 } 271 }
272 272
273 /* configure packet life time */ 273 /* configure packet life time */
@@ -305,11 +305,15 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
305 if (is_dummy || !wlvif) 305 if (is_dummy || !wlvif)
306 rate_idx = 0; 306 rate_idx = 0;
307 else if (wlvif->bss_type != BSS_TYPE_AP_BSS) { 307 else if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
308 /* if the packets are destined for AP (have a STA entry) 308 /*
309 send them with AP rate policies, otherwise use default 309 * if the packets are destined for AP (have a STA entry)
310 basic rates */ 310 * send them with AP rate policies (EAPOLs are an exception),
311 * otherwise use default basic rates
312 */
311 if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE) 313 if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
312 rate_idx = wlvif->sta.p2p_rate_idx; 314 rate_idx = wlvif->sta.p2p_rate_idx;
315 else if (skb->protocol == cpu_to_be16(ETH_P_PAE))
316 rate_idx = wlvif->sta.basic_rate_idx;
313 else if (control->control.sta) 317 else if (control->control.sta)
314 rate_idx = wlvif->sta.ap_rate_idx; 318 rate_idx = wlvif->sta.ap_rate_idx;
315 else 319 else
@@ -330,9 +334,9 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
330 ieee80211_has_protected(frame_control)) 334 ieee80211_has_protected(frame_control))
331 tx_attr |= TX_HW_ATTR_HOST_ENCRYPT; 335 tx_attr |= TX_HW_ATTR_HOST_ENCRYPT;
332 336
333 desc->reserved = 0;
334 desc->tx_attr = cpu_to_le16(tx_attr); 337 desc->tx_attr = cpu_to_le16(tx_attr);
335 338
339 wlcore_hw_set_tx_desc_csum(wl, desc, skb);
336 wlcore_hw_set_tx_desc_data_len(wl, desc, skb); 340 wlcore_hw_set_tx_desc_data_len(wl, desc, skb);
337} 341}
338 342
@@ -346,16 +350,20 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
346 u32 total_len; 350 u32 total_len;
347 u8 hlid; 351 u8 hlid;
348 bool is_dummy; 352 bool is_dummy;
353 bool is_gem = false;
349 354
350 if (!skb) 355 if (!skb) {
356 wl1271_error("discarding null skb");
351 return -EINVAL; 357 return -EINVAL;
358 }
352 359
353 info = IEEE80211_SKB_CB(skb); 360 info = IEEE80211_SKB_CB(skb);
354 361
355 /* TODO: handle dummy packets on multi-vifs */ 362 /* TODO: handle dummy packets on multi-vifs */
356 is_dummy = wl12xx_is_dummy_packet(wl, skb); 363 is_dummy = wl12xx_is_dummy_packet(wl, skb);
357 364
358 if (info->control.hw_key && 365 if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
366 info->control.hw_key &&
359 info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) 367 info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
360 extra = WL1271_EXTRA_SPACE_TKIP; 368 extra = WL1271_EXTRA_SPACE_TKIP;
361 369
@@ -373,6 +381,8 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
373 return ret; 381 return ret;
374 wlvif->default_key = idx; 382 wlvif->default_key = idx;
375 } 383 }
384
385 is_gem = (cipher == WL1271_CIPHER_SUITE_GEM);
376 } 386 }
377 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb); 387 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
378 if (hlid == WL12XX_INVALID_LINK_ID) { 388 if (hlid == WL12XX_INVALID_LINK_ID) {
@@ -380,7 +390,8 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
380 return -EINVAL; 390 return -EINVAL;
381 } 391 }
382 392
383 ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid); 393 ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid,
394 is_gem);
384 if (ret < 0) 395 if (ret < 0)
385 return ret; 396 return ret;
386 397
@@ -425,10 +436,10 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
425 rate_set >>= 1; 436 rate_set >>= 1;
426 } 437 }
427 438
428 /* MCS rates indication are on bits 16 - 23 */ 439 /* MCS rates indication are on bits 16 - 31 */
429 rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates; 440 rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates;
430 441
431 for (bit = 0; bit < 8; bit++) { 442 for (bit = 0; bit < 16; bit++) {
432 if (rate_set & 0x1) 443 if (rate_set & 0x1)
433 enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit); 444 enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit);
434 rate_set >>= 1; 445 rate_set >>= 1;
@@ -439,18 +450,15 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
439 450
440void wl1271_handle_tx_low_watermark(struct wl1271 *wl) 451void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
441{ 452{
442 unsigned long flags;
443 int i; 453 int i;
444 454
445 for (i = 0; i < NUM_TX_QUEUES; i++) { 455 for (i = 0; i < NUM_TX_QUEUES; i++) {
446 if (test_bit(i, &wl->stopped_queues_map) && 456 if (wlcore_is_queue_stopped_by_reason(wl, i,
457 WLCORE_QUEUE_STOP_REASON_WATERMARK) &&
447 wl->tx_queue_count[i] <= WL1271_TX_QUEUE_LOW_WATERMARK) { 458 wl->tx_queue_count[i] <= WL1271_TX_QUEUE_LOW_WATERMARK) {
448 /* firmware buffer has space, restart queues */ 459 /* firmware buffer has space, restart queues */
449 spin_lock_irqsave(&wl->wl_lock, flags); 460 wlcore_wake_queue(wl, i,
450 ieee80211_wake_queue(wl->hw, 461 WLCORE_QUEUE_STOP_REASON_WATERMARK);
451 wl1271_tx_get_mac80211_queue(i));
452 clear_bit(i, &wl->stopped_queues_map);
453 spin_unlock_irqrestore(&wl->wl_lock, flags);
454 } 462 }
455 } 463 }
456} 464}
@@ -656,18 +664,29 @@ void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
656 } 664 }
657} 665}
658 666
659void wl1271_tx_work_locked(struct wl1271 *wl) 667/*
668 * Returns failure values only in case of failed bus ops within this function.
669 * wl1271_prepare_tx_frame retvals won't be returned in order to avoid
670 * triggering recovery by higher layers when not necessary.
671 * In case a FW command fails within wl1271_prepare_tx_frame fails a recovery
672 * will be queued in wl1271_cmd_send. -EAGAIN/-EBUSY from prepare_tx_frame
673 * can occur and are legitimate so don't propagate. -EINVAL will emit a WARNING
674 * within prepare_tx_frame code but there's nothing we should do about those
675 * as well.
676 */
677int wlcore_tx_work_locked(struct wl1271 *wl)
660{ 678{
661 struct wl12xx_vif *wlvif; 679 struct wl12xx_vif *wlvif;
662 struct sk_buff *skb; 680 struct sk_buff *skb;
663 struct wl1271_tx_hw_descr *desc; 681 struct wl1271_tx_hw_descr *desc;
664 u32 buf_offset = 0; 682 u32 buf_offset = 0, last_len = 0;
665 bool sent_packets = false; 683 bool sent_packets = false;
666 unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0}; 684 unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
667 int ret; 685 int ret = 0;
686 int bus_ret = 0;
668 687
669 if (unlikely(wl->state == WL1271_STATE_OFF)) 688 if (unlikely(wl->state == WL1271_STATE_OFF))
670 return; 689 return 0;
671 690
672 while ((skb = wl1271_skb_dequeue(wl))) { 691 while ((skb = wl1271_skb_dequeue(wl))) {
673 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 692 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -685,8 +704,14 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
685 * Flush buffer and try again. 704 * Flush buffer and try again.
686 */ 705 */
687 wl1271_skb_queue_head(wl, wlvif, skb); 706 wl1271_skb_queue_head(wl, wlvif, skb);
688 wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf, 707
689 buf_offset, true); 708 buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset,
709 last_len);
710 bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA,
711 wl->aggr_buf, buf_offset, true);
712 if (bus_ret < 0)
713 goto out;
714
690 sent_packets = true; 715 sent_packets = true;
691 buf_offset = 0; 716 buf_offset = 0;
692 continue; 717 continue;
@@ -710,7 +735,8 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
710 ieee80211_free_txskb(wl->hw, skb); 735 ieee80211_free_txskb(wl->hw, skb);
711 goto out_ack; 736 goto out_ack;
712 } 737 }
713 buf_offset += ret; 738 last_len = ret;
739 buf_offset += last_len;
714 wl->tx_packets_count++; 740 wl->tx_packets_count++;
715 if (has_data) { 741 if (has_data) {
716 desc = (struct wl1271_tx_hw_descr *) skb->data; 742 desc = (struct wl1271_tx_hw_descr *) skb->data;
@@ -720,8 +746,12 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
720 746
721out_ack: 747out_ack:
722 if (buf_offset) { 748 if (buf_offset) {
723 wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf, 749 buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset, last_len);
724 buf_offset, true); 750 bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
751 buf_offset, true);
752 if (bus_ret < 0)
753 goto out;
754
725 sent_packets = true; 755 sent_packets = true;
726 } 756 }
727 if (sent_packets) { 757 if (sent_packets) {
@@ -729,13 +759,19 @@ out_ack:
729 * Interrupt the firmware with the new packets. This is only 759 * Interrupt the firmware with the new packets. This is only
730 * required for older hardware revisions 760 * required for older hardware revisions
731 */ 761 */
732 if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) 762 if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) {
733 wl1271_write32(wl, WL12XX_HOST_WR_ACCESS, 763 bus_ret = wlcore_write32(wl, WL12XX_HOST_WR_ACCESS,
734 wl->tx_packets_count); 764 wl->tx_packets_count);
765 if (bus_ret < 0)
766 goto out;
767 }
735 768
736 wl1271_handle_tx_low_watermark(wl); 769 wl1271_handle_tx_low_watermark(wl);
737 } 770 }
738 wl12xx_rearm_rx_streaming(wl, active_hlids); 771 wl12xx_rearm_rx_streaming(wl, active_hlids);
772
773out:
774 return bus_ret;
739} 775}
740 776
741void wl1271_tx_work(struct work_struct *work) 777void wl1271_tx_work(struct work_struct *work)
@@ -748,7 +784,11 @@ void wl1271_tx_work(struct work_struct *work)
748 if (ret < 0) 784 if (ret < 0)
749 goto out; 785 goto out;
750 786
751 wl1271_tx_work_locked(wl); 787 ret = wlcore_tx_work_locked(wl);
788 if (ret < 0) {
789 wl12xx_queue_recovery_work(wl);
790 goto out;
791 }
752 792
753 wl1271_ps_elp_sleep(wl); 793 wl1271_ps_elp_sleep(wl);
754out: 794out:
@@ -849,7 +889,8 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
849 skb_pull(skb, sizeof(struct wl1271_tx_hw_descr)); 889 skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
850 890
851 /* remove TKIP header space if present */ 891 /* remove TKIP header space if present */
852 if (info->control.hw_key && 892 if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
893 info->control.hw_key &&
853 info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) { 894 info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
854 int hdrlen = ieee80211_get_hdrlen_from_skb(skb); 895 int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
855 memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data, 896 memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data,
@@ -869,22 +910,27 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
869} 910}
870 911
871/* Called upon reception of a TX complete interrupt */ 912/* Called upon reception of a TX complete interrupt */
872void wl1271_tx_complete(struct wl1271 *wl) 913int wlcore_tx_complete(struct wl1271 *wl)
873{ 914{
874 struct wl1271_acx_mem_map *memmap = 915 struct wl1271_acx_mem_map *memmap = wl->target_mem_map;
875 (struct wl1271_acx_mem_map *)wl->target_mem_map;
876 u32 count, fw_counter; 916 u32 count, fw_counter;
877 u32 i; 917 u32 i;
918 int ret;
878 919
879 /* read the tx results from the chipset */ 920 /* read the tx results from the chipset */
880 wl1271_read(wl, le32_to_cpu(memmap->tx_result), 921 ret = wlcore_read(wl, le32_to_cpu(memmap->tx_result),
881 wl->tx_res_if, sizeof(*wl->tx_res_if), false); 922 wl->tx_res_if, sizeof(*wl->tx_res_if), false);
923 if (ret < 0)
924 goto out;
925
882 fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter); 926 fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);
883 927
884 /* write host counter to chipset (to ack) */ 928 /* write host counter to chipset (to ack) */
885 wl1271_write32(wl, le32_to_cpu(memmap->tx_result) + 929 ret = wlcore_write32(wl, le32_to_cpu(memmap->tx_result) +
886 offsetof(struct wl1271_tx_hw_res_if, 930 offsetof(struct wl1271_tx_hw_res_if,
887 tx_result_host_counter), fw_counter); 931 tx_result_host_counter), fw_counter);
932 if (ret < 0)
933 goto out;
888 934
889 count = fw_counter - wl->tx_results_count; 935 count = fw_counter - wl->tx_results_count;
890 wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count); 936 wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);
@@ -904,8 +950,11 @@ void wl1271_tx_complete(struct wl1271 *wl)
904 950
905 wl->tx_results_count++; 951 wl->tx_results_count++;
906 } 952 }
953
954out:
955 return ret;
907} 956}
908EXPORT_SYMBOL(wl1271_tx_complete); 957EXPORT_SYMBOL(wlcore_tx_complete);
909 958
910void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid) 959void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
911{ 960{
@@ -958,7 +1007,7 @@ void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
958 1007
959} 1008}
960/* caller must hold wl->mutex and TX must be stopped */ 1009/* caller must hold wl->mutex and TX must be stopped */
961void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues) 1010void wl12xx_tx_reset(struct wl1271 *wl)
962{ 1011{
963 int i; 1012 int i;
964 struct sk_buff *skb; 1013 struct sk_buff *skb;
@@ -973,15 +1022,12 @@ void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
973 wl->tx_queue_count[i] = 0; 1022 wl->tx_queue_count[i] = 0;
974 } 1023 }
975 1024
976 wl->stopped_queues_map = 0;
977
978 /* 1025 /*
979 * Make sure the driver is at a consistent state, in case this 1026 * Make sure the driver is at a consistent state, in case this
980 * function is called from a context other than interface removal. 1027 * function is called from a context other than interface removal.
981 * This call will always wake the TX queues. 1028 * This call will always wake the TX queues.
982 */ 1029 */
983 if (reset_tx_queues) 1030 wl1271_handle_tx_low_watermark(wl);
984 wl1271_handle_tx_low_watermark(wl);
985 1031
986 for (i = 0; i < wl->num_tx_desc; i++) { 1032 for (i = 0; i < wl->num_tx_desc; i++) {
987 if (wl->tx_frames[i] == NULL) 1033 if (wl->tx_frames[i] == NULL)
@@ -998,7 +1044,8 @@ void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
998 */ 1044 */
999 info = IEEE80211_SKB_CB(skb); 1045 info = IEEE80211_SKB_CB(skb);
1000 skb_pull(skb, sizeof(struct wl1271_tx_hw_descr)); 1046 skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
1001 if (info->control.hw_key && 1047 if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
1048 info->control.hw_key &&
1002 info->control.hw_key->cipher == 1049 info->control.hw_key->cipher ==
1003 WLAN_CIPHER_SUITE_TKIP) { 1050 WLAN_CIPHER_SUITE_TKIP) {
1004 int hdrlen = ieee80211_get_hdrlen_from_skb(skb); 1051 int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
@@ -1024,6 +1071,11 @@ void wl1271_tx_flush(struct wl1271 *wl)
1024 int i; 1071 int i;
1025 timeout = jiffies + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT); 1072 timeout = jiffies + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);
1026 1073
1074 /* only one flush should be in progress, for consistent queue state */
1075 mutex_lock(&wl->flush_mutex);
1076
1077 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
1078
1027 while (!time_after(jiffies, timeout)) { 1079 while (!time_after(jiffies, timeout)) {
1028 mutex_lock(&wl->mutex); 1080 mutex_lock(&wl->mutex);
1029 wl1271_debug(DEBUG_TX, "flushing tx buffer: %d %d", 1081 wl1271_debug(DEBUG_TX, "flushing tx buffer: %d %d",
@@ -1032,7 +1084,7 @@ void wl1271_tx_flush(struct wl1271 *wl)
1032 if ((wl->tx_frames_cnt == 0) && 1084 if ((wl->tx_frames_cnt == 0) &&
1033 (wl1271_tx_total_queue_count(wl) == 0)) { 1085 (wl1271_tx_total_queue_count(wl) == 0)) {
1034 mutex_unlock(&wl->mutex); 1086 mutex_unlock(&wl->mutex);
1035 return; 1087 goto out;
1036 } 1088 }
1037 mutex_unlock(&wl->mutex); 1089 mutex_unlock(&wl->mutex);
1038 msleep(1); 1090 msleep(1);
@@ -1045,7 +1097,12 @@ void wl1271_tx_flush(struct wl1271 *wl)
1045 for (i = 0; i < WL12XX_MAX_LINKS; i++) 1097 for (i = 0; i < WL12XX_MAX_LINKS; i++)
1046 wl1271_tx_reset_link_queues(wl, i); 1098 wl1271_tx_reset_link_queues(wl, i);
1047 mutex_unlock(&wl->mutex); 1099 mutex_unlock(&wl->mutex);
1100
1101out:
1102 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
1103 mutex_unlock(&wl->flush_mutex);
1048} 1104}
1105EXPORT_SYMBOL_GPL(wl1271_tx_flush);
1049 1106
1050u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set) 1107u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
1051{ 1108{
@@ -1054,3 +1111,96 @@ u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
1054 1111
1055 return BIT(__ffs(rate_set)); 1112 return BIT(__ffs(rate_set));
1056} 1113}
1114
1115void wlcore_stop_queue_locked(struct wl1271 *wl, u8 queue,
1116 enum wlcore_queue_stop_reason reason)
1117{
1118 bool stopped = !!wl->queue_stop_reasons[queue];
1119
1120 /* queue should not be stopped for this reason */
1121 WARN_ON(test_and_set_bit(reason, &wl->queue_stop_reasons[queue]));
1122
1123 if (stopped)
1124 return;
1125
1126 ieee80211_stop_queue(wl->hw, wl1271_tx_get_mac80211_queue(queue));
1127}
1128
1129void wlcore_stop_queue(struct wl1271 *wl, u8 queue,
1130 enum wlcore_queue_stop_reason reason)
1131{
1132 unsigned long flags;
1133
1134 spin_lock_irqsave(&wl->wl_lock, flags);
1135 wlcore_stop_queue_locked(wl, queue, reason);
1136 spin_unlock_irqrestore(&wl->wl_lock, flags);
1137}
1138
1139void wlcore_wake_queue(struct wl1271 *wl, u8 queue,
1140 enum wlcore_queue_stop_reason reason)
1141{
1142 unsigned long flags;
1143
1144 spin_lock_irqsave(&wl->wl_lock, flags);
1145
1146 /* queue should not be clear for this reason */
1147 WARN_ON(!test_and_clear_bit(reason, &wl->queue_stop_reasons[queue]));
1148
1149 if (wl->queue_stop_reasons[queue])
1150 goto out;
1151
1152 ieee80211_wake_queue(wl->hw, wl1271_tx_get_mac80211_queue(queue));
1153
1154out:
1155 spin_unlock_irqrestore(&wl->wl_lock, flags);
1156}
1157
1158void wlcore_stop_queues(struct wl1271 *wl,
1159 enum wlcore_queue_stop_reason reason)
1160{
1161 int i;
1162
1163 for (i = 0; i < NUM_TX_QUEUES; i++)
1164 wlcore_stop_queue(wl, i, reason);
1165}
1166EXPORT_SYMBOL_GPL(wlcore_stop_queues);
1167
1168void wlcore_wake_queues(struct wl1271 *wl,
1169 enum wlcore_queue_stop_reason reason)
1170{
1171 int i;
1172
1173 for (i = 0; i < NUM_TX_QUEUES; i++)
1174 wlcore_wake_queue(wl, i, reason);
1175}
1176EXPORT_SYMBOL_GPL(wlcore_wake_queues);
1177
1178void wlcore_reset_stopped_queues(struct wl1271 *wl)
1179{
1180 int i;
1181 unsigned long flags;
1182
1183 spin_lock_irqsave(&wl->wl_lock, flags);
1184
1185 for (i = 0; i < NUM_TX_QUEUES; i++) {
1186 if (!wl->queue_stop_reasons[i])
1187 continue;
1188
1189 wl->queue_stop_reasons[i] = 0;
1190 ieee80211_wake_queue(wl->hw,
1191 wl1271_tx_get_mac80211_queue(i));
1192 }
1193
1194 spin_unlock_irqrestore(&wl->wl_lock, flags);
1195}
1196
1197bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl, u8 queue,
1198 enum wlcore_queue_stop_reason reason)
1199{
1200 return test_bit(reason, &wl->queue_stop_reasons[queue]);
1201}
1202
1203bool wlcore_is_queue_stopped(struct wl1271 *wl, u8 queue)
1204{
1205 return !!wl->queue_stop_reasons[queue];
1206}
diff --git a/drivers/net/wireless/ti/wlcore/tx.h b/drivers/net/wireless/ti/wlcore/tx.h
index 2fd6e5dc6f75..1e939b016155 100644
--- a/drivers/net/wireless/ti/wlcore/tx.h
+++ b/drivers/net/wireless/ti/wlcore/tx.h
@@ -85,6 +85,19 @@ struct wl128x_tx_mem {
85 u8 extra_bytes; 85 u8 extra_bytes;
86} __packed; 86} __packed;
87 87
88struct wl18xx_tx_mem {
89 /*
90 * Total number of memory blocks allocated by the host for
91 * this packet.
92 */
93 u8 total_mem_blocks;
94
95 /*
96 * control bits
97 */
98 u8 ctrl;
99} __packed;
100
88/* 101/*
89 * On wl128x based devices, when TX packets are aggregated, each packet 102 * On wl128x based devices, when TX packets are aggregated, each packet
90 * size must be aligned to the SDIO block size. The maximum block size 103 * size must be aligned to the SDIO block size. The maximum block size
@@ -100,6 +113,7 @@ struct wl1271_tx_hw_descr {
100 union { 113 union {
101 struct wl127x_tx_mem wl127x_mem; 114 struct wl127x_tx_mem wl127x_mem;
102 struct wl128x_tx_mem wl128x_mem; 115 struct wl128x_tx_mem wl128x_mem;
116 struct wl18xx_tx_mem wl18xx_mem;
103 } __packed; 117 } __packed;
104 /* Device time (in us) when the packet arrived to the driver */ 118 /* Device time (in us) when the packet arrived to the driver */
105 __le32 start_time; 119 __le32 start_time;
@@ -116,7 +130,16 @@ struct wl1271_tx_hw_descr {
116 u8 tid; 130 u8 tid;
117 /* host link ID (HLID) */ 131 /* host link ID (HLID) */
118 u8 hlid; 132 u8 hlid;
119 u8 reserved; 133
134 union {
135 u8 wl12xx_reserved;
136
137 /*
138 * bit 0 -> 0 = udp, 1 = tcp
139 * bit 1:7 -> IP header offset
140 */
141 u8 wl18xx_checksum_data;
142 } __packed;
120} __packed; 143} __packed;
121 144
122enum wl1271_tx_hw_res_status { 145enum wl1271_tx_hw_res_status {
@@ -161,6 +184,13 @@ struct wl1271_tx_hw_res_if {
161 struct wl1271_tx_hw_res_descr tx_results_queue[TX_HW_RESULT_QUEUE_LEN]; 184 struct wl1271_tx_hw_res_descr tx_results_queue[TX_HW_RESULT_QUEUE_LEN];
162} __packed; 185} __packed;
163 186
187enum wlcore_queue_stop_reason {
188 WLCORE_QUEUE_STOP_REASON_WATERMARK,
189 WLCORE_QUEUE_STOP_REASON_FW_RESTART,
190 WLCORE_QUEUE_STOP_REASON_FLUSH,
191 WLCORE_QUEUE_STOP_REASON_SPARE_BLK, /* 18xx specific */
192};
193
164static inline int wl1271_tx_get_queue(int queue) 194static inline int wl1271_tx_get_queue(int queue)
165{ 195{
166 switch (queue) { 196 switch (queue) {
@@ -204,10 +234,10 @@ static inline int wl1271_tx_total_queue_count(struct wl1271 *wl)
204} 234}
205 235
206void wl1271_tx_work(struct work_struct *work); 236void wl1271_tx_work(struct work_struct *work);
207void wl1271_tx_work_locked(struct wl1271 *wl); 237int wlcore_tx_work_locked(struct wl1271 *wl);
208void wl1271_tx_complete(struct wl1271 *wl); 238int wlcore_tx_complete(struct wl1271 *wl);
209void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif); 239void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif);
210void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues); 240void wl12xx_tx_reset(struct wl1271 *wl);
211void wl1271_tx_flush(struct wl1271 *wl); 241void wl1271_tx_flush(struct wl1271 *wl);
212u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band); 242u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band);
213u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set, 243u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
@@ -223,6 +253,21 @@ bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb);
223void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids); 253void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids);
224unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl, 254unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
225 unsigned int packet_length); 255 unsigned int packet_length);
256void wl1271_free_tx_id(struct wl1271 *wl, int id);
257void wlcore_stop_queue_locked(struct wl1271 *wl, u8 queue,
258 enum wlcore_queue_stop_reason reason);
259void wlcore_stop_queue(struct wl1271 *wl, u8 queue,
260 enum wlcore_queue_stop_reason reason);
261void wlcore_wake_queue(struct wl1271 *wl, u8 queue,
262 enum wlcore_queue_stop_reason reason);
263void wlcore_stop_queues(struct wl1271 *wl,
264 enum wlcore_queue_stop_reason reason);
265void wlcore_wake_queues(struct wl1271 *wl,
266 enum wlcore_queue_stop_reason reason);
267void wlcore_reset_stopped_queues(struct wl1271 *wl);
268bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl, u8 queue,
269 enum wlcore_queue_stop_reason reason);
270bool wlcore_is_queue_stopped(struct wl1271 *wl, u8 queue);
226 271
227/* from main.c */ 272/* from main.c */
228void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid); 273void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 0b3f0b586f4b..27ccc275a1c1 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -24,8 +24,9 @@
24 24
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26 26
27#include "wl12xx.h" 27#include "wlcore_i.h"
28#include "event.h" 28#include "event.h"
29#include "boot.h"
29 30
30/* The maximum number of Tx descriptors in all chip families */ 31/* The maximum number of Tx descriptors in all chip families */
31#define WLCORE_MAX_TX_DESCRIPTORS 32 32#define WLCORE_MAX_TX_DESCRIPTORS 32
@@ -33,14 +34,16 @@
33/* forward declaration */ 34/* forward declaration */
34struct wl1271_tx_hw_descr; 35struct wl1271_tx_hw_descr;
35enum wl_rx_buf_align; 36enum wl_rx_buf_align;
37struct wl1271_rx_descriptor;
36 38
37struct wlcore_ops { 39struct wlcore_ops {
38 int (*identify_chip)(struct wl1271 *wl); 40 int (*identify_chip)(struct wl1271 *wl);
39 int (*identify_fw)(struct wl1271 *wl); 41 int (*identify_fw)(struct wl1271 *wl);
40 int (*boot)(struct wl1271 *wl); 42 int (*boot)(struct wl1271 *wl);
41 void (*trigger_cmd)(struct wl1271 *wl, int cmd_box_addr, 43 int (*plt_init)(struct wl1271 *wl);
42 void *buf, size_t len); 44 int (*trigger_cmd)(struct wl1271 *wl, int cmd_box_addr,
43 void (*ack_event)(struct wl1271 *wl); 45 void *buf, size_t len);
46 int (*ack_event)(struct wl1271 *wl);
44 u32 (*calc_tx_blocks)(struct wl1271 *wl, u32 len, u32 spare_blks); 47 u32 (*calc_tx_blocks)(struct wl1271 *wl, u32 len, u32 spare_blks);
45 void (*set_tx_desc_blocks)(struct wl1271 *wl, 48 void (*set_tx_desc_blocks)(struct wl1271 *wl,
46 struct wl1271_tx_hw_descr *desc, 49 struct wl1271_tx_hw_descr *desc,
@@ -50,17 +53,34 @@ struct wlcore_ops {
50 struct sk_buff *skb); 53 struct sk_buff *skb);
51 enum wl_rx_buf_align (*get_rx_buf_align)(struct wl1271 *wl, 54 enum wl_rx_buf_align (*get_rx_buf_align)(struct wl1271 *wl,
52 u32 rx_desc); 55 u32 rx_desc);
53 void (*prepare_read)(struct wl1271 *wl, u32 rx_desc, u32 len); 56 int (*prepare_read)(struct wl1271 *wl, u32 rx_desc, u32 len);
54 u32 (*get_rx_packet_len)(struct wl1271 *wl, void *rx_data, 57 u32 (*get_rx_packet_len)(struct wl1271 *wl, void *rx_data,
55 u32 data_len); 58 u32 data_len);
56 void (*tx_delayed_compl)(struct wl1271 *wl); 59 int (*tx_delayed_compl)(struct wl1271 *wl);
57 void (*tx_immediate_compl)(struct wl1271 *wl); 60 void (*tx_immediate_compl)(struct wl1271 *wl);
58 int (*hw_init)(struct wl1271 *wl); 61 int (*hw_init)(struct wl1271 *wl);
59 int (*init_vif)(struct wl1271 *wl, struct wl12xx_vif *wlvif); 62 int (*init_vif)(struct wl1271 *wl, struct wl12xx_vif *wlvif);
60 u32 (*sta_get_ap_rate_mask)(struct wl1271 *wl, 63 u32 (*sta_get_ap_rate_mask)(struct wl1271 *wl,
61 struct wl12xx_vif *wlvif); 64 struct wl12xx_vif *wlvif);
62 s8 (*get_pg_ver)(struct wl1271 *wl); 65 int (*get_pg_ver)(struct wl1271 *wl, s8 *ver);
63 void (*get_mac)(struct wl1271 *wl); 66 int (*get_mac)(struct wl1271 *wl);
67 void (*set_tx_desc_csum)(struct wl1271 *wl,
68 struct wl1271_tx_hw_descr *desc,
69 struct sk_buff *skb);
70 void (*set_rx_csum)(struct wl1271 *wl,
71 struct wl1271_rx_descriptor *desc,
72 struct sk_buff *skb);
73 u32 (*ap_get_mimo_wide_rate_mask)(struct wl1271 *wl,
74 struct wl12xx_vif *wlvif);
75 int (*debugfs_init)(struct wl1271 *wl, struct dentry *rootdir);
76 int (*handle_static_data)(struct wl1271 *wl,
77 struct wl1271_static_data *static_data);
78 int (*get_spare_blocks)(struct wl1271 *wl, bool is_gem);
79 int (*set_key)(struct wl1271 *wl, enum set_key_cmd cmd,
80 struct ieee80211_vif *vif,
81 struct ieee80211_sta *sta,
82 struct ieee80211_key_conf *key_conf);
83 u32 (*pre_pkt_send)(struct wl1271 *wl, u32 buf_offset, u32 last_len);
64}; 84};
65 85
66enum wlcore_partitions { 86enum wlcore_partitions {
@@ -109,6 +129,15 @@ enum wlcore_registers {
109 REG_TABLE_LEN, 129 REG_TABLE_LEN,
110}; 130};
111 131
132struct wl1271_stats {
133 void *fw_stats;
134 unsigned long fw_stats_update;
135 size_t fw_stats_len;
136
137 unsigned int retry_count;
138 unsigned int excessive_retries;
139};
140
112struct wl1271 { 141struct wl1271 {
113 struct ieee80211_hw *hw; 142 struct ieee80211_hw *hw;
114 bool mac80211_registered; 143 bool mac80211_registered;
@@ -121,7 +150,6 @@ struct wl1271 {
121 150
122 void (*set_power)(bool enable); 151 void (*set_power)(bool enable);
123 int irq; 152 int irq;
124 int ref_clock;
125 153
126 spinlock_t wl_lock; 154 spinlock_t wl_lock;
127 155
@@ -186,7 +214,7 @@ struct wl1271 {
186 214
187 /* Frames scheduled for transmission, not handled yet */ 215 /* Frames scheduled for transmission, not handled yet */
188 int tx_queue_count[NUM_TX_QUEUES]; 216 int tx_queue_count[NUM_TX_QUEUES];
189 long stopped_queues_map; 217 unsigned long queue_stop_reasons[NUM_TX_QUEUES];
190 218
191 /* Frames received, not handled yet by mac80211 */ 219 /* Frames received, not handled yet by mac80211 */
192 struct sk_buff_head deferred_rx_queue; 220 struct sk_buff_head deferred_rx_queue;
@@ -205,9 +233,6 @@ struct wl1271 {
205 /* FW Rx counter */ 233 /* FW Rx counter */
206 u32 rx_counter; 234 u32 rx_counter;
207 235
208 /* Rx memory pool address */
209 struct wl1271_rx_mem_pool_addr rx_mem_pool_addr;
210
211 /* Intermediate buffer, used for packet aggregation */ 236 /* Intermediate buffer, used for packet aggregation */
212 u8 *aggr_buf; 237 u8 *aggr_buf;
213 238
@@ -228,6 +253,7 @@ struct wl1271 {
228 253
229 /* Hardware recovery work */ 254 /* Hardware recovery work */
230 struct work_struct recovery_work; 255 struct work_struct recovery_work;
256 bool watchdog_recovery;
231 257
232 /* Pointer that holds DMA-friendly block for the mailbox */ 258 /* Pointer that holds DMA-friendly block for the mailbox */
233 struct event_mailbox *mbox; 259 struct event_mailbox *mbox;
@@ -263,7 +289,8 @@ struct wl1271 {
263 u32 buffer_cmd; 289 u32 buffer_cmd;
264 u32 buffer_busyword[WL1271_BUSY_WORD_CNT]; 290 u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
265 291
266 struct wl_fw_status *fw_status; 292 struct wl_fw_status_1 *fw_status_1;
293 struct wl_fw_status_2 *fw_status_2;
267 struct wl1271_tx_hw_res_if *tx_res_if; 294 struct wl1271_tx_hw_res_if *tx_res_if;
268 295
269 /* Current chipset configuration */ 296 /* Current chipset configuration */
@@ -277,9 +304,7 @@ struct wl1271 {
277 s8 noise; 304 s8 noise;
278 305
279 /* bands supported by this instance of wl12xx */ 306 /* bands supported by this instance of wl12xx */
280 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; 307 struct ieee80211_supported_band bands[WLCORE_NUM_BANDS];
281
282 int tcxo_clock;
283 308
284 /* 309 /*
285 * wowlan trigger was configured during suspend. 310 * wowlan trigger was configured during suspend.
@@ -333,10 +358,8 @@ struct wl1271 {
333 358
334 /* number of TX descriptors the HW supports. */ 359 /* number of TX descriptors the HW supports. */
335 u32 num_tx_desc; 360 u32 num_tx_desc;
336 361 /* number of RX descriptors the HW supports. */
337 /* spare Tx blocks for normal/GEM operating modes */ 362 u32 num_rx_desc;
338 u32 normal_tx_spare;
339 u32 gem_tx_spare;
340 363
341 /* translate HW Tx rates to standard rate-indices */ 364 /* translate HW Tx rates to standard rate-indices */
342 const u8 **band_rate_to_idx; 365 const u8 **band_rate_to_idx;
@@ -348,19 +371,57 @@ struct wl1271 {
348 u8 hw_min_ht_rate; 371 u8 hw_min_ht_rate;
349 372
350 /* HW HT (11n) capabilities */ 373 /* HW HT (11n) capabilities */
351 struct ieee80211_sta_ht_cap ht_cap; 374 struct ieee80211_sta_ht_cap ht_cap[WLCORE_NUM_BANDS];
352 375
353 /* size of the private FW status data */ 376 /* size of the private FW status data */
354 size_t fw_status_priv_len; 377 size_t fw_status_priv_len;
355 378
356 /* RX Data filter rule state - enabled/disabled */ 379 /* RX Data filter rule state - enabled/disabled */
357 bool rx_filter_enabled[WL1271_MAX_RX_FILTERS]; 380 bool rx_filter_enabled[WL1271_MAX_RX_FILTERS];
381
382 /* size of the private static data */
383 size_t static_data_priv_len;
384
385 /* the current channel type */
386 enum nl80211_channel_type channel_type;
387
388 /* mutex for protecting the tx_flush function */
389 struct mutex flush_mutex;
390
391 /* sleep auth value currently configured to FW */
392 int sleep_auth;
393
394 /* the minimum FW version required for the driver to work */
395 unsigned int min_fw_ver[NUM_FW_VER];
358}; 396};
359 397
360int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev); 398int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev);
361int __devexit wlcore_remove(struct platform_device *pdev); 399int __devexit wlcore_remove(struct platform_device *pdev);
362struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size); 400struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size);
363int wlcore_free_hw(struct wl1271 *wl); 401int wlcore_free_hw(struct wl1271 *wl);
402int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
403 struct ieee80211_vif *vif,
404 struct ieee80211_sta *sta,
405 struct ieee80211_key_conf *key_conf);
406
407static inline void
408wlcore_set_ht_cap(struct wl1271 *wl, enum ieee80211_band band,
409 struct ieee80211_sta_ht_cap *ht_cap)
410{
411 memcpy(&wl->ht_cap[band], ht_cap, sizeof(*ht_cap));
412}
413
414static inline void
415wlcore_set_min_fw_ver(struct wl1271 *wl, unsigned int chip,
416 unsigned int iftype, unsigned int major,
417 unsigned int subtype, unsigned int minor)
418{
419 wl->min_fw_ver[FW_VER_CHIP] = chip;
420 wl->min_fw_ver[FW_VER_IF_TYPE] = iftype;
421 wl->min_fw_ver[FW_VER_MAJOR] = major;
422 wl->min_fw_ver[FW_VER_SUBTYPE] = subtype;
423 wl->min_fw_ver[FW_VER_MINOR] = minor;
424}
364 425
365/* Firmware image load chunk size */ 426/* Firmware image load chunk size */
366#define CHUNK_SIZE 16384 427#define CHUNK_SIZE 16384
@@ -385,6 +446,18 @@ int wlcore_free_hw(struct wl1271 *wl);
385/* Some firmwares may not support ELP */ 446/* Some firmwares may not support ELP */
386#define WLCORE_QUIRK_NO_ELP BIT(6) 447#define WLCORE_QUIRK_NO_ELP BIT(6)
387 448
449/* pad only the last frame in the aggregate buffer */
450#define WLCORE_QUIRK_TX_PAD_LAST_FRAME BIT(7)
451
452/* extra header space is required for TKIP */
453#define WLCORE_QUIRK_TKIP_HEADER_SPACE BIT(8)
454
455/* Some firmwares not support sched scans while connected */
456#define WLCORE_QUIRK_NO_SCHED_SCAN_WHILE_CONN BIT(9)
457
458/* separate probe response templates for one-shot and sched scans */
459#define WLCORE_QUIRK_DUAL_PROBE_TMPL BIT(10)
460
388/* TODO: move to the lower drivers when all usages are abstracted */ 461/* TODO: move to the lower drivers when all usages are abstracted */
389#define CHIP_ID_1271_PG10 (0x4030101) 462#define CHIP_ID_1271_PG10 (0x4030101)
390#define CHIP_ID_1271_PG20 (0x4030111) 463#define CHIP_ID_1271_PG20 (0x4030111)
diff --git a/drivers/net/wireless/ti/wlcore/wl12xx.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index f12bdf745180..0187eef4fb07 100644
--- a/drivers/net/wireless/ti/wlcore/wl12xx.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -22,8 +22,8 @@
22 * 22 *
23 */ 23 */
24 24
25#ifndef __WL12XX_H__ 25#ifndef __WLCORE_I_H__
26#define __WL12XX_H__ 26#define __WLCORE_I_H__
27 27
28#include <linux/mutex.h> 28#include <linux/mutex.h>
29#include <linux/completion.h> 29#include <linux/completion.h>
@@ -35,15 +35,6 @@
35#include "conf.h" 35#include "conf.h"
36#include "ini.h" 36#include "ini.h"
37 37
38#define WL127X_FW_NAME_MULTI "ti-connectivity/wl127x-fw-4-mr.bin"
39#define WL127X_FW_NAME_SINGLE "ti-connectivity/wl127x-fw-4-sr.bin"
40
41#define WL128X_FW_NAME_MULTI "ti-connectivity/wl128x-fw-4-mr.bin"
42#define WL128X_FW_NAME_SINGLE "ti-connectivity/wl128x-fw-4-sr.bin"
43
44#define WL127X_PLT_FW_NAME "ti-connectivity/wl127x-fw-4-plt.bin"
45#define WL128X_PLT_FW_NAME "ti-connectivity/wl128x-fw-4-plt.bin"
46
47/* 38/*
48 * wl127x and wl128x are using the same NVS file name. However, the 39 * wl127x and wl128x are using the same NVS file name. However, the
49 * ini parameters between them are different. The driver validates 40 * ini parameters between them are different. The driver validates
@@ -71,6 +62,9 @@
71#define WL12XX_INVALID_ROLE_ID 0xff 62#define WL12XX_INVALID_ROLE_ID 0xff
72#define WL12XX_INVALID_LINK_ID 0xff 63#define WL12XX_INVALID_LINK_ID 0xff
73 64
65/* the driver supports the 2.4Ghz and 5Ghz bands */
66#define WLCORE_NUM_BANDS 2
67
74#define WL12XX_MAX_RATE_POLICIES 16 68#define WL12XX_MAX_RATE_POLICIES 16
75 69
76/* Defined by FW as 0. Will not be freed or allocated. */ 70/* Defined by FW as 0. Will not be freed or allocated. */
@@ -89,7 +83,7 @@
89#define WL1271_AP_BSS_INDEX 0 83#define WL1271_AP_BSS_INDEX 0
90#define WL1271_AP_DEF_BEACON_EXP 20 84#define WL1271_AP_DEF_BEACON_EXP 20
91 85
92#define WL1271_AGGR_BUFFER_SIZE (4 * PAGE_SIZE) 86#define WL1271_AGGR_BUFFER_SIZE (5 * PAGE_SIZE)
93 87
94enum wl1271_state { 88enum wl1271_state {
95 WL1271_STATE_OFF, 89 WL1271_STATE_OFF,
@@ -132,16 +126,7 @@ struct wl1271_chip {
132 unsigned int fw_ver[NUM_FW_VER]; 126 unsigned int fw_ver[NUM_FW_VER];
133}; 127};
134 128
135struct wl1271_stats {
136 struct acx_statistics *fw_stats;
137 unsigned long fw_stats_update;
138
139 unsigned int retry_count;
140 unsigned int excessive_retries;
141};
142
143#define NUM_TX_QUEUES 4 129#define NUM_TX_QUEUES 4
144#define NUM_RX_PKT_DESC 8
145 130
146#define AP_MAX_STATIONS 8 131#define AP_MAX_STATIONS 8
147 132
@@ -159,13 +144,26 @@ struct wl_fw_packet_counters {
159} __packed; 144} __packed;
160 145
161/* FW status registers */ 146/* FW status registers */
162struct wl_fw_status { 147struct wl_fw_status_1 {
163 __le32 intr; 148 __le32 intr;
164 u8 fw_rx_counter; 149 u8 fw_rx_counter;
165 u8 drv_rx_counter; 150 u8 drv_rx_counter;
166 u8 reserved; 151 u8 reserved;
167 u8 tx_results_counter; 152 u8 tx_results_counter;
168 __le32 rx_pkt_descs[NUM_RX_PKT_DESC]; 153 __le32 rx_pkt_descs[0];
154} __packed;
155
156/*
157 * Each HW arch has a different number of Rx descriptors.
158 * The length of the status depends on it, since it holds an array
159 * of descriptors.
160 */
161#define WLCORE_FW_STATUS_1_LEN(num_rx_desc) \
162 (sizeof(struct wl_fw_status_1) + \
163 (sizeof(((struct wl_fw_status_1 *)0)->rx_pkt_descs[0])) * \
164 num_rx_desc)
165
166struct wl_fw_status_2 {
169 __le32 fw_localtime; 167 __le32 fw_localtime;
170 168
171 /* 169 /*
@@ -194,11 +192,6 @@ struct wl_fw_status {
194 u8 priv[0]; 192 u8 priv[0];
195} __packed; 193} __packed;
196 194
197struct wl1271_rx_mem_pool_addr {
198 u32 addr;
199 u32 addr_extra;
200};
201
202#define WL1271_MAX_CHANNELS 64 195#define WL1271_MAX_CHANNELS 64
203struct wl1271_scan { 196struct wl1271_scan {
204 struct cfg80211_scan_request *req; 197 struct cfg80211_scan_request *req;
@@ -210,10 +203,10 @@ struct wl1271_scan {
210}; 203};
211 204
212struct wl1271_if_operations { 205struct wl1271_if_operations {
213 void (*read)(struct device *child, int addr, void *buf, size_t len, 206 int __must_check (*read)(struct device *child, int addr, void *buf,
214 bool fixed); 207 size_t len, bool fixed);
215 void (*write)(struct device *child, int addr, void *buf, size_t len, 208 int __must_check (*write)(struct device *child, int addr, void *buf,
216 bool fixed); 209 size_t len, bool fixed);
217 void (*reset)(struct device *child); 210 void (*reset)(struct device *child);
218 void (*init)(struct device *child); 211 void (*init)(struct device *child);
219 int (*power)(struct device *child, bool enable); 212 int (*power)(struct device *child, bool enable);
@@ -248,6 +241,7 @@ enum wl12xx_flags {
248 WL1271_FLAG_RECOVERY_IN_PROGRESS, 241 WL1271_FLAG_RECOVERY_IN_PROGRESS,
249 WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, 242 WL1271_FLAG_VIF_CHANGE_IN_PROGRESS,
250 WL1271_FLAG_INTENDED_FW_RECOVERY, 243 WL1271_FLAG_INTENDED_FW_RECOVERY,
244 WL1271_FLAG_IO_FAILED,
251}; 245};
252 246
253enum wl12xx_vif_flags { 247enum wl12xx_vif_flags {
@@ -367,8 +361,9 @@ struct wl12xx_vif {
367 /* The current band */ 361 /* The current band */
368 enum ieee80211_band band; 362 enum ieee80211_band band;
369 int channel; 363 int channel;
364 enum nl80211_channel_type channel_type;
370 365
371 u32 bitrate_masks[IEEE80211_NUM_BANDS]; 366 u32 bitrate_masks[WLCORE_NUM_BANDS];
372 u32 basic_rate_set; 367 u32 basic_rate_set;
373 368
374 /* 369 /*
@@ -417,9 +412,6 @@ struct wl12xx_vif {
417 struct work_struct rx_streaming_disable_work; 412 struct work_struct rx_streaming_disable_work;
418 struct timer_list rx_streaming_timer; 413 struct timer_list rx_streaming_timer;
419 414
420 /* does the current role use GEM for encryption (AP or STA) */
421 bool is_gem;
422
423 /* 415 /*
424 * This struct must be last! 416 * This struct must be last!
425 * data that has to be saved acrossed reconfigs (e.g. recovery) 417 * data that has to be saved acrossed reconfigs (e.g. recovery)
@@ -501,7 +493,8 @@ void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
501/* Macros to handle wl1271.sta_rate_set */ 493/* Macros to handle wl1271.sta_rate_set */
502#define HW_BG_RATES_MASK 0xffff 494#define HW_BG_RATES_MASK 0xffff
503#define HW_HT_RATES_OFFSET 16 495#define HW_HT_RATES_OFFSET 16
496#define HW_MIMO_RATES_OFFSET 24
504 497
505#define WL12XX_HW_BLOCK_SIZE 256 498#define WL12XX_HW_BLOCK_SIZE 256
506 499
507#endif 500#endif /* __WLCORE_I_H__ */
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index 117c4123943c..7ab922209b25 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -827,7 +827,7 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values,
827static inline int zd_ioread32_locked(struct zd_chip *chip, u32 *value, 827static inline int zd_ioread32_locked(struct zd_chip *chip, u32 *value,
828 const zd_addr_t addr) 828 const zd_addr_t addr)
829{ 829{
830 return zd_ioread32v_locked(chip, value, (const zd_addr_t *)&addr, 1); 830 return zd_ioread32v_locked(chip, value, &addr, 1);
831} 831}
832 832
833static inline int zd_iowrite16_locked(struct zd_chip *chip, u16 value, 833static inline int zd_iowrite16_locked(struct zd_chip *chip, u16 value,
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h
index 99193b456a79..45e3bb28a01c 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.h
+++ b/drivers/net/wireless/zd1211rw/zd_usb.h
@@ -274,7 +274,7 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
274static inline int zd_usb_ioread16(struct zd_usb *usb, u16 *value, 274static inline int zd_usb_ioread16(struct zd_usb *usb, u16 *value,
275 const zd_addr_t addr) 275 const zd_addr_t addr)
276{ 276{
277 return zd_usb_ioread16v(usb, value, (const zd_addr_t *)&addr, 1); 277 return zd_usb_ioread16v(usb, value, &addr, 1);
278} 278}
279 279
280void zd_usb_iowrite16v_async_start(struct zd_usb *usb); 280void zd_usb_iowrite16v_async_start(struct zd_usb *usb);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f4a6fcaeffb1..682633bfe00f 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1363,8 +1363,6 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1363 INVALID_PENDING_IDX); 1363 INVALID_PENDING_IDX);
1364 } 1364 }
1365 1365
1366 __skb_queue_tail(&netbk->tx_queue, skb);
1367
1368 netbk->pending_cons++; 1366 netbk->pending_cons++;
1369 1367
1370 request_gop = xen_netbk_get_requests(netbk, vif, 1368 request_gop = xen_netbk_get_requests(netbk, vif,
@@ -1376,6 +1374,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1376 } 1374 }
1377 gop = request_gop; 1375 gop = request_gop;
1378 1376
1377 __skb_queue_tail(&netbk->tx_queue, skb);
1378
1379 vif->tx.req_cons = idx; 1379 vif->tx.req_cons = idx;
1380 xen_netbk_check_rx_xenvif(vif); 1380 xen_netbk_check_rx_xenvif(vif);
1381 1381
diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
index 1f74a77d040d..e7fd4938f9bc 100644
--- a/drivers/nfc/nfcwilink.c
+++ b/drivers/nfc/nfcwilink.c
@@ -535,9 +535,10 @@ static int nfcwilink_probe(struct platform_device *pdev)
535 drv->pdev = pdev; 535 drv->pdev = pdev;
536 536
537 protocols = NFC_PROTO_JEWEL_MASK 537 protocols = NFC_PROTO_JEWEL_MASK
538 | NFC_PROTO_MIFARE_MASK | NFC_PROTO_FELICA_MASK 538 | NFC_PROTO_MIFARE_MASK | NFC_PROTO_FELICA_MASK
539 | NFC_PROTO_ISO14443_MASK 539 | NFC_PROTO_ISO14443_MASK
540 | NFC_PROTO_NFC_DEP_MASK; 540 | NFC_PROTO_ISO14443_B_MASK
541 | NFC_PROTO_NFC_DEP_MASK;
541 542
542 drv->ndev = nci_allocate_device(&nfcwilink_ops, 543 drv->ndev = nci_allocate_device(&nfcwilink_ops,
543 protocols, 544 protocols,
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index 19110f0eb15f..d606f52fec84 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -38,13 +38,51 @@
38#define SCM_VENDOR_ID 0x4E6 38#define SCM_VENDOR_ID 0x4E6
39#define SCL3711_PRODUCT_ID 0x5591 39#define SCL3711_PRODUCT_ID 0x5591
40 40
41#define SONY_VENDOR_ID 0x054c
42#define PASORI_PRODUCT_ID 0x02e1
43
44#define PN533_QUIRKS_TYPE_A BIT(0)
45#define PN533_QUIRKS_TYPE_F BIT(1)
46#define PN533_QUIRKS_DEP BIT(2)
47#define PN533_QUIRKS_RAW_EXCHANGE BIT(3)
48
49#define PN533_DEVICE_STD 0x1
50#define PN533_DEVICE_PASORI 0x2
51
52#define PN533_ALL_PROTOCOLS (NFC_PROTO_JEWEL_MASK | NFC_PROTO_MIFARE_MASK |\
53 NFC_PROTO_FELICA_MASK | NFC_PROTO_ISO14443_MASK |\
54 NFC_PROTO_NFC_DEP_MASK |\
55 NFC_PROTO_ISO14443_B_MASK)
56
57#define PN533_NO_TYPE_B_PROTOCOLS (NFC_PROTO_JEWEL_MASK | \
58 NFC_PROTO_MIFARE_MASK | \
59 NFC_PROTO_FELICA_MASK | \
60 NFC_PROTO_ISO14443_MASK | \
61 NFC_PROTO_NFC_DEP_MASK)
62
41static const struct usb_device_id pn533_table[] = { 63static const struct usb_device_id pn533_table[] = {
42 { USB_DEVICE(PN533_VENDOR_ID, PN533_PRODUCT_ID) }, 64 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
43 { USB_DEVICE(SCM_VENDOR_ID, SCL3711_PRODUCT_ID) }, 65 .idVendor = PN533_VENDOR_ID,
66 .idProduct = PN533_PRODUCT_ID,
67 .driver_info = PN533_DEVICE_STD,
68 },
69 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
70 .idVendor = SCM_VENDOR_ID,
71 .idProduct = SCL3711_PRODUCT_ID,
72 .driver_info = PN533_DEVICE_STD,
73 },
74 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
75 .idVendor = SONY_VENDOR_ID,
76 .idProduct = PASORI_PRODUCT_ID,
77 .driver_info = PN533_DEVICE_PASORI,
78 },
44 { } 79 { }
45}; 80};
46MODULE_DEVICE_TABLE(usb, pn533_table); 81MODULE_DEVICE_TABLE(usb, pn533_table);
47 82
83/* How much time we spend listening for initiators */
84#define PN533_LISTEN_TIME 2
85
48/* frame definitions */ 86/* frame definitions */
49#define PN533_FRAME_TAIL_SIZE 2 87#define PN533_FRAME_TAIL_SIZE 2
50#define PN533_FRAME_SIZE(f) (sizeof(struct pn533_frame) + f->datalen + \ 88#define PN533_FRAME_SIZE(f) (sizeof(struct pn533_frame) + f->datalen + \
@@ -69,11 +107,16 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
69#define PN533_CMD_GET_FIRMWARE_VERSION 0x02 107#define PN533_CMD_GET_FIRMWARE_VERSION 0x02
70#define PN533_CMD_RF_CONFIGURATION 0x32 108#define PN533_CMD_RF_CONFIGURATION 0x32
71#define PN533_CMD_IN_DATA_EXCHANGE 0x40 109#define PN533_CMD_IN_DATA_EXCHANGE 0x40
110#define PN533_CMD_IN_COMM_THRU 0x42
72#define PN533_CMD_IN_LIST_PASSIVE_TARGET 0x4A 111#define PN533_CMD_IN_LIST_PASSIVE_TARGET 0x4A
73#define PN533_CMD_IN_ATR 0x50 112#define PN533_CMD_IN_ATR 0x50
74#define PN533_CMD_IN_RELEASE 0x52 113#define PN533_CMD_IN_RELEASE 0x52
75#define PN533_CMD_IN_JUMP_FOR_DEP 0x56 114#define PN533_CMD_IN_JUMP_FOR_DEP 0x56
76 115
116#define PN533_CMD_TG_INIT_AS_TARGET 0x8c
117#define PN533_CMD_TG_GET_DATA 0x86
118#define PN533_CMD_TG_SET_DATA 0x8e
119
77#define PN533_CMD_RESPONSE(cmd) (cmd + 1) 120#define PN533_CMD_RESPONSE(cmd) (cmd + 1)
78 121
79/* PN533 Return codes */ 122/* PN533 Return codes */
@@ -81,6 +124,9 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
81#define PN533_CMD_MI_MASK 0x40 124#define PN533_CMD_MI_MASK 0x40
82#define PN533_CMD_RET_SUCCESS 0x00 125#define PN533_CMD_RET_SUCCESS 0x00
83 126
127/* PN533 status codes */
128#define PN533_STATUS_TARGET_RELEASED 0x29
129
84struct pn533; 130struct pn533;
85 131
86typedef int (*pn533_cmd_complete_t) (struct pn533 *dev, void *arg, 132typedef int (*pn533_cmd_complete_t) (struct pn533 *dev, void *arg,
@@ -97,7 +143,14 @@ struct pn533_fw_version {
97}; 143};
98 144
99/* PN533_CMD_RF_CONFIGURATION */ 145/* PN533_CMD_RF_CONFIGURATION */
146#define PN533_CFGITEM_TIMING 0x02
100#define PN533_CFGITEM_MAX_RETRIES 0x05 147#define PN533_CFGITEM_MAX_RETRIES 0x05
148#define PN533_CFGITEM_PASORI 0x82
149
150#define PN533_CONFIG_TIMING_102 0xb
151#define PN533_CONFIG_TIMING_204 0xc
152#define PN533_CONFIG_TIMING_409 0xd
153#define PN533_CONFIG_TIMING_819 0xe
101 154
102#define PN533_CONFIG_MAX_RETRIES_NO_RETRY 0x00 155#define PN533_CONFIG_MAX_RETRIES_NO_RETRY 0x00
103#define PN533_CONFIG_MAX_RETRIES_ENDLESS 0xFF 156#define PN533_CONFIG_MAX_RETRIES_ENDLESS 0xFF
@@ -108,6 +161,12 @@ struct pn533_config_max_retries {
108 u8 mx_rty_passive_act; 161 u8 mx_rty_passive_act;
109} __packed; 162} __packed;
110 163
164struct pn533_config_timing {
165 u8 rfu;
166 u8 atr_res_timeout;
167 u8 dep_timeout;
168} __packed;
169
111/* PN533_CMD_IN_LIST_PASSIVE_TARGET */ 170/* PN533_CMD_IN_LIST_PASSIVE_TARGET */
112 171
113/* felica commands opcode */ 172/* felica commands opcode */
@@ -144,6 +203,7 @@ enum {
144 PN533_POLL_MOD_424KBPS_FELICA, 203 PN533_POLL_MOD_424KBPS_FELICA,
145 PN533_POLL_MOD_106KBPS_JEWEL, 204 PN533_POLL_MOD_106KBPS_JEWEL,
146 PN533_POLL_MOD_847KBPS_B, 205 PN533_POLL_MOD_847KBPS_B,
206 PN533_LISTEN_MOD,
147 207
148 __PN533_POLL_MOD_AFTER_LAST, 208 __PN533_POLL_MOD_AFTER_LAST,
149}; 209};
@@ -211,6 +271,9 @@ const struct pn533_poll_modulations poll_mod[] = {
211 }, 271 },
212 .len = 3, 272 .len = 3,
213 }, 273 },
274 [PN533_LISTEN_MOD] = {
275 .len = 0,
276 },
214}; 277};
215 278
216/* PN533_CMD_IN_ATR */ 279/* PN533_CMD_IN_ATR */
@@ -237,7 +300,7 @@ struct pn533_cmd_jump_dep {
237 u8 active; 300 u8 active;
238 u8 baud; 301 u8 baud;
239 u8 next; 302 u8 next;
240 u8 gt[]; 303 u8 data[];
241} __packed; 304} __packed;
242 305
243struct pn533_cmd_jump_dep_response { 306struct pn533_cmd_jump_dep_response {
@@ -253,6 +316,29 @@ struct pn533_cmd_jump_dep_response {
253 u8 gt[]; 316 u8 gt[];
254} __packed; 317} __packed;
255 318
319
320/* PN533_TG_INIT_AS_TARGET */
321#define PN533_INIT_TARGET_PASSIVE 0x1
322#define PN533_INIT_TARGET_DEP 0x2
323
324#define PN533_INIT_TARGET_RESP_FRAME_MASK 0x3
325#define PN533_INIT_TARGET_RESP_ACTIVE 0x1
326#define PN533_INIT_TARGET_RESP_DEP 0x4
327
328struct pn533_cmd_init_target {
329 u8 mode;
330 u8 mifare[6];
331 u8 felica[18];
332 u8 nfcid3[10];
333 u8 gb_len;
334 u8 gb[];
335} __packed;
336
337struct pn533_cmd_init_target_response {
338 u8 mode;
339 u8 cmd[];
340} __packed;
341
256struct pn533 { 342struct pn533 {
257 struct usb_device *udev; 343 struct usb_device *udev;
258 struct usb_interface *interface; 344 struct usb_interface *interface;
@@ -270,22 +356,33 @@ struct pn533 {
270 356
271 struct workqueue_struct *wq; 357 struct workqueue_struct *wq;
272 struct work_struct cmd_work; 358 struct work_struct cmd_work;
359 struct work_struct poll_work;
273 struct work_struct mi_work; 360 struct work_struct mi_work;
361 struct work_struct tg_work;
362 struct timer_list listen_timer;
274 struct pn533_frame *wq_in_frame; 363 struct pn533_frame *wq_in_frame;
275 int wq_in_error; 364 int wq_in_error;
365 int cancel_listen;
276 366
277 pn533_cmd_complete_t cmd_complete; 367 pn533_cmd_complete_t cmd_complete;
278 void *cmd_complete_arg; 368 void *cmd_complete_arg;
279 struct semaphore cmd_lock; 369 struct mutex cmd_lock;
280 u8 cmd; 370 u8 cmd;
281 371
282 struct pn533_poll_modulations *poll_mod_active[PN533_POLL_MOD_MAX + 1]; 372 struct pn533_poll_modulations *poll_mod_active[PN533_POLL_MOD_MAX + 1];
283 u8 poll_mod_count; 373 u8 poll_mod_count;
284 u8 poll_mod_curr; 374 u8 poll_mod_curr;
285 u32 poll_protocols; 375 u32 poll_protocols;
376 u32 listen_protocols;
377
378 u8 *gb;
379 size_t gb_len;
286 380
287 u8 tgt_available_prots; 381 u8 tgt_available_prots;
288 u8 tgt_active_prot; 382 u8 tgt_active_prot;
383 u8 tgt_mode;
384
385 u32 device_type;
289}; 386};
290 387
291struct pn533_frame { 388struct pn533_frame {
@@ -405,7 +502,7 @@ static void pn533_wq_cmd_complete(struct work_struct *work)
405 PN533_FRAME_CMD_PARAMS_LEN(in_frame)); 502 PN533_FRAME_CMD_PARAMS_LEN(in_frame));
406 503
407 if (rc != -EINPROGRESS) 504 if (rc != -EINPROGRESS)
408 up(&dev->cmd_lock); 505 mutex_unlock(&dev->cmd_lock);
409} 506}
410 507
411static void pn533_recv_response(struct urb *urb) 508static void pn533_recv_response(struct urb *urb)
@@ -583,7 +680,7 @@ static int pn533_send_cmd_frame_async(struct pn533 *dev,
583 680
584 nfc_dev_dbg(&dev->interface->dev, "%s", __func__); 681 nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
585 682
586 if (down_trylock(&dev->cmd_lock)) 683 if (!mutex_trylock(&dev->cmd_lock))
587 return -EBUSY; 684 return -EBUSY;
588 685
589 rc = __pn533_send_cmd_frame_async(dev, out_frame, in_frame, 686 rc = __pn533_send_cmd_frame_async(dev, out_frame, in_frame,
@@ -593,7 +690,7 @@ static int pn533_send_cmd_frame_async(struct pn533 *dev,
593 690
594 return 0; 691 return 0;
595error: 692error:
596 up(&dev->cmd_lock); 693 mutex_unlock(&dev->cmd_lock);
597 return rc; 694 return rc;
598} 695}
599 696
@@ -892,7 +989,7 @@ static int pn533_target_found_type_b(struct nfc_target *nfc_tgt, u8 *tgt_data,
892 if (!pn533_target_type_b_is_valid(tgt_type_b, tgt_data_len)) 989 if (!pn533_target_type_b_is_valid(tgt_type_b, tgt_data_len))
893 return -EPROTO; 990 return -EPROTO;
894 991
895 nfc_tgt->supported_protocols = NFC_PROTO_ISO14443_MASK; 992 nfc_tgt->supported_protocols = NFC_PROTO_ISO14443_B_MASK;
896 993
897 return 0; 994 return 0;
898} 995}
@@ -963,6 +1060,11 @@ static int pn533_target_found(struct pn533 *dev,
963 return 0; 1060 return 0;
964} 1061}
965 1062
1063static inline void pn533_poll_next_mod(struct pn533 *dev)
1064{
1065 dev->poll_mod_curr = (dev->poll_mod_curr + 1) % dev->poll_mod_count;
1066}
1067
966static void pn533_poll_reset_mod_list(struct pn533 *dev) 1068static void pn533_poll_reset_mod_list(struct pn533 *dev)
967{ 1069{
968 dev->poll_mod_count = 0; 1070 dev->poll_mod_count = 0;
@@ -975,102 +1077,283 @@ static void pn533_poll_add_mod(struct pn533 *dev, u8 mod_index)
975 dev->poll_mod_count++; 1077 dev->poll_mod_count++;
976} 1078}
977 1079
978static void pn533_poll_create_mod_list(struct pn533 *dev, u32 protocols) 1080static void pn533_poll_create_mod_list(struct pn533 *dev,
1081 u32 im_protocols, u32 tm_protocols)
979{ 1082{
980 pn533_poll_reset_mod_list(dev); 1083 pn533_poll_reset_mod_list(dev);
981 1084
982 if (protocols & NFC_PROTO_MIFARE_MASK 1085 if (im_protocols & NFC_PROTO_MIFARE_MASK
983 || protocols & NFC_PROTO_ISO14443_MASK 1086 || im_protocols & NFC_PROTO_ISO14443_MASK
984 || protocols & NFC_PROTO_NFC_DEP_MASK) 1087 || im_protocols & NFC_PROTO_NFC_DEP_MASK)
985 pn533_poll_add_mod(dev, PN533_POLL_MOD_106KBPS_A); 1088 pn533_poll_add_mod(dev, PN533_POLL_MOD_106KBPS_A);
986 1089
987 if (protocols & NFC_PROTO_FELICA_MASK 1090 if (im_protocols & NFC_PROTO_FELICA_MASK
988 || protocols & NFC_PROTO_NFC_DEP_MASK) { 1091 || im_protocols & NFC_PROTO_NFC_DEP_MASK) {
989 pn533_poll_add_mod(dev, PN533_POLL_MOD_212KBPS_FELICA); 1092 pn533_poll_add_mod(dev, PN533_POLL_MOD_212KBPS_FELICA);
990 pn533_poll_add_mod(dev, PN533_POLL_MOD_424KBPS_FELICA); 1093 pn533_poll_add_mod(dev, PN533_POLL_MOD_424KBPS_FELICA);
991 } 1094 }
992 1095
993 if (protocols & NFC_PROTO_JEWEL_MASK) 1096 if (im_protocols & NFC_PROTO_JEWEL_MASK)
994 pn533_poll_add_mod(dev, PN533_POLL_MOD_106KBPS_JEWEL); 1097 pn533_poll_add_mod(dev, PN533_POLL_MOD_106KBPS_JEWEL);
995 1098
996 if (protocols & NFC_PROTO_ISO14443_MASK) 1099 if (im_protocols & NFC_PROTO_ISO14443_B_MASK)
997 pn533_poll_add_mod(dev, PN533_POLL_MOD_847KBPS_B); 1100 pn533_poll_add_mod(dev, PN533_POLL_MOD_847KBPS_B);
1101
1102 if (tm_protocols)
1103 pn533_poll_add_mod(dev, PN533_LISTEN_MOD);
998} 1104}
999 1105
1000static void pn533_start_poll_frame(struct pn533_frame *frame, 1106static int pn533_start_poll_complete(struct pn533 *dev, void *arg,
1001 struct pn533_poll_modulations *mod) 1107 u8 *params, int params_len)
1002{ 1108{
1109 struct pn533_poll_response *resp;
1110 int rc;
1003 1111
1004 pn533_tx_frame_init(frame, PN533_CMD_IN_LIST_PASSIVE_TARGET); 1112 nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
1005 1113
1006 memcpy(PN533_FRAME_CMD_PARAMS_PTR(frame), &mod->data, mod->len); 1114 resp = (struct pn533_poll_response *) params;
1007 frame->datalen += mod->len; 1115 if (resp->nbtg) {
1116 rc = pn533_target_found(dev, resp, params_len);
1117
1118 /* We must stop the poll after a valid target found */
1119 if (rc == 0) {
1120 pn533_poll_reset_mod_list(dev);
1121 return 0;
1122 }
1123 }
1124
1125 return -EAGAIN;
1126}
1127
1128static int pn533_init_target_frame(struct pn533_frame *frame,
1129 u8 *gb, size_t gb_len)
1130{
1131 struct pn533_cmd_init_target *cmd;
1132 size_t cmd_len;
1133 u8 felica_params[18] = {0x1, 0xfe, /* DEP */
1134 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, /* random */
1135 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
1136 0xff, 0xff}; /* System code */
1137 u8 mifare_params[6] = {0x1, 0x1, /* SENS_RES */
1138 0x0, 0x0, 0x0,
1139 0x40}; /* SEL_RES for DEP */
1140
1141 cmd_len = sizeof(struct pn533_cmd_init_target) + gb_len + 1;
1142 cmd = kzalloc(cmd_len, GFP_KERNEL);
1143 if (cmd == NULL)
1144 return -ENOMEM;
1145
1146 pn533_tx_frame_init(frame, PN533_CMD_TG_INIT_AS_TARGET);
1147
1148 /* DEP support only */
1149 cmd->mode |= PN533_INIT_TARGET_DEP;
1150
1151 /* Felica params */
1152 memcpy(cmd->felica, felica_params, 18);
1153 get_random_bytes(cmd->felica + 2, 6);
1154
1155 /* NFCID3 */
1156 memset(cmd->nfcid3, 0, 10);
1157 memcpy(cmd->nfcid3, cmd->felica, 8);
1158
1159 /* MIFARE params */
1160 memcpy(cmd->mifare, mifare_params, 6);
1161
1162 /* General bytes */
1163 cmd->gb_len = gb_len;
1164 memcpy(cmd->gb, gb, gb_len);
1165
1166 /* Len Tk */
1167 cmd->gb[gb_len] = 0;
1168
1169 memcpy(PN533_FRAME_CMD_PARAMS_PTR(frame), cmd, cmd_len);
1170
1171 frame->datalen += cmd_len;
1008 1172
1009 pn533_tx_frame_finish(frame); 1173 pn533_tx_frame_finish(frame);
1174
1175 kfree(cmd);
1176
1177 return 0;
1010} 1178}
1011 1179
1012static int pn533_start_poll_complete(struct pn533 *dev, void *arg, 1180#define PN533_CMD_DATAEXCH_HEAD_LEN (sizeof(struct pn533_frame) + 3)
1013 u8 *params, int params_len) 1181#define PN533_CMD_DATAEXCH_DATA_MAXLEN 262
1182static int pn533_tm_get_data_complete(struct pn533 *dev, void *arg,
1183 u8 *params, int params_len)
1014{ 1184{
1015 struct pn533_poll_response *resp; 1185 struct sk_buff *skb_resp = arg;
1016 struct pn533_poll_modulations *next_mod; 1186 struct pn533_frame *in_frame = (struct pn533_frame *) skb_resp->data;
1017 int rc;
1018 1187
1019 nfc_dev_dbg(&dev->interface->dev, "%s", __func__); 1188 nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
1020 1189
1021 if (params_len == -ENOENT) { 1190 if (params_len < 0) {
1022 nfc_dev_dbg(&dev->interface->dev, "Polling operation has been" 1191 nfc_dev_err(&dev->interface->dev,
1023 " stopped"); 1192 "Error %d when starting as a target",
1024 goto stop_poll; 1193 params_len);
1194
1195 return params_len;
1196 }
1197
1198 if (params_len > 0 && params[0] != 0) {
1199 nfc_tm_deactivated(dev->nfc_dev);
1200
1201 dev->tgt_mode = 0;
1202
1203 kfree_skb(skb_resp);
1204 return 0;
1025 } 1205 }
1026 1206
1207 skb_put(skb_resp, PN533_FRAME_SIZE(in_frame));
1208 skb_pull(skb_resp, PN533_CMD_DATAEXCH_HEAD_LEN);
1209 skb_trim(skb_resp, skb_resp->len - PN533_FRAME_TAIL_SIZE);
1210
1211 return nfc_tm_data_received(dev->nfc_dev, skb_resp);
1212}
1213
1214static void pn533_wq_tg_get_data(struct work_struct *work)
1215{
1216 struct pn533 *dev = container_of(work, struct pn533, tg_work);
1217 struct pn533_frame *in_frame;
1218 struct sk_buff *skb_resp;
1219 size_t skb_resp_len;
1220
1221 nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
1222
1223 skb_resp_len = PN533_CMD_DATAEXCH_HEAD_LEN +
1224 PN533_CMD_DATAEXCH_DATA_MAXLEN +
1225 PN533_FRAME_TAIL_SIZE;
1226
1227 skb_resp = nfc_alloc_recv_skb(skb_resp_len, GFP_KERNEL);
1228 if (!skb_resp)
1229 return;
1230
1231 in_frame = (struct pn533_frame *)skb_resp->data;
1232
1233 pn533_tx_frame_init(dev->out_frame, PN533_CMD_TG_GET_DATA);
1234 pn533_tx_frame_finish(dev->out_frame);
1235
1236 pn533_send_cmd_frame_async(dev, dev->out_frame, in_frame,
1237 skb_resp_len,
1238 pn533_tm_get_data_complete,
1239 skb_resp, GFP_KERNEL);
1240
1241 return;
1242}
1243
1244#define ATR_REQ_GB_OFFSET 17
1245static int pn533_init_target_complete(struct pn533 *dev, void *arg,
1246 u8 *params, int params_len)
1247{
1248 struct pn533_cmd_init_target_response *resp;
1249 u8 frame, comm_mode = NFC_COMM_PASSIVE, *gb;
1250 size_t gb_len;
1251 int rc;
1252
1253 nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
1254
1027 if (params_len < 0) { 1255 if (params_len < 0) {
1028 nfc_dev_err(&dev->interface->dev, "Error %d when running poll", 1256 nfc_dev_err(&dev->interface->dev,
1029 params_len); 1257 "Error %d when starting as a target",
1030 goto stop_poll; 1258 params_len);
1259
1260 return params_len;
1031 } 1261 }
1032 1262
1033 resp = (struct pn533_poll_response *) params; 1263 if (params_len < ATR_REQ_GB_OFFSET + 1)
1034 if (resp->nbtg) { 1264 return -EINVAL;
1035 rc = pn533_target_found(dev, resp, params_len);
1036 1265
1037 /* We must stop the poll after a valid target found */ 1266 resp = (struct pn533_cmd_init_target_response *) params;
1038 if (rc == 0) 1267
1039 goto stop_poll; 1268 nfc_dev_dbg(&dev->interface->dev, "Target mode 0x%x param len %d\n",
1269 resp->mode, params_len);
1270
1271 frame = resp->mode & PN533_INIT_TARGET_RESP_FRAME_MASK;
1272 if (frame == PN533_INIT_TARGET_RESP_ACTIVE)
1273 comm_mode = NFC_COMM_ACTIVE;
1040 1274
1041 if (rc != -EAGAIN) 1275 /* Again, only DEP */
1042 nfc_dev_err(&dev->interface->dev, "The target found is" 1276 if ((resp->mode & PN533_INIT_TARGET_RESP_DEP) == 0)
1043 " not valid - continuing to poll"); 1277 return -EOPNOTSUPP;
1278
1279 gb = resp->cmd + ATR_REQ_GB_OFFSET;
1280 gb_len = params_len - (ATR_REQ_GB_OFFSET + 1);
1281
1282 rc = nfc_tm_activated(dev->nfc_dev, NFC_PROTO_NFC_DEP_MASK,
1283 comm_mode, gb, gb_len);
1284 if (rc < 0) {
1285 nfc_dev_err(&dev->interface->dev,
1286 "Error when signaling target activation");
1287 return rc;
1044 } 1288 }
1045 1289
1046 dev->poll_mod_curr = (dev->poll_mod_curr + 1) % dev->poll_mod_count; 1290 dev->tgt_mode = 1;
1047 1291
1048 next_mod = dev->poll_mod_active[dev->poll_mod_curr]; 1292 queue_work(dev->wq, &dev->tg_work);
1049 1293
1050 nfc_dev_dbg(&dev->interface->dev, "Polling next modulation (0x%x)", 1294 return 0;
1051 dev->poll_mod_curr); 1295}
1052 1296
1053 pn533_start_poll_frame(dev->out_frame, next_mod); 1297static void pn533_listen_mode_timer(unsigned long data)
1298{
1299 struct pn533 *dev = (struct pn533 *) data;
1054 1300
1055 /* Don't need to down the semaphore again */ 1301 nfc_dev_dbg(&dev->interface->dev, "Listen mode timeout");
1056 rc = __pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame, 1302
1057 dev->in_maxlen, pn533_start_poll_complete, 1303 /* An ack will cancel the last issued command (poll) */
1058 NULL, GFP_ATOMIC); 1304 pn533_send_ack(dev, GFP_ATOMIC);
1305
1306 dev->cancel_listen = 1;
1307
1308 mutex_unlock(&dev->cmd_lock);
1309
1310 pn533_poll_next_mod(dev);
1311
1312 queue_work(dev->wq, &dev->poll_work);
1313}
1314
1315static int pn533_poll_complete(struct pn533 *dev, void *arg,
1316 u8 *params, int params_len)
1317{
1318 struct pn533_poll_modulations *cur_mod;
1319 int rc;
1320
1321 nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
1322
1323 if (params_len == -ENOENT) {
1324 if (dev->poll_mod_count != 0)
1325 return 0;
1326
1327 nfc_dev_err(&dev->interface->dev,
1328 "Polling operation has been stopped");
1059 1329
1060 if (rc == -EPERM) {
1061 nfc_dev_dbg(&dev->interface->dev, "Cannot poll next modulation"
1062 " because poll has been stopped");
1063 goto stop_poll; 1330 goto stop_poll;
1064 } 1331 }
1065 1332
1066 if (rc) { 1333 if (params_len < 0) {
1067 nfc_dev_err(&dev->interface->dev, "Error %d when trying to poll" 1334 nfc_dev_err(&dev->interface->dev,
1068 " next modulation", rc); 1335 "Error %d when running poll", params_len);
1336
1069 goto stop_poll; 1337 goto stop_poll;
1070 } 1338 }
1071 1339
1072 /* Inform caller function to do not up the semaphore */ 1340 cur_mod = dev->poll_mod_active[dev->poll_mod_curr];
1073 return -EINPROGRESS; 1341
1342 if (cur_mod->len == 0) {
1343 del_timer(&dev->listen_timer);
1344
1345 return pn533_init_target_complete(dev, arg, params, params_len);
1346 } else {
1347 rc = pn533_start_poll_complete(dev, arg, params, params_len);
1348 if (!rc)
1349 return rc;
1350 }
1351
1352 pn533_poll_next_mod(dev);
1353
1354 queue_work(dev->wq, &dev->poll_work);
1355
1356 return 0;
1074 1357
1075stop_poll: 1358stop_poll:
1076 pn533_poll_reset_mod_list(dev); 1359 pn533_poll_reset_mod_list(dev);
@@ -1078,61 +1361,104 @@ stop_poll:
1078 return 0; 1361 return 0;
1079} 1362}
1080 1363
1081static int pn533_start_poll(struct nfc_dev *nfc_dev, u32 protocols) 1364static void pn533_build_poll_frame(struct pn533 *dev,
1365 struct pn533_frame *frame,
1366 struct pn533_poll_modulations *mod)
1082{ 1367{
1083 struct pn533 *dev = nfc_get_drvdata(nfc_dev); 1368 nfc_dev_dbg(&dev->interface->dev, "mod len %d\n", mod->len);
1084 struct pn533_poll_modulations *start_mod;
1085 int rc;
1086 1369
1087 nfc_dev_dbg(&dev->interface->dev, "%s - protocols=0x%x", __func__, 1370 if (mod->len == 0) {
1088 protocols); 1371 /* Listen mode */
1372 pn533_init_target_frame(frame, dev->gb, dev->gb_len);
1373 } else {
1374 /* Polling mode */
1375 pn533_tx_frame_init(frame, PN533_CMD_IN_LIST_PASSIVE_TARGET);
1089 1376
1090 if (dev->poll_mod_count) { 1377 memcpy(PN533_FRAME_CMD_PARAMS_PTR(frame), &mod->data, mod->len);
1091 nfc_dev_err(&dev->interface->dev, "Polling operation already" 1378 frame->datalen += mod->len;
1092 " active");
1093 return -EBUSY;
1094 }
1095 1379
1096 if (dev->tgt_active_prot) { 1380 pn533_tx_frame_finish(frame);
1097 nfc_dev_err(&dev->interface->dev, "Cannot poll with a target"
1098 " already activated");
1099 return -EBUSY;
1100 } 1381 }
1382}
1383
1384static int pn533_send_poll_frame(struct pn533 *dev)
1385{
1386 struct pn533_poll_modulations *cur_mod;
1387 int rc;
1101 1388
1102 pn533_poll_create_mod_list(dev, protocols); 1389 cur_mod = dev->poll_mod_active[dev->poll_mod_curr];
1103 1390
1104 if (!dev->poll_mod_count) { 1391 pn533_build_poll_frame(dev, dev->out_frame, cur_mod);
1105 nfc_dev_err(&dev->interface->dev, "No valid protocols" 1392
1106 " specified"); 1393 rc = pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame,
1107 rc = -EINVAL; 1394 dev->in_maxlen, pn533_poll_complete,
1108 goto error; 1395 NULL, GFP_KERNEL);
1396 if (rc)
1397 nfc_dev_err(&dev->interface->dev, "Polling loop error %d", rc);
1398
1399 return rc;
1400}
1401
1402static void pn533_wq_poll(struct work_struct *work)
1403{
1404 struct pn533 *dev = container_of(work, struct pn533, poll_work);
1405 struct pn533_poll_modulations *cur_mod;
1406 int rc;
1407
1408 cur_mod = dev->poll_mod_active[dev->poll_mod_curr];
1409
1410 nfc_dev_dbg(&dev->interface->dev,
1411 "%s cancel_listen %d modulation len %d",
1412 __func__, dev->cancel_listen, cur_mod->len);
1413
1414 if (dev->cancel_listen == 1) {
1415 dev->cancel_listen = 0;
1416 usb_kill_urb(dev->in_urb);
1109 } 1417 }
1110 1418
1111 nfc_dev_dbg(&dev->interface->dev, "It will poll %d modulations types", 1419 rc = pn533_send_poll_frame(dev);
1112 dev->poll_mod_count); 1420 if (rc)
1421 return;
1113 1422
1114 dev->poll_mod_curr = 0; 1423 if (cur_mod->len == 0 && dev->poll_mod_count > 1)
1115 start_mod = dev->poll_mod_active[dev->poll_mod_curr]; 1424 mod_timer(&dev->listen_timer, jiffies + PN533_LISTEN_TIME * HZ);
1116 1425
1117 pn533_start_poll_frame(dev->out_frame, start_mod); 1426 return;
1427}
1118 1428
1119 rc = pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame, 1429static int pn533_start_poll(struct nfc_dev *nfc_dev,
1120 dev->in_maxlen, pn533_start_poll_complete, 1430 u32 im_protocols, u32 tm_protocols)
1121 NULL, GFP_KERNEL); 1431{
1432 struct pn533 *dev = nfc_get_drvdata(nfc_dev);
1122 1433
1123 if (rc) { 1434 nfc_dev_dbg(&dev->interface->dev,
1124 nfc_dev_err(&dev->interface->dev, "Error %d when trying to" 1435 "%s: im protocols 0x%x tm protocols 0x%x",
1125 " start poll", rc); 1436 __func__, im_protocols, tm_protocols);
1126 goto error; 1437
1438 if (dev->tgt_active_prot) {
1439 nfc_dev_err(&dev->interface->dev,
1440 "Cannot poll with a target already activated");
1441 return -EBUSY;
1127 } 1442 }
1128 1443
1129 dev->poll_protocols = protocols; 1444 if (dev->tgt_mode) {
1445 nfc_dev_err(&dev->interface->dev,
1446 "Cannot poll while already being activated");
1447 return -EBUSY;
1448 }
1130 1449
1131 return 0; 1450 if (tm_protocols) {
1451 dev->gb = nfc_get_local_general_bytes(nfc_dev, &dev->gb_len);
1452 if (dev->gb == NULL)
1453 tm_protocols = 0;
1454 }
1132 1455
1133error: 1456 dev->poll_mod_curr = 0;
1134 pn533_poll_reset_mod_list(dev); 1457 pn533_poll_create_mod_list(dev, im_protocols, tm_protocols);
1135 return rc; 1458 dev->poll_protocols = im_protocols;
1459 dev->listen_protocols = tm_protocols;
1460
1461 return pn533_send_poll_frame(dev);
1136} 1462}
1137 1463
1138static void pn533_stop_poll(struct nfc_dev *nfc_dev) 1464static void pn533_stop_poll(struct nfc_dev *nfc_dev)
@@ -1141,6 +1467,8 @@ static void pn533_stop_poll(struct nfc_dev *nfc_dev)
1141 1467
1142 nfc_dev_dbg(&dev->interface->dev, "%s", __func__); 1468 nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
1143 1469
1470 del_timer(&dev->listen_timer);
1471
1144 if (!dev->poll_mod_count) { 1472 if (!dev->poll_mod_count) {
1145 nfc_dev_dbg(&dev->interface->dev, "Polling operation was not" 1473 nfc_dev_dbg(&dev->interface->dev, "Polling operation was not"
1146 " running"); 1474 " running");
@@ -1152,6 +1480,8 @@ static void pn533_stop_poll(struct nfc_dev *nfc_dev)
1152 1480
1153 /* prevent pn533_start_poll_complete to issue a new poll meanwhile */ 1481 /* prevent pn533_start_poll_complete to issue a new poll meanwhile */
1154 usb_kill_urb(dev->in_urb); 1482 usb_kill_urb(dev->in_urb);
1483
1484 pn533_poll_reset_mod_list(dev);
1155} 1485}
1156 1486
1157static int pn533_activate_target_nfcdep(struct pn533 *dev) 1487static int pn533_activate_target_nfcdep(struct pn533 *dev)
@@ -1349,13 +1679,29 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
1349 return 0; 1679 return 0;
1350} 1680}
1351 1681
1682static int pn533_mod_to_baud(struct pn533 *dev)
1683{
1684 switch (dev->poll_mod_curr) {
1685 case PN533_POLL_MOD_106KBPS_A:
1686 return 0;
1687 case PN533_POLL_MOD_212KBPS_FELICA:
1688 return 1;
1689 case PN533_POLL_MOD_424KBPS_FELICA:
1690 return 2;
1691 default:
1692 return -EINVAL;
1693 }
1694}
1695
1696#define PASSIVE_DATA_LEN 5
1352static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target, 1697static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
1353 u8 comm_mode, u8* gb, size_t gb_len) 1698 u8 comm_mode, u8* gb, size_t gb_len)
1354{ 1699{
1355 struct pn533 *dev = nfc_get_drvdata(nfc_dev); 1700 struct pn533 *dev = nfc_get_drvdata(nfc_dev);
1356 struct pn533_cmd_jump_dep *cmd; 1701 struct pn533_cmd_jump_dep *cmd;
1357 u8 cmd_len; 1702 u8 cmd_len, *data_ptr;
1358 int rc; 1703 u8 passive_data[PASSIVE_DATA_LEN] = {0x00, 0xff, 0xff, 0x00, 0x3};
1704 int rc, baud;
1359 1705
1360 nfc_dev_dbg(&dev->interface->dev, "%s", __func__); 1706 nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
1361 1707
@@ -1371,7 +1717,17 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
1371 return -EBUSY; 1717 return -EBUSY;
1372 } 1718 }
1373 1719
1720 baud = pn533_mod_to_baud(dev);
1721 if (baud < 0) {
1722 nfc_dev_err(&dev->interface->dev,
1723 "Invalid curr modulation %d", dev->poll_mod_curr);
1724 return baud;
1725 }
1726
1374 cmd_len = sizeof(struct pn533_cmd_jump_dep) + gb_len; 1727 cmd_len = sizeof(struct pn533_cmd_jump_dep) + gb_len;
1728 if (comm_mode == NFC_COMM_PASSIVE)
1729 cmd_len += PASSIVE_DATA_LEN;
1730
1375 cmd = kzalloc(cmd_len, GFP_KERNEL); 1731 cmd = kzalloc(cmd_len, GFP_KERNEL);
1376 if (cmd == NULL) 1732 if (cmd == NULL)
1377 return -ENOMEM; 1733 return -ENOMEM;
@@ -1379,10 +1735,18 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
1379 pn533_tx_frame_init(dev->out_frame, PN533_CMD_IN_JUMP_FOR_DEP); 1735 pn533_tx_frame_init(dev->out_frame, PN533_CMD_IN_JUMP_FOR_DEP);
1380 1736
1381 cmd->active = !comm_mode; 1737 cmd->active = !comm_mode;
1382 cmd->baud = 0; 1738 cmd->next = 0;
1739 cmd->baud = baud;
1740 data_ptr = cmd->data;
1741 if (comm_mode == NFC_COMM_PASSIVE && cmd->baud > 0) {
1742 memcpy(data_ptr, passive_data, PASSIVE_DATA_LEN);
1743 cmd->next |= 1;
1744 data_ptr += PASSIVE_DATA_LEN;
1745 }
1746
1383 if (gb != NULL && gb_len > 0) { 1747 if (gb != NULL && gb_len > 0) {
1384 cmd->next = 4; /* We have some Gi */ 1748 cmd->next |= 4; /* We have some Gi */
1385 memcpy(cmd->gt, gb, gb_len); 1749 memcpy(data_ptr, gb, gb_len);
1386 } else { 1750 } else {
1387 cmd->next = 0; 1751 cmd->next = 0;
1388 } 1752 }
@@ -1407,15 +1771,25 @@ out:
1407 1771
1408static int pn533_dep_link_down(struct nfc_dev *nfc_dev) 1772static int pn533_dep_link_down(struct nfc_dev *nfc_dev)
1409{ 1773{
1410 pn533_deactivate_target(nfc_dev, 0); 1774 struct pn533 *dev = nfc_get_drvdata(nfc_dev);
1775
1776 pn533_poll_reset_mod_list(dev);
1777
1778 if (dev->tgt_mode || dev->tgt_active_prot) {
1779 pn533_send_ack(dev, GFP_KERNEL);
1780 usb_kill_urb(dev->in_urb);
1781 }
1782
1783 dev->tgt_active_prot = 0;
1784 dev->tgt_mode = 0;
1785
1786 skb_queue_purge(&dev->resp_q);
1411 1787
1412 return 0; 1788 return 0;
1413} 1789}
1414 1790
1415#define PN533_CMD_DATAEXCH_HEAD_LEN (sizeof(struct pn533_frame) + 3) 1791static int pn533_build_tx_frame(struct pn533 *dev, struct sk_buff *skb,
1416#define PN533_CMD_DATAEXCH_DATA_MAXLEN 262 1792 bool target)
1417
1418static int pn533_data_exchange_tx_frame(struct pn533 *dev, struct sk_buff *skb)
1419{ 1793{
1420 int payload_len = skb->len; 1794 int payload_len = skb->len;
1421 struct pn533_frame *out_frame; 1795 struct pn533_frame *out_frame;
@@ -1432,14 +1806,37 @@ static int pn533_data_exchange_tx_frame(struct pn533 *dev, struct sk_buff *skb)
1432 return -ENOSYS; 1806 return -ENOSYS;
1433 } 1807 }
1434 1808
1435 skb_push(skb, PN533_CMD_DATAEXCH_HEAD_LEN); 1809 if (target == true) {
1436 out_frame = (struct pn533_frame *) skb->data; 1810 switch (dev->device_type) {
1811 case PN533_DEVICE_PASORI:
1812 if (dev->tgt_active_prot == NFC_PROTO_FELICA) {
1813 skb_push(skb, PN533_CMD_DATAEXCH_HEAD_LEN - 1);
1814 out_frame = (struct pn533_frame *) skb->data;
1815 pn533_tx_frame_init(out_frame,
1816 PN533_CMD_IN_COMM_THRU);
1817
1818 break;
1819 }
1820
1821 default:
1822 skb_push(skb, PN533_CMD_DATAEXCH_HEAD_LEN);
1823 out_frame = (struct pn533_frame *) skb->data;
1824 pn533_tx_frame_init(out_frame,
1825 PN533_CMD_IN_DATA_EXCHANGE);
1826 tg = 1;
1827 memcpy(PN533_FRAME_CMD_PARAMS_PTR(out_frame),
1828 &tg, sizeof(u8));
1829 out_frame->datalen += sizeof(u8);
1830
1831 break;
1832 }
1437 1833
1438 pn533_tx_frame_init(out_frame, PN533_CMD_IN_DATA_EXCHANGE); 1834 } else {
1835 skb_push(skb, PN533_CMD_DATAEXCH_HEAD_LEN - 1);
1836 out_frame = (struct pn533_frame *) skb->data;
1837 pn533_tx_frame_init(out_frame, PN533_CMD_TG_SET_DATA);
1838 }
1439 1839
1440 tg = 1;
1441 memcpy(PN533_FRAME_CMD_PARAMS_PTR(out_frame), &tg, sizeof(u8));
1442 out_frame->datalen += sizeof(u8);
1443 1840
1444 /* The data is already in the out_frame, just update the datalen */ 1841 /* The data is already in the out_frame, just update the datalen */
1445 out_frame->datalen += payload_len; 1842 out_frame->datalen += payload_len;
@@ -1550,9 +1947,9 @@ error:
1550 return 0; 1947 return 0;
1551} 1948}
1552 1949
1553static int pn533_data_exchange(struct nfc_dev *nfc_dev, 1950static int pn533_transceive(struct nfc_dev *nfc_dev,
1554 struct nfc_target *target, struct sk_buff *skb, 1951 struct nfc_target *target, struct sk_buff *skb,
1555 data_exchange_cb_t cb, void *cb_context) 1952 data_exchange_cb_t cb, void *cb_context)
1556{ 1953{
1557 struct pn533 *dev = nfc_get_drvdata(nfc_dev); 1954 struct pn533 *dev = nfc_get_drvdata(nfc_dev);
1558 struct pn533_frame *out_frame, *in_frame; 1955 struct pn533_frame *out_frame, *in_frame;
@@ -1570,7 +1967,7 @@ static int pn533_data_exchange(struct nfc_dev *nfc_dev,
1570 goto error; 1967 goto error;
1571 } 1968 }
1572 1969
1573 rc = pn533_data_exchange_tx_frame(dev, skb); 1970 rc = pn533_build_tx_frame(dev, skb, true);
1574 if (rc) 1971 if (rc)
1575 goto error; 1972 goto error;
1576 1973
@@ -1618,6 +2015,63 @@ error:
1618 return rc; 2015 return rc;
1619} 2016}
1620 2017
2018static int pn533_tm_send_complete(struct pn533 *dev, void *arg,
2019 u8 *params, int params_len)
2020{
2021 nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
2022
2023 if (params_len < 0) {
2024 nfc_dev_err(&dev->interface->dev,
2025 "Error %d when sending data",
2026 params_len);
2027
2028 return params_len;
2029 }
2030
2031 if (params_len > 0 && params[0] != 0) {
2032 nfc_tm_deactivated(dev->nfc_dev);
2033
2034 dev->tgt_mode = 0;
2035
2036 return 0;
2037 }
2038
2039 queue_work(dev->wq, &dev->tg_work);
2040
2041 return 0;
2042}
2043
2044static int pn533_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
2045{
2046 struct pn533 *dev = nfc_get_drvdata(nfc_dev);
2047 struct pn533_frame *out_frame;
2048 int rc;
2049
2050 nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
2051
2052 rc = pn533_build_tx_frame(dev, skb, false);
2053 if (rc)
2054 goto error;
2055
2056 out_frame = (struct pn533_frame *) skb->data;
2057
2058 rc = pn533_send_cmd_frame_async(dev, out_frame, dev->in_frame,
2059 dev->in_maxlen, pn533_tm_send_complete,
2060 NULL, GFP_KERNEL);
2061 if (rc) {
2062 nfc_dev_err(&dev->interface->dev,
2063 "Error %d when trying to send data", rc);
2064 goto error;
2065 }
2066
2067 return 0;
2068
2069error:
2070 kfree_skb(skb);
2071
2072 return rc;
2073}
2074
1621static void pn533_wq_mi_recv(struct work_struct *work) 2075static void pn533_wq_mi_recv(struct work_struct *work)
1622{ 2076{
1623 struct pn533 *dev = container_of(work, struct pn533, mi_work); 2077 struct pn533 *dev = container_of(work, struct pn533, mi_work);
@@ -1638,7 +2092,7 @@ static void pn533_wq_mi_recv(struct work_struct *work)
1638 2092
1639 skb_reserve(skb_cmd, PN533_CMD_DATAEXCH_HEAD_LEN); 2093 skb_reserve(skb_cmd, PN533_CMD_DATAEXCH_HEAD_LEN);
1640 2094
1641 rc = pn533_data_exchange_tx_frame(dev, skb_cmd); 2095 rc = pn533_build_tx_frame(dev, skb_cmd, true);
1642 if (rc) 2096 if (rc)
1643 goto error_frame; 2097 goto error_frame;
1644 2098
@@ -1677,7 +2131,7 @@ error_cmd:
1677 2131
1678 kfree(arg); 2132 kfree(arg);
1679 2133
1680 up(&dev->cmd_lock); 2134 mutex_unlock(&dev->cmd_lock);
1681} 2135}
1682 2136
1683static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata, 2137static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata,
@@ -1703,7 +2157,28 @@ static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata,
1703 return rc; 2157 return rc;
1704} 2158}
1705 2159
1706struct nfc_ops pn533_nfc_ops = { 2160static int pn533_fw_reset(struct pn533 *dev)
2161{
2162 int rc;
2163 u8 *params;
2164
2165 nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
2166
2167 pn533_tx_frame_init(dev->out_frame, 0x18);
2168
2169 params = PN533_FRAME_CMD_PARAMS_PTR(dev->out_frame);
2170 params[0] = 0x1;
2171 dev->out_frame->datalen += 1;
2172
2173 pn533_tx_frame_finish(dev->out_frame);
2174
2175 rc = pn533_send_cmd_frame_sync(dev, dev->out_frame, dev->in_frame,
2176 dev->in_maxlen);
2177
2178 return rc;
2179}
2180
2181static struct nfc_ops pn533_nfc_ops = {
1707 .dev_up = NULL, 2182 .dev_up = NULL,
1708 .dev_down = NULL, 2183 .dev_down = NULL,
1709 .dep_link_up = pn533_dep_link_up, 2184 .dep_link_up = pn533_dep_link_up,
@@ -1712,9 +2187,88 @@ struct nfc_ops pn533_nfc_ops = {
1712 .stop_poll = pn533_stop_poll, 2187 .stop_poll = pn533_stop_poll,
1713 .activate_target = pn533_activate_target, 2188 .activate_target = pn533_activate_target,
1714 .deactivate_target = pn533_deactivate_target, 2189 .deactivate_target = pn533_deactivate_target,
1715 .data_exchange = pn533_data_exchange, 2190 .im_transceive = pn533_transceive,
2191 .tm_send = pn533_tm_send,
1716}; 2192};
1717 2193
2194static int pn533_setup(struct pn533 *dev)
2195{
2196 struct pn533_config_max_retries max_retries;
2197 struct pn533_config_timing timing;
2198 u8 pasori_cfg[3] = {0x08, 0x01, 0x08};
2199 int rc;
2200
2201 switch (dev->device_type) {
2202 case PN533_DEVICE_STD:
2203 max_retries.mx_rty_atr = PN533_CONFIG_MAX_RETRIES_ENDLESS;
2204 max_retries.mx_rty_psl = 2;
2205 max_retries.mx_rty_passive_act =
2206 PN533_CONFIG_MAX_RETRIES_NO_RETRY;
2207
2208 timing.rfu = PN533_CONFIG_TIMING_102;
2209 timing.atr_res_timeout = PN533_CONFIG_TIMING_204;
2210 timing.dep_timeout = PN533_CONFIG_TIMING_409;
2211
2212 break;
2213
2214 case PN533_DEVICE_PASORI:
2215 max_retries.mx_rty_atr = 0x2;
2216 max_retries.mx_rty_psl = 0x1;
2217 max_retries.mx_rty_passive_act =
2218 PN533_CONFIG_MAX_RETRIES_NO_RETRY;
2219
2220 timing.rfu = PN533_CONFIG_TIMING_102;
2221 timing.atr_res_timeout = PN533_CONFIG_TIMING_102;
2222 timing.dep_timeout = PN533_CONFIG_TIMING_204;
2223
2224 break;
2225
2226 default:
2227 nfc_dev_err(&dev->interface->dev, "Unknown device type %d\n",
2228 dev->device_type);
2229 return -EINVAL;
2230 }
2231
2232 rc = pn533_set_configuration(dev, PN533_CFGITEM_MAX_RETRIES,
2233 (u8 *)&max_retries, sizeof(max_retries));
2234 if (rc) {
2235 nfc_dev_err(&dev->interface->dev,
2236 "Error on setting MAX_RETRIES config");
2237 return rc;
2238 }
2239
2240
2241 rc = pn533_set_configuration(dev, PN533_CFGITEM_TIMING,
2242 (u8 *)&timing, sizeof(timing));
2243 if (rc) {
2244 nfc_dev_err(&dev->interface->dev,
2245 "Error on setting RF timings");
2246 return rc;
2247 }
2248
2249 switch (dev->device_type) {
2250 case PN533_DEVICE_STD:
2251 break;
2252
2253 case PN533_DEVICE_PASORI:
2254 pn533_fw_reset(dev);
2255
2256 rc = pn533_set_configuration(dev, PN533_CFGITEM_PASORI,
2257 pasori_cfg, 3);
2258 if (rc) {
2259 nfc_dev_err(&dev->interface->dev,
2260 "Error while settings PASORI config");
2261 return rc;
2262 }
2263
2264 pn533_fw_reset(dev);
2265
2266 break;
2267 }
2268
2269 return 0;
2270}
2271
1718static int pn533_probe(struct usb_interface *interface, 2272static int pn533_probe(struct usb_interface *interface,
1719 const struct usb_device_id *id) 2273 const struct usb_device_id *id)
1720{ 2274{
@@ -1722,7 +2276,6 @@ static int pn533_probe(struct usb_interface *interface,
1722 struct pn533 *dev; 2276 struct pn533 *dev;
1723 struct usb_host_interface *iface_desc; 2277 struct usb_host_interface *iface_desc;
1724 struct usb_endpoint_descriptor *endpoint; 2278 struct usb_endpoint_descriptor *endpoint;
1725 struct pn533_config_max_retries max_retries;
1726 int in_endpoint = 0; 2279 int in_endpoint = 0;
1727 int out_endpoint = 0; 2280 int out_endpoint = 0;
1728 int rc = -ENOMEM; 2281 int rc = -ENOMEM;
@@ -1735,7 +2288,7 @@ static int pn533_probe(struct usb_interface *interface,
1735 2288
1736 dev->udev = usb_get_dev(interface_to_usbdev(interface)); 2289 dev->udev = usb_get_dev(interface_to_usbdev(interface));
1737 dev->interface = interface; 2290 dev->interface = interface;
1738 sema_init(&dev->cmd_lock, 1); 2291 mutex_init(&dev->cmd_lock);
1739 2292
1740 iface_desc = interface->cur_altsetting; 2293 iface_desc = interface->cur_altsetting;
1741 for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { 2294 for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
@@ -1779,12 +2332,18 @@ static int pn533_probe(struct usb_interface *interface,
1779 2332
1780 INIT_WORK(&dev->cmd_work, pn533_wq_cmd_complete); 2333 INIT_WORK(&dev->cmd_work, pn533_wq_cmd_complete);
1781 INIT_WORK(&dev->mi_work, pn533_wq_mi_recv); 2334 INIT_WORK(&dev->mi_work, pn533_wq_mi_recv);
2335 INIT_WORK(&dev->tg_work, pn533_wq_tg_get_data);
2336 INIT_WORK(&dev->poll_work, pn533_wq_poll);
1782 dev->wq = alloc_workqueue("pn533", 2337 dev->wq = alloc_workqueue("pn533",
1783 WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM, 2338 WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM,
1784 1); 2339 1);
1785 if (dev->wq == NULL) 2340 if (dev->wq == NULL)
1786 goto error; 2341 goto error;
1787 2342
2343 init_timer(&dev->listen_timer);
2344 dev->listen_timer.data = (unsigned long) dev;
2345 dev->listen_timer.function = pn533_listen_mode_timer;
2346
1788 skb_queue_head_init(&dev->resp_q); 2347 skb_queue_head_init(&dev->resp_q);
1789 2348
1790 usb_set_intfdata(interface, dev); 2349 usb_set_intfdata(interface, dev);
@@ -1802,10 +2361,22 @@ static int pn533_probe(struct usb_interface *interface,
1802 nfc_dev_info(&dev->interface->dev, "NXP PN533 firmware ver %d.%d now" 2361 nfc_dev_info(&dev->interface->dev, "NXP PN533 firmware ver %d.%d now"
1803 " attached", fw_ver->ver, fw_ver->rev); 2362 " attached", fw_ver->ver, fw_ver->rev);
1804 2363
1805 protocols = NFC_PROTO_JEWEL_MASK 2364 dev->device_type = id->driver_info;
1806 | NFC_PROTO_MIFARE_MASK | NFC_PROTO_FELICA_MASK 2365 switch (dev->device_type) {
1807 | NFC_PROTO_ISO14443_MASK 2366 case PN533_DEVICE_STD:
1808 | NFC_PROTO_NFC_DEP_MASK; 2367 protocols = PN533_ALL_PROTOCOLS;
2368 break;
2369
2370 case PN533_DEVICE_PASORI:
2371 protocols = PN533_NO_TYPE_B_PROTOCOLS;
2372 break;
2373
2374 default:
2375 nfc_dev_err(&dev->interface->dev, "Unknown device type %d\n",
2376 dev->device_type);
2377 rc = -EINVAL;
2378 goto destroy_wq;
2379 }
1809 2380
1810 dev->nfc_dev = nfc_allocate_device(&pn533_nfc_ops, protocols, 2381 dev->nfc_dev = nfc_allocate_device(&pn533_nfc_ops, protocols,
1811 PN533_CMD_DATAEXCH_HEAD_LEN, 2382 PN533_CMD_DATAEXCH_HEAD_LEN,
@@ -1820,23 +2391,18 @@ static int pn533_probe(struct usb_interface *interface,
1820 if (rc) 2391 if (rc)
1821 goto free_nfc_dev; 2392 goto free_nfc_dev;
1822 2393
1823 max_retries.mx_rty_atr = PN533_CONFIG_MAX_RETRIES_ENDLESS; 2394 rc = pn533_setup(dev);
1824 max_retries.mx_rty_psl = 2; 2395 if (rc)
1825 max_retries.mx_rty_passive_act = PN533_CONFIG_MAX_RETRIES_NO_RETRY; 2396 goto unregister_nfc_dev;
1826
1827 rc = pn533_set_configuration(dev, PN533_CFGITEM_MAX_RETRIES,
1828 (u8 *) &max_retries, sizeof(max_retries));
1829
1830 if (rc) {
1831 nfc_dev_err(&dev->interface->dev, "Error on setting MAX_RETRIES"
1832 " config");
1833 goto free_nfc_dev;
1834 }
1835 2397
1836 return 0; 2398 return 0;
1837 2399
2400unregister_nfc_dev:
2401 nfc_unregister_device(dev->nfc_dev);
2402
1838free_nfc_dev: 2403free_nfc_dev:
1839 nfc_free_device(dev->nfc_dev); 2404 nfc_free_device(dev->nfc_dev);
2405
1840destroy_wq: 2406destroy_wq:
1841 destroy_workqueue(dev->wq); 2407 destroy_workqueue(dev->wq);
1842error: 2408error:
@@ -1865,6 +2431,8 @@ static void pn533_disconnect(struct usb_interface *interface)
1865 2431
1866 skb_queue_purge(&dev->resp_q); 2432 skb_queue_purge(&dev->resp_q);
1867 2433
2434 del_timer(&dev->listen_timer);
2435
1868 kfree(dev->in_frame); 2436 kfree(dev->in_frame);
1869 usb_free_urb(dev->in_urb); 2437 usb_free_urb(dev->in_urb);
1870 kfree(dev->out_frame); 2438 kfree(dev->out_frame);
diff --git a/drivers/nfc/pn544_hci.c b/drivers/nfc/pn544_hci.c
index 281f18c2fb82..aa71807189ba 100644
--- a/drivers/nfc/pn544_hci.c
+++ b/drivers/nfc/pn544_hci.c
@@ -108,16 +108,22 @@ enum pn544_state {
108 108
109#define PN544_NFC_WI_MGMT_GATE 0xA1 109#define PN544_NFC_WI_MGMT_GATE 0xA1
110 110
111static u8 pn544_custom_gates[] = { 111static struct nfc_hci_gate pn544_gates[] = {
112 PN544_SYS_MGMT_GATE, 112 {NFC_HCI_ADMIN_GATE, NFC_HCI_INVALID_PIPE},
113 PN544_SWP_MGMT_GATE, 113 {NFC_HCI_LOOPBACK_GATE, NFC_HCI_INVALID_PIPE},
114 PN544_POLLING_LOOP_MGMT_GATE, 114 {NFC_HCI_ID_MGMT_GATE, NFC_HCI_INVALID_PIPE},
115 PN544_NFC_WI_MGMT_GATE, 115 {NFC_HCI_LINK_MGMT_GATE, NFC_HCI_INVALID_PIPE},
116 PN544_RF_READER_F_GATE, 116 {NFC_HCI_RF_READER_B_GATE, NFC_HCI_INVALID_PIPE},
117 PN544_RF_READER_JEWEL_GATE, 117 {NFC_HCI_RF_READER_A_GATE, NFC_HCI_INVALID_PIPE},
118 PN544_RF_READER_ISO15693_GATE, 118 {PN544_SYS_MGMT_GATE, NFC_HCI_INVALID_PIPE},
119 PN544_RF_READER_NFCIP1_INITIATOR_GATE, 119 {PN544_SWP_MGMT_GATE, NFC_HCI_INVALID_PIPE},
120 PN544_RF_READER_NFCIP1_TARGET_GATE 120 {PN544_POLLING_LOOP_MGMT_GATE, NFC_HCI_INVALID_PIPE},
121 {PN544_NFC_WI_MGMT_GATE, NFC_HCI_INVALID_PIPE},
122 {PN544_RF_READER_F_GATE, NFC_HCI_INVALID_PIPE},
123 {PN544_RF_READER_JEWEL_GATE, NFC_HCI_INVALID_PIPE},
124 {PN544_RF_READER_ISO15693_GATE, NFC_HCI_INVALID_PIPE},
125 {PN544_RF_READER_NFCIP1_INITIATOR_GATE, NFC_HCI_INVALID_PIPE},
126 {PN544_RF_READER_NFCIP1_TARGET_GATE, NFC_HCI_INVALID_PIPE}
121}; 127};
122 128
123/* Largest headroom needed for outgoing custom commands */ 129/* Largest headroom needed for outgoing custom commands */
@@ -377,6 +383,9 @@ static int pn544_hci_open(struct nfc_shdlc *shdlc)
377 383
378 r = pn544_hci_enable(info, HCI_MODE); 384 r = pn544_hci_enable(info, HCI_MODE);
379 385
386 if (r == 0)
387 info->state = PN544_ST_READY;
388
380out: 389out:
381 mutex_unlock(&info->info_lock); 390 mutex_unlock(&info->info_lock);
382 return r; 391 return r;
@@ -393,6 +402,8 @@ static void pn544_hci_close(struct nfc_shdlc *shdlc)
393 402
394 pn544_hci_disable(info); 403 pn544_hci_disable(info);
395 404
405 info->state = PN544_ST_COLD;
406
396out: 407out:
397 mutex_unlock(&info->info_lock); 408 mutex_unlock(&info->info_lock);
398} 409}
@@ -576,7 +587,8 @@ static int pn544_hci_xmit(struct nfc_shdlc *shdlc, struct sk_buff *skb)
576 return pn544_hci_i2c_write(client, skb->data, skb->len); 587 return pn544_hci_i2c_write(client, skb->data, skb->len);
577} 588}
578 589
579static int pn544_hci_start_poll(struct nfc_shdlc *shdlc, u32 protocols) 590static int pn544_hci_start_poll(struct nfc_shdlc *shdlc,
591 u32 im_protocols, u32 tm_protocols)
580{ 592{
581 struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc); 593 struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc);
582 u8 phases = 0; 594 u8 phases = 0;
@@ -584,7 +596,8 @@ static int pn544_hci_start_poll(struct nfc_shdlc *shdlc, u32 protocols)
584 u8 duration[2]; 596 u8 duration[2];
585 u8 activated; 597 u8 activated;
586 598
587 pr_info(DRIVER_DESC ": %s protocols = %d\n", __func__, protocols); 599 pr_info(DRIVER_DESC ": %s protocols 0x%x 0x%x\n",
600 __func__, im_protocols, tm_protocols);
588 601
589 r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE, 602 r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
590 NFC_HCI_EVT_END_OPERATION, NULL, 0); 603 NFC_HCI_EVT_END_OPERATION, NULL, 0);
@@ -604,10 +617,10 @@ static int pn544_hci_start_poll(struct nfc_shdlc *shdlc, u32 protocols)
604 if (r < 0) 617 if (r < 0)
605 return r; 618 return r;
606 619
607 if (protocols & (NFC_PROTO_ISO14443_MASK | NFC_PROTO_MIFARE_MASK | 620 if (im_protocols & (NFC_PROTO_ISO14443_MASK | NFC_PROTO_MIFARE_MASK |
608 NFC_PROTO_JEWEL_MASK)) 621 NFC_PROTO_JEWEL_MASK))
609 phases |= 1; /* Type A */ 622 phases |= 1; /* Type A */
610 if (protocols & NFC_PROTO_FELICA_MASK) { 623 if (im_protocols & NFC_PROTO_FELICA_MASK) {
611 phases |= (1 << 2); /* Type F 212 */ 624 phases |= (1 << 2); /* Type F 212 */
612 phases |= (1 << 3); /* Type F 424 */ 625 phases |= (1 << 3); /* Type F 424 */
613 } 626 }
@@ -842,10 +855,9 @@ static int __devinit pn544_hci_probe(struct i2c_client *client,
842 goto err_rti; 855 goto err_rti;
843 } 856 }
844 857
845 init_data.gate_count = ARRAY_SIZE(pn544_custom_gates); 858 init_data.gate_count = ARRAY_SIZE(pn544_gates);
846 859
847 memcpy(init_data.gates, pn544_custom_gates, 860 memcpy(init_data.gates, pn544_gates, sizeof(pn544_gates));
848 ARRAY_SIZE(pn544_custom_gates));
849 861
850 /* 862 /*
851 * TODO: Session id must include the driver name + some bus addr 863 * TODO: Session id must include the driver name + some bus addr
@@ -857,6 +869,7 @@ static int __devinit pn544_hci_probe(struct i2c_client *client,
857 NFC_PROTO_MIFARE_MASK | 869 NFC_PROTO_MIFARE_MASK |
858 NFC_PROTO_FELICA_MASK | 870 NFC_PROTO_FELICA_MASK |
859 NFC_PROTO_ISO14443_MASK | 871 NFC_PROTO_ISO14443_MASK |
872 NFC_PROTO_ISO14443_B_MASK |
860 NFC_PROTO_NFC_DEP_MASK; 873 NFC_PROTO_NFC_DEP_MASK;
861 874
862 info->shdlc = nfc_shdlc_allocate(&pn544_shdlc_ops, 875 info->shdlc = nfc_shdlc_allocate(&pn544_shdlc_ops,
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 2574abde8d99..8e6c25f35040 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -57,6 +57,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
57 const __be32 *paddr; 57 const __be32 *paddr;
58 u32 addr; 58 u32 addr;
59 int len; 59 int len;
60 bool is_c45;
60 61
61 /* A PHY must have a reg property in the range [0-31] */ 62 /* A PHY must have a reg property in the range [0-31] */
62 paddr = of_get_property(child, "reg", &len); 63 paddr = of_get_property(child, "reg", &len);
@@ -79,11 +80,18 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
79 mdio->irq[addr] = PHY_POLL; 80 mdio->irq[addr] = PHY_POLL;
80 } 81 }
81 82
82 phy = get_phy_device(mdio, addr); 83 is_c45 = of_device_is_compatible(child,
84 "ethernet-phy-ieee802.3-c45");
85 phy = get_phy_device(mdio, addr, is_c45);
86
83 if (!phy || IS_ERR(phy)) { 87 if (!phy || IS_ERR(phy)) {
84 dev_err(&mdio->dev, "error probing PHY at address %i\n", 88 phy = phy_device_create(mdio, addr, 0, false, NULL);
85 addr); 89 if (!phy || IS_ERR(phy)) {
86 continue; 90 dev_err(&mdio->dev,
91 "error creating PHY at address %i\n",
92 addr);
93 continue;
94 }
87 } 95 }
88 96
89 /* Associate the OF node with the device structure so it 97 /* Associate the OF node with the device structure so it
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 426986518e96..b09355c14ee1 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -647,7 +647,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
647 } 647 }
648 QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN); 648 QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN);
649 } else { 649 } else {
650 random_ether_addr(card->dev->dev_addr); 650 eth_random_addr(card->dev->dev_addr);
651 memcpy(card->dev->dev_addr, vendor_pre, 3); 651 memcpy(card->dev->dev_addr, vendor_pre, 3);
652 } 652 }
653 return 0; 653 return 0;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 7be5e9775691..bada7f66c146 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1473,7 +1473,7 @@ static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
1473 memcpy(card->dev->dev_addr, 1473 memcpy(card->dev->dev_addr,
1474 cmd->data.create_destroy_addr.unique_id, ETH_ALEN); 1474 cmd->data.create_destroy_addr.unique_id, ETH_ALEN);
1475 else 1475 else
1476 random_ether_addr(card->dev->dev_addr); 1476 eth_random_addr(card->dev->dev_addr);
1477 1477
1478 return 0; 1478 return 0;
1479} 1479}
@@ -2700,10 +2700,11 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
2700 rcu_read_lock(); 2700 rcu_read_lock();
2701 dst = skb_dst(skb); 2701 dst = skb_dst(skb);
2702 if (dst) 2702 if (dst)
2703 n = dst_get_neighbour_noref(dst); 2703 n = dst_neigh_lookup_skb(dst, skb);
2704 if (n) { 2704 if (n) {
2705 cast_type = n->type; 2705 cast_type = n->type;
2706 rcu_read_unlock(); 2706 rcu_read_unlock();
2707 neigh_release(n);
2707 if ((cast_type == RTN_BROADCAST) || 2708 if ((cast_type == RTN_BROADCAST) ||
2708 (cast_type == RTN_MULTICAST) || 2709 (cast_type == RTN_MULTICAST) ||
2709 (cast_type == RTN_ANYCAST)) 2710 (cast_type == RTN_ANYCAST))
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 0578fa0dc14b..42969e8a45bd 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -59,6 +59,7 @@
59#include "57xx_hsi_bnx2fc.h" 59#include "57xx_hsi_bnx2fc.h"
60#include "bnx2fc_debug.h" 60#include "bnx2fc_debug.h"
61#include "../../net/ethernet/broadcom/cnic_if.h" 61#include "../../net/ethernet/broadcom/cnic_if.h"
62#include "../../net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h"
62#include "bnx2fc_constants.h" 63#include "bnx2fc_constants.h"
63 64
64#define BNX2FC_NAME "bnx2fc" 65#define BNX2FC_NAME "bnx2fc"
@@ -84,6 +85,8 @@
84#define BNX2FC_NUM_MAX_SESS 1024 85#define BNX2FC_NUM_MAX_SESS 1024
85#define BNX2FC_NUM_MAX_SESS_LOG (ilog2(BNX2FC_NUM_MAX_SESS)) 86#define BNX2FC_NUM_MAX_SESS_LOG (ilog2(BNX2FC_NUM_MAX_SESS))
86 87
88#define BNX2FC_MAX_NPIV 256
89
87#define BNX2FC_MAX_OUTSTANDING_CMNDS 2048 90#define BNX2FC_MAX_OUTSTANDING_CMNDS 2048
88#define BNX2FC_CAN_QUEUE BNX2FC_MAX_OUTSTANDING_CMNDS 91#define BNX2FC_CAN_QUEUE BNX2FC_MAX_OUTSTANDING_CMNDS
89#define BNX2FC_ELSTM_XIDS BNX2FC_CAN_QUEUE 92#define BNX2FC_ELSTM_XIDS BNX2FC_CAN_QUEUE
@@ -206,6 +209,7 @@ struct bnx2fc_hba {
206 struct fcoe_statistics_params *stats_buffer; 209 struct fcoe_statistics_params *stats_buffer;
207 dma_addr_t stats_buf_dma; 210 dma_addr_t stats_buf_dma;
208 struct completion stat_req_done; 211 struct completion stat_req_done;
212 struct fcoe_capabilities fcoe_cap;
209 213
210 /*destroy handling */ 214 /*destroy handling */
211 struct timer_list destroy_timer; 215 struct timer_list destroy_timer;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index f52f668fd247..05fe6620b3f0 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -1326,6 +1326,7 @@ static void bnx2fc_hba_destroy(struct bnx2fc_hba *hba)
1326static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic) 1326static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
1327{ 1327{
1328 struct bnx2fc_hba *hba; 1328 struct bnx2fc_hba *hba;
1329 struct fcoe_capabilities *fcoe_cap;
1329 int rc; 1330 int rc;
1330 1331
1331 hba = kzalloc(sizeof(*hba), GFP_KERNEL); 1332 hba = kzalloc(sizeof(*hba), GFP_KERNEL);
@@ -1361,6 +1362,21 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
1361 printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n"); 1362 printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n");
1362 goto cmgr_err; 1363 goto cmgr_err;
1363 } 1364 }
1365 fcoe_cap = &hba->fcoe_cap;
1366
1367 fcoe_cap->capability1 = BNX2FC_TM_MAX_SQES <<
1368 FCOE_IOS_PER_CONNECTION_SHIFT;
1369 fcoe_cap->capability1 |= BNX2FC_NUM_MAX_SESS <<
1370 FCOE_LOGINS_PER_PORT_SHIFT;
1371 fcoe_cap->capability2 = BNX2FC_MAX_OUTSTANDING_CMNDS <<
1372 FCOE_NUMBER_OF_EXCHANGES_SHIFT;
1373 fcoe_cap->capability2 |= BNX2FC_MAX_NPIV <<
1374 FCOE_NPIV_WWN_PER_PORT_SHIFT;
1375 fcoe_cap->capability3 = BNX2FC_NUM_MAX_SESS <<
1376 FCOE_TARGETS_SUPPORTED_SHIFT;
1377 fcoe_cap->capability3 |= BNX2FC_MAX_OUTSTANDING_CMNDS <<
1378 FCOE_OUTSTANDING_COMMANDS_SHIFT;
1379 fcoe_cap->capability4 = FCOE_CAPABILITY4_STATEFUL;
1364 1380
1365 init_waitqueue_head(&hba->shutdown_wait); 1381 init_waitqueue_head(&hba->shutdown_wait);
1366 init_waitqueue_head(&hba->destroy_wait); 1382 init_waitqueue_head(&hba->destroy_wait);
@@ -1691,6 +1707,32 @@ static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
1691 hba->pcidev = NULL; 1707 hba->pcidev = NULL;
1692} 1708}
1693 1709
1710/**
1711 * bnx2fc_ulp_get_stats - cnic callback to populate FCoE stats
1712 *
1713 * @handle: transport handle pointing to adapter struture
1714 */
1715static int bnx2fc_ulp_get_stats(void *handle)
1716{
1717 struct bnx2fc_hba *hba = handle;
1718 struct cnic_dev *cnic;
1719 struct fcoe_stats_info *stats_addr;
1720
1721 if (!hba)
1722 return -EINVAL;
1723
1724 cnic = hba->cnic;
1725 stats_addr = &cnic->stats_addr->fcoe_stat;
1726 if (!stats_addr)
1727 return -EINVAL;
1728
1729 strncpy(stats_addr->version, BNX2FC_VERSION,
1730 sizeof(stats_addr->version));
1731 stats_addr->txq_size = BNX2FC_SQ_WQES_MAX;
1732 stats_addr->rxq_size = BNX2FC_CQ_WQES_MAX;
1733
1734 return 0;
1735}
1694 1736
1695 1737
1696/** 1738/**
@@ -1944,6 +1986,7 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
1944 adapter_count++; 1986 adapter_count++;
1945 mutex_unlock(&bnx2fc_dev_lock); 1987 mutex_unlock(&bnx2fc_dev_lock);
1946 1988
1989 dev->fcoe_cap = &hba->fcoe_cap;
1947 clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic); 1990 clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
1948 rc = dev->register_device(dev, CNIC_ULP_FCOE, 1991 rc = dev->register_device(dev, CNIC_ULP_FCOE,
1949 (void *) hba); 1992 (void *) hba);
@@ -2643,4 +2686,5 @@ static struct cnic_ulp_ops bnx2fc_cnic_cb = {
2643 .cnic_stop = bnx2fc_ulp_stop, 2686 .cnic_stop = bnx2fc_ulp_stop,
2644 .indicate_kcqes = bnx2fc_indicate_kcqe, 2687 .indicate_kcqes = bnx2fc_indicate_kcqe,
2645 .indicate_netevent = bnx2fc_indicate_netevent, 2688 .indicate_netevent = bnx2fc_indicate_netevent,
2689 .cnic_get_stats = bnx2fc_ulp_get_stats,
2646}; 2690};
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
index dc0a08e69c82..f2db5fe7bdc2 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
@@ -267,7 +267,13 @@ struct bnx2i_cmd_request {
267 * task statistics for write response 267 * task statistics for write response
268 */ 268 */
269struct bnx2i_write_resp_task_stat { 269struct bnx2i_write_resp_task_stat {
270 u32 num_data_ins; 270#if defined(__BIG_ENDIAN)
271 u16 num_r2ts;
272 u16 num_data_outs;
273#elif defined(__LITTLE_ENDIAN)
274 u16 num_data_outs;
275 u16 num_r2ts;
276#endif
271}; 277};
272 278
273/* 279/*
@@ -275,11 +281,11 @@ struct bnx2i_write_resp_task_stat {
275 */ 281 */
276struct bnx2i_read_resp_task_stat { 282struct bnx2i_read_resp_task_stat {
277#if defined(__BIG_ENDIAN) 283#if defined(__BIG_ENDIAN)
278 u16 num_data_outs; 284 u16 reserved;
279 u16 num_r2ts; 285 u16 num_data_ins;
280#elif defined(__LITTLE_ENDIAN) 286#elif defined(__LITTLE_ENDIAN)
281 u16 num_r2ts; 287 u16 num_data_ins;
282 u16 num_data_outs; 288 u16 reserved;
283#endif 289#endif
284}; 290};
285 291
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 7e77cf620291..3f9e7061258e 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -44,6 +44,8 @@
44#include "57xx_iscsi_hsi.h" 44#include "57xx_iscsi_hsi.h"
45#include "57xx_iscsi_constants.h" 45#include "57xx_iscsi_constants.h"
46 46
47#include "../../net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h"
48
47#define BNX2_ISCSI_DRIVER_NAME "bnx2i" 49#define BNX2_ISCSI_DRIVER_NAME "bnx2i"
48 50
49#define BNX2I_MAX_ADAPTERS 8 51#define BNX2I_MAX_ADAPTERS 8
@@ -126,6 +128,43 @@
126#define REG_WR(__hba, offset, val) \ 128#define REG_WR(__hba, offset, val) \
127 writel(val, __hba->regview + offset) 129 writel(val, __hba->regview + offset)
128 130
131#ifdef CONFIG_32BIT
132#define GET_STATS_64(__hba, dst, field) \
133 do { \
134 spin_lock_bh(&__hba->stat_lock); \
135 dst->field##_lo = __hba->stats.field##_lo; \
136 dst->field##_hi = __hba->stats.field##_hi; \
137 spin_unlock_bh(&__hba->stat_lock); \
138 } while (0)
139
140#define ADD_STATS_64(__hba, field, len) \
141 do { \
142 if (spin_trylock(&__hba->stat_lock)) { \
143 if (__hba->stats.field##_lo + len < \
144 __hba->stats.field##_lo) \
145 __hba->stats.field##_hi++; \
146 __hba->stats.field##_lo += len; \
147 spin_unlock(&__hba->stat_lock); \
148 } \
149 } while (0)
150
151#else
152#define GET_STATS_64(__hba, dst, field) \
153 do { \
154 u64 val, *out; \
155 \
156 val = __hba->bnx2i_stats.field; \
157 out = (u64 *)&__hba->stats.field##_lo; \
158 *out = cpu_to_le64(val); \
159 out = (u64 *)&dst->field##_lo; \
160 *out = cpu_to_le64(val); \
161 } while (0)
162
163#define ADD_STATS_64(__hba, field, len) \
164 do { \
165 __hba->bnx2i_stats.field += len; \
166 } while (0)
167#endif
129 168
130/** 169/**
131 * struct generic_pdu_resc - login pdu resource structure 170 * struct generic_pdu_resc - login pdu resource structure
@@ -288,6 +327,15 @@ struct iscsi_cid_queue {
288 struct bnx2i_conn **conn_cid_tbl; 327 struct bnx2i_conn **conn_cid_tbl;
289}; 328};
290 329
330
331struct bnx2i_stats_info {
332 u64 rx_pdus;
333 u64 rx_bytes;
334 u64 tx_pdus;
335 u64 tx_bytes;
336};
337
338
291/** 339/**
292 * struct bnx2i_hba - bnx2i adapter structure 340 * struct bnx2i_hba - bnx2i adapter structure
293 * 341 *
@@ -341,6 +389,8 @@ struct iscsi_cid_queue {
341 * @ctx_ccell_tasks: captures number of ccells and tasks supported by 389 * @ctx_ccell_tasks: captures number of ccells and tasks supported by
342 * currently offloaded connection, used to decode 390 * currently offloaded connection, used to decode
343 * context memory 391 * context memory
392 * @stat_lock: spin lock used by the statistic collector (32 bit)
393 * @stats: local iSCSI statistic collection place holder
344 * 394 *
345 * Adapter Data Structure 395 * Adapter Data Structure
346 */ 396 */
@@ -427,6 +477,12 @@ struct bnx2i_hba {
427 u32 num_sess_opened; 477 u32 num_sess_opened;
428 u32 num_conn_opened; 478 u32 num_conn_opened;
429 unsigned int ctx_ccell_tasks; 479 unsigned int ctx_ccell_tasks;
480
481#ifdef CONFIG_32BIT
482 spinlock_t stat_lock;
483#endif
484 struct bnx2i_stats_info bnx2i_stats;
485 struct iscsi_stats_info stats;
430}; 486};
431 487
432 488
@@ -750,6 +806,8 @@ extern void bnx2i_ulp_init(struct cnic_dev *dev);
750extern void bnx2i_ulp_exit(struct cnic_dev *dev); 806extern void bnx2i_ulp_exit(struct cnic_dev *dev);
751extern void bnx2i_start(void *handle); 807extern void bnx2i_start(void *handle);
752extern void bnx2i_stop(void *handle); 808extern void bnx2i_stop(void *handle);
809extern int bnx2i_get_stats(void *handle);
810
753extern struct bnx2i_hba *get_adapter_list_head(void); 811extern struct bnx2i_hba *get_adapter_list_head(void);
754 812
755struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba, 813struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 86a12b48e477..33d6630529de 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1350,6 +1350,7 @@ int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
1350 struct cqe *cqe) 1350 struct cqe *cqe)
1351{ 1351{
1352 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; 1352 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1353 struct bnx2i_hba *hba = bnx2i_conn->hba;
1353 struct bnx2i_cmd_response *resp_cqe; 1354 struct bnx2i_cmd_response *resp_cqe;
1354 struct bnx2i_cmd *bnx2i_cmd; 1355 struct bnx2i_cmd *bnx2i_cmd;
1355 struct iscsi_task *task; 1356 struct iscsi_task *task;
@@ -1367,16 +1368,26 @@ int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
1367 1368
1368 if (bnx2i_cmd->req.op_attr & ISCSI_CMD_REQUEST_READ) { 1369 if (bnx2i_cmd->req.op_attr & ISCSI_CMD_REQUEST_READ) {
1369 conn->datain_pdus_cnt += 1370 conn->datain_pdus_cnt +=
1370 resp_cqe->task_stat.read_stat.num_data_outs; 1371 resp_cqe->task_stat.read_stat.num_data_ins;
1371 conn->rxdata_octets += 1372 conn->rxdata_octets +=
1372 bnx2i_cmd->req.total_data_transfer_length; 1373 bnx2i_cmd->req.total_data_transfer_length;
1374 ADD_STATS_64(hba, rx_pdus,
1375 resp_cqe->task_stat.read_stat.num_data_ins);
1376 ADD_STATS_64(hba, rx_bytes,
1377 bnx2i_cmd->req.total_data_transfer_length);
1373 } else { 1378 } else {
1374 conn->dataout_pdus_cnt += 1379 conn->dataout_pdus_cnt +=
1375 resp_cqe->task_stat.read_stat.num_data_outs; 1380 resp_cqe->task_stat.write_stat.num_data_outs;
1376 conn->r2t_pdus_cnt += 1381 conn->r2t_pdus_cnt +=
1377 resp_cqe->task_stat.read_stat.num_r2ts; 1382 resp_cqe->task_stat.write_stat.num_r2ts;
1378 conn->txdata_octets += 1383 conn->txdata_octets +=
1379 bnx2i_cmd->req.total_data_transfer_length; 1384 bnx2i_cmd->req.total_data_transfer_length;
1385 ADD_STATS_64(hba, tx_pdus,
1386 resp_cqe->task_stat.write_stat.num_data_outs);
1387 ADD_STATS_64(hba, tx_bytes,
1388 bnx2i_cmd->req.total_data_transfer_length);
1389 ADD_STATS_64(hba, rx_pdus,
1390 resp_cqe->task_stat.write_stat.num_r2ts);
1380 } 1391 }
1381 bnx2i_iscsi_unmap_sg_list(bnx2i_cmd); 1392 bnx2i_iscsi_unmap_sg_list(bnx2i_cmd);
1382 1393
@@ -1961,6 +1972,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
1961{ 1972{
1962 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; 1973 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1963 struct iscsi_session *session = conn->session; 1974 struct iscsi_session *session = conn->session;
1975 struct bnx2i_hba *hba = bnx2i_conn->hba;
1964 struct qp_info *qp; 1976 struct qp_info *qp;
1965 struct bnx2i_nop_in_msg *nopin; 1977 struct bnx2i_nop_in_msg *nopin;
1966 int tgt_async_msg; 1978 int tgt_async_msg;
@@ -1973,7 +1985,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
1973 1985
1974 if (!qp->cq_virt) { 1986 if (!qp->cq_virt) {
1975 printk(KERN_ALERT "bnx2i (%s): cq resr freed in bh execution!", 1987 printk(KERN_ALERT "bnx2i (%s): cq resr freed in bh execution!",
1976 bnx2i_conn->hba->netdev->name); 1988 hba->netdev->name);
1977 goto out; 1989 goto out;
1978 } 1990 }
1979 while (1) { 1991 while (1) {
@@ -1985,9 +1997,9 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
1985 if (nopin->op_code == ISCSI_OP_NOOP_IN && 1997 if (nopin->op_code == ISCSI_OP_NOOP_IN &&
1986 nopin->itt == (u16) RESERVED_ITT) { 1998 nopin->itt == (u16) RESERVED_ITT) {
1987 printk(KERN_ALERT "bnx2i: Unsolicited " 1999 printk(KERN_ALERT "bnx2i: Unsolicited "
1988 "NOP-In detected for suspended " 2000 "NOP-In detected for suspended "
1989 "connection dev=%s!\n", 2001 "connection dev=%s!\n",
1990 bnx2i_conn->hba->netdev->name); 2002 hba->netdev->name);
1991 bnx2i_unsol_pdu_adjust_rq(bnx2i_conn); 2003 bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
1992 goto cqe_out; 2004 goto cqe_out;
1993 } 2005 }
@@ -2001,7 +2013,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
2001 /* Run the kthread engine only for data cmds 2013 /* Run the kthread engine only for data cmds
2002 All other cmds will be completed in this bh! */ 2014 All other cmds will be completed in this bh! */
2003 bnx2i_queue_scsi_cmd_resp(session, bnx2i_conn, nopin); 2015 bnx2i_queue_scsi_cmd_resp(session, bnx2i_conn, nopin);
2004 break; 2016 goto done;
2005 case ISCSI_OP_LOGIN_RSP: 2017 case ISCSI_OP_LOGIN_RSP:
2006 bnx2i_process_login_resp(session, bnx2i_conn, 2018 bnx2i_process_login_resp(session, bnx2i_conn,
2007 qp->cq_cons_qe); 2019 qp->cq_cons_qe);
@@ -2044,11 +2056,15 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
2044 printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n", 2056 printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
2045 nopin->op_code); 2057 nopin->op_code);
2046 } 2058 }
2059
2060 ADD_STATS_64(hba, rx_pdus, 1);
2061 ADD_STATS_64(hba, rx_bytes, nopin->data_length);
2062done:
2047 if (!tgt_async_msg) { 2063 if (!tgt_async_msg) {
2048 if (!atomic_read(&bnx2i_conn->ep->num_active_cmds)) 2064 if (!atomic_read(&bnx2i_conn->ep->num_active_cmds))
2049 printk(KERN_ALERT "bnx2i (%s): no active cmd! " 2065 printk(KERN_ALERT "bnx2i (%s): no active cmd! "
2050 "op 0x%x\n", 2066 "op 0x%x\n",
2051 bnx2i_conn->hba->netdev->name, 2067 hba->netdev->name,
2052 nopin->op_code); 2068 nopin->op_code);
2053 else 2069 else
2054 atomic_dec(&bnx2i_conn->ep->num_active_cmds); 2070 atomic_dec(&bnx2i_conn->ep->num_active_cmds);
@@ -2692,6 +2708,7 @@ struct cnic_ulp_ops bnx2i_cnic_cb = {
2692 .cm_remote_close = bnx2i_cm_remote_close, 2708 .cm_remote_close = bnx2i_cm_remote_close,
2693 .cm_remote_abort = bnx2i_cm_remote_abort, 2709 .cm_remote_abort = bnx2i_cm_remote_abort,
2694 .iscsi_nl_send_msg = bnx2i_send_nl_mesg, 2710 .iscsi_nl_send_msg = bnx2i_send_nl_mesg,
2711 .cnic_get_stats = bnx2i_get_stats,
2695 .owner = THIS_MODULE 2712 .owner = THIS_MODULE
2696}; 2713};
2697 2714
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 8b6816706ee5..b17637aab9a7 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -381,6 +381,46 @@ void bnx2i_ulp_exit(struct cnic_dev *dev)
381 381
382 382
383/** 383/**
384 * bnx2i_get_stats - Retrieve various statistic from iSCSI offload
385 * @handle: bnx2i_hba
386 *
387 * function callback exported via bnx2i - cnic driver interface to
388 * retrieve various iSCSI offload related statistics.
389 */
390int bnx2i_get_stats(void *handle)
391{
392 struct bnx2i_hba *hba = handle;
393 struct iscsi_stats_info *stats;
394
395 if (!hba)
396 return -EINVAL;
397
398 stats = (struct iscsi_stats_info *)hba->cnic->stats_addr;
399
400 if (!stats)
401 return -ENOMEM;
402
403 strlcpy(stats->version, DRV_MODULE_VERSION, sizeof(stats->version));
404 memcpy(stats->mac_add1 + 2, hba->cnic->mac_addr, ETH_ALEN);
405
406 stats->max_frame_size = hba->netdev->mtu;
407 stats->txq_size = hba->max_sqes;
408 stats->rxq_size = hba->max_cqes;
409
410 stats->txq_avg_depth = 0;
411 stats->rxq_avg_depth = 0;
412
413 GET_STATS_64(hba, stats, rx_pdus);
414 GET_STATS_64(hba, stats, rx_bytes);
415
416 GET_STATS_64(hba, stats, tx_pdus);
417 GET_STATS_64(hba, stats, tx_bytes);
418
419 return 0;
420}
421
422
423/**
384 * bnx2i_percpu_thread_create - Create a receive thread for an 424 * bnx2i_percpu_thread_create - Create a receive thread for an
385 * online CPU 425 * online CPU
386 * 426 *
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 621538b8b544..3b34c13e2f02 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -874,6 +874,11 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
874 hba->conn_ctx_destroy_tmo = 2 * HZ; 874 hba->conn_ctx_destroy_tmo = 2 * HZ;
875 } 875 }
876 876
877#ifdef CONFIG_32BIT
878 spin_lock_init(&hba->stat_lock);
879#endif
880 memset(&hba->stats, 0, sizeof(struct iscsi_stats_info));
881
877 if (iscsi_host_add(shost, &hba->pcidev->dev)) 882 if (iscsi_host_add(shost, &hba->pcidev->dev))
878 goto free_dump_mem; 883 goto free_dump_mem;
879 return hba; 884 return hba;
@@ -1181,12 +1186,18 @@ static int
1181bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task) 1186bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
1182{ 1187{
1183 struct bnx2i_conn *bnx2i_conn = conn->dd_data; 1188 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1189 struct bnx2i_hba *hba = bnx2i_conn->hba;
1184 struct bnx2i_cmd *cmd = task->dd_data; 1190 struct bnx2i_cmd *cmd = task->dd_data;
1185 1191
1186 memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN); 1192 memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);
1187 1193
1188 bnx2i_setup_cmd_wqe_template(cmd); 1194 bnx2i_setup_cmd_wqe_template(cmd);
1189 bnx2i_conn->gen_pdu.req_buf_size = task->data_count; 1195 bnx2i_conn->gen_pdu.req_buf_size = task->data_count;
1196
1197 /* Tx PDU/data length count */
1198 ADD_STATS_64(hba, tx_pdus, 1);
1199 ADD_STATS_64(hba, tx_bytes, task->data_count);
1200
1190 if (task->data_count) { 1201 if (task->data_count) {
1191 memcpy(bnx2i_conn->gen_pdu.req_buf, task->data, 1202 memcpy(bnx2i_conn->gen_pdu.req_buf, task->data,
1192 task->data_count); 1203 task->data_count);
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 36739da8bc15..49692a1ac44a 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -966,7 +966,8 @@ static int init_act_open(struct cxgbi_sock *csk)
966 csk->saddr.sin_addr.s_addr = chba->ipv4addr; 966 csk->saddr.sin_addr.s_addr = chba->ipv4addr;
967 967
968 csk->rss_qid = 0; 968 csk->rss_qid = 0;
969 csk->l2t = t3_l2t_get(t3dev, dst, ndev); 969 csk->l2t = t3_l2t_get(t3dev, dst, ndev,
970 &csk->daddr.sin_addr.s_addr);
970 if (!csk->l2t) { 971 if (!csk->l2t) {
971 pr_err("NO l2t available.\n"); 972 pr_err("NO l2t available.\n");
972 return -EINVAL; 973 return -EINVAL;
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 5a4a3bfc60cf..cc9a06897f34 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1142,7 +1142,7 @@ static int init_act_open(struct cxgbi_sock *csk)
1142 cxgbi_sock_set_flag(csk, CTPF_HAS_ATID); 1142 cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
1143 cxgbi_sock_get(csk); 1143 cxgbi_sock_get(csk);
1144 1144
1145 n = dst_get_neighbour_noref(csk->dst); 1145 n = dst_neigh_lookup(csk->dst, &csk->daddr.sin_addr.s_addr);
1146 if (!n) { 1146 if (!n) {
1147 pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name); 1147 pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
1148 goto rel_resource; 1148 goto rel_resource;
@@ -1182,9 +1182,12 @@ static int init_act_open(struct cxgbi_sock *csk)
1182 1182
1183 cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN); 1183 cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
1184 send_act_open_req(csk, skb, csk->l2t); 1184 send_act_open_req(csk, skb, csk->l2t);
1185 neigh_release(n);
1185 return 0; 1186 return 0;
1186 1187
1187rel_resource: 1188rel_resource:
1189 if (n)
1190 neigh_release(n);
1188 if (skb) 1191 if (skb)
1189 __kfree_skb(skb); 1192 __kfree_skb(skb);
1190 return -EINVAL; 1193 return -EINVAL;
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index d9253db1d0e2..b44c1cff3114 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -494,7 +494,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
494 goto err_out; 494 goto err_out;
495 } 495 }
496 dst = &rt->dst; 496 dst = &rt->dst;
497 n = dst_get_neighbour_noref(dst); 497 n = dst_neigh_lookup(dst, &daddr->sin_addr.s_addr);
498 if (!n) { 498 if (!n) {
499 err = -ENODEV; 499 err = -ENODEV;
500 goto rel_rt; 500 goto rel_rt;
@@ -506,7 +506,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
506 &daddr->sin_addr.s_addr, ntohs(daddr->sin_port), 506 &daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
507 ndev->name); 507 ndev->name);
508 err = -ENETUNREACH; 508 err = -ENETUNREACH;
509 goto rel_rt; 509 goto rel_neigh;
510 } 510 }
511 511
512 if (ndev->flags & IFF_LOOPBACK) { 512 if (ndev->flags & IFF_LOOPBACK) {
@@ -521,7 +521,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
521 pr_info("dst %pI4, %s, NOT cxgbi device.\n", 521 pr_info("dst %pI4, %s, NOT cxgbi device.\n",
522 &daddr->sin_addr.s_addr, ndev->name); 522 &daddr->sin_addr.s_addr, ndev->name);
523 err = -ENETUNREACH; 523 err = -ENETUNREACH;
524 goto rel_rt; 524 goto rel_neigh;
525 } 525 }
526 log_debug(1 << CXGBI_DBG_SOCK, 526 log_debug(1 << CXGBI_DBG_SOCK,
527 "route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n", 527 "route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n",
@@ -531,7 +531,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
531 csk = cxgbi_sock_create(cdev); 531 csk = cxgbi_sock_create(cdev);
532 if (!csk) { 532 if (!csk) {
533 err = -ENOMEM; 533 err = -ENOMEM;
534 goto rel_rt; 534 goto rel_neigh;
535 } 535 }
536 csk->cdev = cdev; 536 csk->cdev = cdev;
537 csk->port_id = port; 537 csk->port_id = port;
@@ -541,9 +541,13 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
541 csk->daddr.sin_port = daddr->sin_port; 541 csk->daddr.sin_port = daddr->sin_port;
542 csk->daddr.sin_family = daddr->sin_family; 542 csk->daddr.sin_family = daddr->sin_family;
543 csk->saddr.sin_addr.s_addr = fl4.saddr; 543 csk->saddr.sin_addr.s_addr = fl4.saddr;
544 neigh_release(n);
544 545
545 return csk; 546 return csk;
546 547
548rel_neigh:
549 neigh_release(n);
550
547rel_rt: 551rel_rt:
548 ip_rt_put(rt); 552 ip_rt_put(rt);
549 if (csk) 553 if (csk)
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index c77628afbf9f..8818dd681c19 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -486,6 +486,10 @@ void
486scsi_netlink_init(void) 486scsi_netlink_init(void)
487{ 487{
488 int error; 488 int error;
489 struct netlink_kernel_cfg cfg = {
490 .input = scsi_nl_rcv_msg,
491 .groups = SCSI_NL_GRP_CNT,
492 };
489 493
490 INIT_LIST_HEAD(&scsi_nl_drivers); 494 INIT_LIST_HEAD(&scsi_nl_drivers);
491 495
@@ -497,8 +501,7 @@ scsi_netlink_init(void)
497 } 501 }
498 502
499 scsi_nl_sock = netlink_kernel_create(&init_net, NETLINK_SCSITRANSPORT, 503 scsi_nl_sock = netlink_kernel_create(&init_net, NETLINK_SCSITRANSPORT,
500 SCSI_NL_GRP_CNT, scsi_nl_rcv_msg, NULL, 504 THIS_MODULE, &cfg);
501 THIS_MODULE);
502 if (!scsi_nl_sock) { 505 if (!scsi_nl_sock) {
503 printk(KERN_ERR "%s: register of receive handler failed\n", 506 printk(KERN_ERR "%s: register of receive handler failed\n",
504 __func__); 507 __func__);
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 1cf640e575da..6042954d8f3b 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2936,7 +2936,10 @@ EXPORT_SYMBOL_GPL(iscsi_unregister_transport);
2936static __init int iscsi_transport_init(void) 2936static __init int iscsi_transport_init(void)
2937{ 2937{
2938 int err; 2938 int err;
2939 2939 struct netlink_kernel_cfg cfg = {
2940 .groups = 1,
2941 .input = iscsi_if_rx,
2942 };
2940 printk(KERN_INFO "Loading iSCSI transport class v%s.\n", 2943 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
2941 ISCSI_TRANSPORT_VERSION); 2944 ISCSI_TRANSPORT_VERSION);
2942 2945
@@ -2966,8 +2969,8 @@ static __init int iscsi_transport_init(void)
2966 if (err) 2969 if (err)
2967 goto unregister_conn_class; 2970 goto unregister_conn_class;
2968 2971
2969 nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx, 2972 nls = netlink_kernel_create(&init_net, NETLINK_ISCSI,
2970 NULL, THIS_MODULE); 2973 THIS_MODULE, &cfg);
2971 if (!nls) { 2974 if (!nls) {
2972 err = -ENOBUFS; 2975 err = -ENOBUFS;
2973 goto unregister_session_class; 2976 goto unregister_session_class;
diff --git a/drivers/ssb/b43_pci_bridge.c b/drivers/ssb/b43_pci_bridge.c
index f551e5376147..266aa1648a02 100644
--- a/drivers/ssb/b43_pci_bridge.c
+++ b/drivers/ssb/b43_pci_bridge.c
@@ -36,6 +36,7 @@ static const struct pci_device_id b43_pci_bridge_tbl[] = {
36 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4328) }, 36 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4328) },
37 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4329) }, 37 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4329) },
38 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432b) }, 38 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432b) },
39 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432c) },
39 { 0, }, 40 { 0, },
40}; 41};
41MODULE_DEVICE_TABLE(pci, b43_pci_bridge_tbl); 42MODULE_DEVICE_TABLE(pci, b43_pci_bridge_tbl);
diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c
index 266c7c5c86dc..ab4627cf1114 100644
--- a/drivers/ssb/scan.c
+++ b/drivers/ssb/scan.c
@@ -90,6 +90,8 @@ const char *ssb_core_name(u16 coreid)
90 return "ARM 1176"; 90 return "ARM 1176";
91 case SSB_DEV_ARM_7TDMI: 91 case SSB_DEV_ARM_7TDMI:
92 return "ARM 7TDMI"; 92 return "ARM 7TDMI";
93 case SSB_DEV_ARM_CM3:
94 return "ARM Cortex M3";
93 } 95 }
94 return "UNKNOWN"; 96 return "UNKNOWN";
95} 97}
diff --git a/drivers/staging/gdm72xx/netlink_k.c b/drivers/staging/gdm72xx/netlink_k.c
index 51665132c61b..87c3a07ed80e 100644
--- a/drivers/staging/gdm72xx/netlink_k.c
+++ b/drivers/staging/gdm72xx/netlink_k.c
@@ -88,13 +88,15 @@ struct sock *netlink_init(int unit, void (*cb)(struct net_device *dev, u16 type,
88 void *msg, int len)) 88 void *msg, int len))
89{ 89{
90 struct sock *sock; 90 struct sock *sock;
91 struct netlink_kernel_cfg cfg = {
92 .input = netlink_rcv,
93 };
91 94
92#if !defined(DEFINE_MUTEX) 95#if !defined(DEFINE_MUTEX)
93 init_MUTEX(&netlink_mutex); 96 init_MUTEX(&netlink_mutex);
94#endif 97#endif
95 98
96 sock = netlink_kernel_create(&init_net, unit, 0, netlink_rcv, NULL, 99 sock = netlink_kernel_create(&init_net, unit, THIS_MODULE, &cfg);
97 THIS_MODULE);
98 100
99 if (sock) 101 if (sock)
100 rcv_cb = cb; 102 rcv_cb = cb;
@@ -127,8 +129,12 @@ int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len)
127 } 129 }
128 130
129 seq++; 131 seq++;
130 nlh = NLMSG_PUT(skb, 0, seq, type, len); 132 nlh = nlmsg_put(skb, 0, seq, type, len, 0);
131 memcpy(NLMSG_DATA(nlh), msg, len); 133 if (!nlh) {
134 kfree_skb(skb);
135 return -EMSGSIZE;
136 }
137 memcpy(nlmsg_data(nlh), msg, len);
132 138
133 NETLINK_CB(skb).pid = 0; 139 NETLINK_CB(skb).pid = 0;
134 NETLINK_CB(skb).dst_group = 0; 140 NETLINK_CB(skb).dst_group = 0;
@@ -144,7 +150,5 @@ int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len)
144 } 150 }
145 ret = 0; 151 ret = 0;
146 } 152 }
147
148nlmsg_failure:
149 return ret; 153 return ret;
150} 154}
diff --git a/drivers/usb/atm/xusbatm.c b/drivers/usb/atm/xusbatm.c
index 14ec9f0c5924..b3b1bb78b2ef 100644
--- a/drivers/usb/atm/xusbatm.c
+++ b/drivers/usb/atm/xusbatm.c
@@ -20,7 +20,7 @@
20 ******************************************************************************/ 20 ******************************************************************************/
21 21
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/etherdevice.h> /* for random_ether_addr() */ 23#include <linux/etherdevice.h> /* for eth_random_addr() */
24 24
25#include "usbatm.h" 25#include "usbatm.h"
26 26
@@ -163,7 +163,7 @@ static int xusbatm_atm_start(struct usbatm_data *usbatm,
163 atm_dbg(usbatm, "%s entered\n", __func__); 163 atm_dbg(usbatm, "%s entered\n", __func__);
164 164
165 /* use random MAC as we've no way to get it from the device */ 165 /* use random MAC as we've no way to get it from the device */
166 random_ether_addr(atm_dev->esi); 166 eth_random_addr(atm_dev->esi);
167 167
168 return 0; 168 return 0;
169} 169}
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index 47cf48b51c9d..b9e1925b2df0 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -724,7 +724,7 @@ static int get_ether_addr(const char *str, u8 *dev_addr)
724 if (is_valid_ether_addr(dev_addr)) 724 if (is_valid_ether_addr(dev_addr))
725 return 0; 725 return 0;
726 } 726 }
727 random_ether_addr(dev_addr); 727 eth_random_addr(dev_addr);
728 return 1; 728 return 1;
729} 729}
730 730